/*
 * vmwgfx_drm.h — VMware SVGA DRM userspace API header.
 * Provenance: hf-risc trunk (OpenCores SVN), rev 13,
 * tools/riscv-gnu-toolchain-master/linux-headers/include/drm/vmwgfx_drm.h
 */
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
28
#ifndef __VMWGFX_DRM_H__
#define __VMWGFX_DRM_H__

#include <drm/drm.h>

/* Maximum number of cube-map faces a surface may carry. */
#define DRM_VMW_MAX_SURFACE_FACES 6
/* Maximum number of mipmap levels per surface face. */
#define DRM_VMW_MAX_MIP_LEVELS 24

37
/* Ioctl command offsets for the vmwgfx driver. */
#define DRM_VMW_GET_PARAM            0
#define DRM_VMW_ALLOC_DMABUF         1
#define DRM_VMW_UNREF_DMABUF         2
#define DRM_VMW_CURSOR_BYPASS        3
/* guarded by DRM_VMW_PARAM_NUM_STREAMS != 0*/
#define DRM_VMW_CONTROL_STREAM       4
#define DRM_VMW_CLAIM_STREAM         5
#define DRM_VMW_UNREF_STREAM         6
/* guarded by DRM_VMW_PARAM_3D == 1 */
#define DRM_VMW_CREATE_CONTEXT       7
#define DRM_VMW_UNREF_CONTEXT        8
#define DRM_VMW_CREATE_SURFACE       9
#define DRM_VMW_UNREF_SURFACE        10
#define DRM_VMW_REF_SURFACE          11
#define DRM_VMW_EXECBUF              12
#define DRM_VMW_GET_3D_CAP           13
#define DRM_VMW_FENCE_WAIT           14
#define DRM_VMW_FENCE_SIGNALED       15
#define DRM_VMW_FENCE_UNREF          16
#define DRM_VMW_FENCE_EVENT          17
#define DRM_VMW_PRESENT              18
#define DRM_VMW_PRESENT_READBACK     19
#define DRM_VMW_UPDATE_LAYOUT        20
#define DRM_VMW_CREATE_SHADER        21
#define DRM_VMW_UNREF_SHADER         22
#define DRM_VMW_GB_SURFACE_CREATE    23
#define DRM_VMW_GB_SURFACE_REF       24
#define DRM_VMW_SYNCCPU              25

66
/*************************************************************************/
/**
 * DRM_VMW_GET_PARAM - get device information.
 *
 * DRM_VMW_PARAM_FIFO_OFFSET:
 * Offset to use to map the first page of the FIFO read-only.
 * The fifo is mapped using the mmap() system call on the drm device.
 *
 * DRM_VMW_PARAM_OVERLAY_IOCTL:
 * Does the driver support the overlay ioctl.
 */

#define DRM_VMW_PARAM_NUM_STREAMS      0
#define DRM_VMW_PARAM_NUM_FREE_STREAMS 1
#define DRM_VMW_PARAM_3D               2
#define DRM_VMW_PARAM_HW_CAPS          3
#define DRM_VMW_PARAM_FIFO_CAPS       4
#define DRM_VMW_PARAM_MAX_FB_SIZE      5
#define DRM_VMW_PARAM_FIFO_HW_VERSION  6
#define DRM_VMW_PARAM_MAX_SURF_MEMORY  7
#define DRM_VMW_PARAM_3D_CAPS_SIZE     8
#define DRM_VMW_PARAM_MAX_MOB_MEMORY   9
#define DRM_VMW_PARAM_MAX_MOB_SIZE     10

90
/**
 * struct drm_vmw_getparam_arg
 *
 * @value: Returned value. //Out
 * @param: Parameter to query. //In.
 * @pad64: Explicit padding to keep the struct 64-bit aligned.
 *
 * Argument to the DRM_VMW_GET_PARAM Ioctl.
 */
struct drm_vmw_getparam_arg {
	uint64_t value;
	uint32_t param;
	uint32_t pad64;
};

105
/*************************************************************************/
/**
 * DRM_VMW_CREATE_CONTEXT - Create a host context.
 *
 * Allocates a device unique context id, and queues a create context command
 * for the host. Does not wait for host completion.
 */

/**
 * struct drm_vmw_context_arg
 *
 * @cid: Device unique context ID.
 * @pad64: Explicit padding for 64-bit alignment.
 *
 * Output argument to the DRM_VMW_CREATE_CONTEXT Ioctl.
 * Input argument to the DRM_VMW_UNREF_CONTEXT Ioctl.
 */
struct drm_vmw_context_arg {
	int32_t cid;
	uint32_t pad64;
};

127
/*************************************************************************/
128
/**
129
 * DRM_VMW_UNREF_CONTEXT - Create a host context.
130
 *
131
 * Frees a global context id, and queues a destroy host command for the host.
132
 * Does not wait for host completion. The context ID can be used directly
133
 * in the command stream and shows up as the same context ID on the host.
134
 */
135
 
136
/*************************************************************************/
137
/**
138
 * DRM_VMW_CREATE_SURFACE - Create a host suface.
139
 *
140
 * Allocates a device unique surface id, and queues a create surface command
141
 * for the host. Does not wait for host completion. The surface ID can be
142
 * used directly in the command stream and shows up as the same surface
143
 * ID on the host.
144
 */
145
 
146
/**
147
 * struct drm_wmv_surface_create_req
148
 *
149
 * @flags: Surface flags as understood by the host.
150
 * @format: Surface format as understood by the host.
151
 * @mip_levels: Number of mip levels for each face.
152
 * An unused face should have 0 encoded.
153
 * @size_addr: Address of a user-space array of sruct drm_vmw_size
154
 * cast to an uint64_t for 32-64 bit compatibility.
155
 * The size of the array should equal the total number of mipmap levels.
156
 * @shareable: Boolean whether other clients (as identified by file descriptors)
157
 * may reference this surface.
158
 * @scanout: Boolean whether the surface is intended to be used as a
159
 * scanout.
160
 *
161
 * Input data to the DRM_VMW_CREATE_SURFACE Ioctl.
162
 * Output data from the DRM_VMW_REF_SURFACE Ioctl.
163
 */
164
 
165
struct drm_vmw_surface_create_req {
166
        uint32_t flags;
167
        uint32_t format;
168
        uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
169
        uint64_t size_addr;
170
        int32_t shareable;
171
        int32_t scanout;
172
};
173
 
174
/**
 * struct drm_vmw_surface_arg
 *
 * @sid: Surface id of created surface or surface to destroy or reference.
 * @pad64: Explicit padding for 64-bit alignment.
 *
 * Output data from the DRM_VMW_CREATE_SURFACE Ioctl.
 * Input argument to the DRM_VMW_UNREF_SURFACE Ioctl.
 * Input argument to the DRM_VMW_REF_SURFACE Ioctl.
 */
struct drm_vmw_surface_arg {
	int32_t sid;
	uint32_t pad64;
};

189
/**
 * struct drm_vmw_size
 *
 * @width - mip level width
 * @height - mip level height
 * @depth - mip level depth
 * @pad64 - explicit padding for 64-bit alignment
 *
 * Description of a mip level.
 * Input data to the DRM_VMW_CREATE_SURFACE Ioctl.
 */
struct drm_vmw_size {
	uint32_t width;
	uint32_t height;
	uint32_t depth;
	uint32_t pad64;
};

207
/**
208
 * union drm_vmw_surface_create_arg
209
 *
210
 * @rep: Output data as described above.
211
 * @req: Input data as described above.
212
 *
213
 * Argument to the DRM_VMW_CREATE_SURFACE Ioctl.
214
 */
215
 
216
union drm_vmw_surface_create_arg {
217
        struct drm_vmw_surface_arg rep;
218
        struct drm_vmw_surface_create_req req;
219
};
220
 
221
/*************************************************************************/
222
/**
223
 * DRM_VMW_REF_SURFACE - Reference a host surface.
224
 *
225
 * Puts a reference on a host surface with a give sid, as previously
226
 * returned by the DRM_VMW_CREATE_SURFACE ioctl.
227
 * A reference will make sure the surface isn't destroyed while we hold
228
 * it and will allow the calling client to use the surface ID in the command
229
 * stream.
230
 *
231
 * On successful return, the Ioctl returns the surface information given
232
 * in the DRM_VMW_CREATE_SURFACE ioctl.
233
 */
234
 
235
/**
236
 * union drm_vmw_surface_reference_arg
237
 *
238
 * @rep: Output data as described above.
239
 * @req: Input data as described above.
240
 *
241
 * Argument to the DRM_VMW_REF_SURFACE Ioctl.
242
 */
243
 
244
union drm_vmw_surface_reference_arg {
245
        struct drm_vmw_surface_create_req rep;
246
        struct drm_vmw_surface_arg req;
247
};
248
 
249
/*************************************************************************/
/**
 * DRM_VMW_UNREF_SURFACE - Unreference a host surface.
 *
 * Clear a reference previously put on a host surface.
 * When all references are gone, including the one implicitly placed
 * on creation, a destroy surface command will be queued for the host.
 * Does not wait for completion.
 */
260
/*************************************************************************/
/**
 * DRM_VMW_EXECBUF
 *
 * Submit a command buffer for execution on the host, and return a
 * fence seqno that when signaled, indicates that the command buffer has
 * executed.
 */

/**
 * struct drm_vmw_execbuf_arg
 *
 * @commands: User-space address of a command buffer cast to an uint64_t.
 * @command_size: Size in bytes of the command buffer.
 * @throttle_us: Sleep until software is less than @throttle_us
 * microseconds ahead of hardware. The driver may round this value
 * to the nearest kernel tick.
 * @fence_rep: User-space address of a struct drm_vmw_fence_rep cast to an
 * uint64_t.
 * @version: Allows expanding the execbuf ioctl parameters without breaking
 * backwards compatibility, since user-space will always tell the kernel
 * which version it uses.
 * @flags: Execbuf flags. None currently.
 *
 * Argument to the DRM_VMW_EXECBUF Ioctl.
 */

#define DRM_VMW_EXECBUF_VERSION 1

struct drm_vmw_execbuf_arg {
	uint64_t commands;
	uint32_t command_size;
	uint32_t throttle_us;
	uint64_t fence_rep;
	uint32_t version;
	uint32_t flags;
};

298
/**
 * struct drm_vmw_fence_rep
 *
 * @handle: Fence object handle for fence associated with a command submission.
 * @mask: Fence flags relevant for this fence object.
 * @seqno: Fence sequence number in fifo. A fence object with a lower
 * seqno will signal the EXEC flag before a fence object with a higher
 * seqno. This can be used by user-space to avoid kernel calls to determine
 * whether a fence has signaled the EXEC flag. Note that @seqno will
 * wrap at 32-bit.
 * @passed_seqno: The highest seqno number processed by the hardware
 * so far. This can be used to mark user-space fence objects as signaled, and
 * to determine whether a fence seqno might be stale.
 * @pad64: Explicit padding for 64-bit alignment.
 * @error: This member should've been set to -EFAULT on submission.
 * The following actions should be taken on completion:
 * error == -EFAULT: Fence communication failed. The host is synchronized.
 * Use the last fence id read from the FIFO fence register.
 * error != 0 && error != -EFAULT:
 * Fence submission failed. The host is synchronized. Use the fence_seq member.
 * error == 0: All is OK, The host may not be synchronized.
 * Use the fence_seq member.
 *
 * Input / Output data to the DRM_VMW_EXECBUF Ioctl.
 */
struct drm_vmw_fence_rep {
	uint32_t handle;
	uint32_t mask;
	uint32_t seqno;
	uint32_t passed_seqno;
	uint32_t pad64;
	int32_t error;
};

332
/*************************************************************************/
/**
 * DRM_VMW_ALLOC_DMABUF
 *
 * Allocate a DMA buffer that is visible also to the host.
 * NOTE: The buffer is
 * identified by a handle and an offset, which are private to the guest, but
 * useable in the command stream. The guest kernel may translate these
 * and patch up the command stream accordingly. In the future, the offset may
 * be zero at all times, or it may disappear from the interface before it is
 * fixed.
 *
 * The DMA buffer may stay user-space mapped in the guest at all times,
 * and is thus suitable for sub-allocation.
 *
 * DMA buffers are mapped using the mmap() syscall on the drm device.
 */

/**
 * struct drm_vmw_alloc_dmabuf_req
 *
 * @size: Required minimum size of the buffer.
 * @pad64: Explicit padding for 64-bit alignment.
 *
 * Input data to the DRM_VMW_ALLOC_DMABUF Ioctl.
 */
struct drm_vmw_alloc_dmabuf_req {
	uint32_t size;
	uint32_t pad64;
};

363
/**
 * struct drm_vmw_dmabuf_rep
 *
 * @map_handle: Offset to use in the mmap() call used to map the buffer.
 * @handle: Handle unique to this buffer. Used for unreferencing.
 * @cur_gmr_id: GMR id to use in the command stream when this buffer is
 * referenced. See note above.
 * @cur_gmr_offset: Offset to use in the command stream when this buffer is
 * referenced. See note above.
 * @pad64: Explicit padding for 64-bit alignment.
 *
 * Output data from the DRM_VMW_ALLOC_DMABUF Ioctl.
 */
struct drm_vmw_dmabuf_rep {
	uint64_t map_handle;
	uint32_t handle;
	uint32_t cur_gmr_id;
	uint32_t cur_gmr_offset;
	uint32_t pad64;
};

384
/**
385
 * union drm_vmw_dmabuf_arg
386
 *
387
 * @req: Input data as described above.
388
 * @rep: Output data as described above.
389
 *
390
 * Argument to the DRM_VMW_ALLOC_DMABUF Ioctl.
391
 */
392
 
393
union drm_vmw_alloc_dmabuf_arg {
394
        struct drm_vmw_alloc_dmabuf_req req;
395
        struct drm_vmw_dmabuf_rep rep;
396
};
397
 
398
/*************************************************************************/
/**
 * DRM_VMW_UNREF_DMABUF - Free a DMA buffer.
 *
 */

/**
 * struct drm_vmw_unref_dmabuf_arg
 *
 * @handle: Handle indicating what buffer to free. Obtained from the
 * DRM_VMW_ALLOC_DMABUF Ioctl.
 * @pad64: Explicit padding for 64-bit alignment.
 *
 * Argument to the DRM_VMW_UNREF_DMABUF Ioctl.
 */
struct drm_vmw_unref_dmabuf_arg {
	uint32_t handle;
	uint32_t pad64;
};

418
/*************************************************************************/
/**
 * DRM_VMW_CONTROL_STREAM - Control overlays, aka streams.
 *
 * This IOCTL controls the overlay units of the svga device.
 * The SVGA overlay units does not work like regular hardware units in
 * that they do not automatically read back the contents of the given dma
 * buffer. But instead only read back for each call to this ioctl, and
 * at any point between this call being made and a following call that
 * either changes the buffer or disables the stream.
 */

/**
 * struct drm_vmw_rect
 *
 * Defines a rectangle. Used in the overlay ioctl to define
 * source and destination rectangle.
 */
struct drm_vmw_rect {
	int32_t x;
	int32_t y;
	uint32_t w;
	uint32_t h;
};

444
/**
445
 * struct drm_vmw_control_stream_arg
446
 *
447
 * @stream_id: Stearm to control
448
 * @enabled: If false all following arguments are ignored.
449
 * @handle: Handle to buffer for getting data from.
450
 * @format: Format of the overlay as understood by the host.
451
 * @width: Width of the overlay.
452
 * @height: Height of the overlay.
453
 * @size: Size of the overlay in bytes.
454
 * @pitch: Array of pitches, the two last are only used for YUV12 formats.
455
 * @offset: Offset from start of dma buffer to overlay.
456
 * @src: Source rect, must be within the defined area above.
457
 * @dst: Destination rect, x and y may be negative.
458
 *
459
 * Argument to the DRM_VMW_CONTROL_STREAM Ioctl.
460
 */
461
 
462
struct drm_vmw_control_stream_arg {
463
        uint32_t stream_id;
464
        uint32_t enabled;
465
 
466
        uint32_t flags;
467
        uint32_t color_key;
468
 
469
        uint32_t handle;
470
        uint32_t offset;
471
        int32_t format;
472
        uint32_t size;
473
        uint32_t width;
474
        uint32_t height;
475
        uint32_t pitch[3];
476
 
477
        uint32_t pad64;
478
        struct drm_vmw_rect src;
479
        struct drm_vmw_rect dst;
480
};
481
 
482
/*************************************************************************/
/**
 * DRM_VMW_CURSOR_BYPASS - Give extra information about cursor bypass.
 *
 */

#define DRM_VMW_CURSOR_BYPASS_ALL    (1 << 0)
#define DRM_VMW_CURSOR_BYPASS_FLAGS       (1)

/**
 * struct drm_vmw_cursor_bypass_arg
 *
 * @flags: Flags.
 * @crtc_id: Crtc id, only used if DMR_CURSOR_BYPASS_ALL isn't passed.
 * @xpos: X position of cursor.
 * @ypos: Y position of cursor.
 * @xhot: X hotspot.
 * @yhot: Y hotspot.
 *
 * Argument to the DRM_VMW_CURSOR_BYPASS Ioctl.
 */
struct drm_vmw_cursor_bypass_arg {
	uint32_t flags;
	uint32_t crtc_id;
	int32_t xpos;
	int32_t ypos;
	int32_t xhot;
	int32_t yhot;
};

513
/*************************************************************************/
/**
 * DRM_VMW_CLAIM_STREAM - Claim a single stream.
 */

/**
 * struct drm_vmw_stream_arg
 *
 * @stream_id: Device unique stream ID.
 * @pad64: Explicit padding for 64-bit alignment.
 *
 * Output argument to the DRM_VMW_CLAIM_STREAM Ioctl.
 * Input argument to the DRM_VMW_UNREF_STREAM Ioctl.
 */
struct drm_vmw_stream_arg {
	uint32_t stream_id;
	uint32_t pad64;
};

532
/*************************************************************************/
/**
 * DRM_VMW_UNREF_STREAM - Unclaim a stream.
 *
 * Return a single stream that was claimed by this process. Also makes
 * sure that the stream has been stopped.
 */
540
/*************************************************************************/
/**
 * DRM_VMW_GET_3D_CAP
 *
 * Read 3D capabilities from the FIFO
 *
 */

/**
 * struct drm_vmw_get_3d_cap_arg
 *
 * @buffer: Pointer to a buffer for capability data, cast to an uint64_t
 * @max_size: Max size to copy
 * @pad64: Explicit padding for 64-bit alignment.
 *
 * Input argument to the DRM_VMW_GET_3D_CAP_IOCTL
 * ioctls.
 */
struct drm_vmw_get_3d_cap_arg {
	uint64_t buffer;
	uint32_t max_size;
	uint32_t pad64;
};

564
/*************************************************************************/
/**
 * DRM_VMW_FENCE_WAIT
 *
 * Waits for a fence object to signal. The wait is interruptible, so that
 * signals may be delivered during the interrupt. The wait may timeout,
 * in which case the calls returns -EBUSY. If the wait is restarted,
 * that is restarting without resetting @cookie_valid to zero,
 * the timeout is computed from the first call.
 *
 * The flags argument to the DRM_VMW_FENCE_WAIT ioctl indicates what to wait
 * on:
 * DRM_VMW_FENCE_FLAG_EXEC: All commands ahead of the fence in the command
 * stream have executed.
 * DRM_VMW_FENCE_FLAG_QUERY: All query results resulting from query finish
 * commands in the buffer given to the EXECBUF ioctl returning the fence
 * object handle are available to user-space.
 *
 * DRM_VMW_WAIT_OPTION_UNREF: If this wait option is given, and the
 * fence wait ioctl returns 0, the fence object has been unreferenced after
 * the wait.
 */

#define DRM_VMW_FENCE_FLAG_EXEC   (1 << 0)
#define DRM_VMW_FENCE_FLAG_QUERY  (1 << 1)

#define DRM_VMW_WAIT_OPTION_UNREF (1 << 0)

/**
 * struct drm_vmw_fence_wait_arg
 *
 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
 * @cookie_valid: Must be reset to 0 on first call. Left alone on restart.
 * @kernel_cookie: Set to 0 on first call. Left alone on restart.
 * @timeout_us: Wait timeout in microseconds. 0 for indefinite timeout.
 * @lazy: Set to 1 if timing is not critical. Allow more than a kernel tick
 * before returning.
 * @flags: Fence flags to wait on.
 * @wait_options: Options that control the behaviour of the wait ioctl.
 * @pad64: Explicit padding for 64-bit alignment.
 *
 * Input argument to the DRM_VMW_FENCE_WAIT ioctl.
 */
struct drm_vmw_fence_wait_arg {
	uint32_t handle;
	int32_t  cookie_valid;
	uint64_t kernel_cookie;
	uint64_t timeout_us;
	int32_t lazy;
	int32_t flags;
	int32_t wait_options;
	int32_t pad64;
};

620
/*************************************************************************/
/**
 * DRM_VMW_FENCE_SIGNALED
 *
 * Checks if a fence object is signaled.
 */

/**
 * struct drm_vmw_fence_signaled_arg
 *
 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
 * @flags: Fence object flags input to DRM_VMW_FENCE_SIGNALED ioctl
 * @signaled: Out: Flags signaled.
 * @passed_seqno: Out: Highest seqno passed so far. Can be used to signal the
 * EXEC flag of user-space fence objects.
 * @signaled_flags: Out: Flags signaled.
 * @pad64: Explicit padding for 64-bit alignment.
 *
 * Input/Output argument to the DRM_VMW_FENCE_SIGNALED and DRM_VMW_FENCE_UNREF
 * ioctls.
 */
struct drm_vmw_fence_signaled_arg {
	uint32_t handle;
	uint32_t flags;
	int32_t signaled;
	uint32_t passed_seqno;
	uint32_t signaled_flags;
	uint32_t pad64;
};

649
/*************************************************************************/
/**
 * DRM_VMW_FENCE_UNREF
 *
 * Unreferences a fence object, and causes it to be destroyed if there are no
 * other references to it.
 *
 */

/**
 * struct drm_vmw_fence_arg
 *
 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
 * @pad64: Explicit padding for 64-bit alignment.
 *
 * Input/Output argument to the DRM_VMW_FENCE_UNREF ioctl.
 */
struct drm_vmw_fence_arg {
	uint32_t handle;
	uint32_t pad64;
};


672
/*************************************************************************/
673
/**
674
 * DRM_VMW_FENCE_EVENT
675
 *
676
 * Queues an event on a fence to be delivered on the drm character device
677
 * when the fence has signaled the DRM_VMW_FENCE_FLAG_EXEC flag.
678
 * Optionally the approximate time when the fence signaled is
679
 * given by the event.
680
 */
681
 
682
/*
683
 * The event type
684
 */
685
#define DRM_VMW_EVENT_FENCE_SIGNALED 0x80000000
686
 
687
struct drm_vmw_event_fence {
688
        struct drm_event base;
689
        uint64_t user_data;
690
        uint32_t tv_sec;
691
        uint32_t tv_usec;
692
};
693
 
694
/*
 * Flags that may be given to the command.
 */
/* Request fence signaled time on the event. */
#define DRM_VMW_FE_FLAG_REQ_TIME (1 << 0)

/**
 * struct drm_vmw_fence_event_arg
 *
 * @fence_rep: Pointer to fence_rep structure cast to uint64_t or 0 if
 * the fence is not supposed to be referenced by user-space.
 * @user_data: Info to be delivered with the event.
 * @handle: Attach the event to this fence only.
 * @flags: A set of flags as defined above.
 */
struct drm_vmw_fence_event_arg {
	uint64_t fence_rep;
	uint64_t user_data;
	uint32_t handle;
	uint32_t flags;
};


717
/*************************************************************************/
/**
 * DRM_VMW_PRESENT
 *
 * Executes an SVGA present on a given fb for a given surface. The surface
 * is placed on the framebuffer. Cliprects are given relative to the given
 * point (the point designated by dest_{x|y}).
 *
 */

/**
 * struct drm_vmw_present_arg
 * @fb_id: framebuffer id to present / read back from.
 * @sid: Surface id to present from.
 * @dest_x: X placement coordinate for surface.
 * @dest_y: Y placement coordinate for surface.
 * @clips_ptr: Pointer to an array of clip rects cast to an uint64_t.
 * @num_clips: Number of cliprects given relative to the framebuffer origin,
 * in the same coordinate space as the frame buffer.
 * @pad64: Unused 64-bit padding.
 *
 * Input argument to the DRM_VMW_PRESENT ioctl.
 */
struct drm_vmw_present_arg {
	uint32_t fb_id;
	uint32_t sid;
	int32_t dest_x;
	int32_t dest_y;
	uint64_t clips_ptr;
	uint32_t num_clips;
	uint32_t pad64;
};


752
/*************************************************************************/
/**
 * DRM_VMW_PRESENT_READBACK
 *
 * Executes an SVGA present readback from a given fb to the dma buffer
 * currently bound as the fb. If there is no dma buffer bound to the fb,
 * an error will be returned.
 *
 */

/**
 * struct drm_vmw_present_readback_arg
 * @fb_id: fb_id to present / read back from.
 * @num_clips: Number of cliprects.
 * @clips_ptr: Pointer to an array of clip rects cast to an uint64_t.
 * @fence_rep: Pointer to a struct drm_vmw_fence_rep, cast to an uint64_t.
 * If this member is NULL, then the ioctl should not return a fence.
 */
struct drm_vmw_present_readback_arg {
	uint32_t fb_id;
	uint32_t num_clips;
	uint64_t clips_ptr;
	uint64_t fence_rep;
};

778
/*************************************************************************/
/**
 * DRM_VMW_UPDATE_LAYOUT - Update layout
 *
 * Updates the preferred modes and connection status for connectors. The
 * command consists of one drm_vmw_update_layout_arg pointing to an array
 * of num_outputs drm_vmw_rect's.
 */

/**
 * struct drm_vmw_update_layout_arg
 *
 * @num_outputs: number of active connectors
 * @pad64: explicit padding for 64-bit alignment
 * @rects: pointer to array of drm_vmw_rect cast to an uint64_t
 *
 * Input argument to the DRM_VMW_UPDATE_LAYOUT Ioctl.
 */
struct drm_vmw_update_layout_arg {
	uint32_t num_outputs;
	uint32_t pad64;
	uint64_t rects;
};


802
/*************************************************************************/
/**
 * DRM_VMW_CREATE_SHADER - Create shader
 *
 * Creates a shader and optionally binds it to a dma buffer containing
 * the shader byte-code.
 */

/**
 * enum drm_vmw_shader_type - Shader types
 */
enum drm_vmw_shader_type {
	drm_vmw_shader_type_vs = 0,
	drm_vmw_shader_type_ps,
	drm_vmw_shader_type_gs
};


820
/**
821
 * struct drm_vmw_shader_create_arg
822
 *
823
 * @shader_type: Shader type of the shader to create.
824
 * @size: Size of the byte-code in bytes.
825
 * where the shader byte-code starts
826
 * @buffer_handle: Buffer handle identifying the buffer containing the
827
 * shader byte-code
828
 * @shader_handle: On successful completion contains a handle that
829
 * can be used to subsequently identify the shader.
830
 * @offset: Offset in bytes into the buffer given by @buffer_handle,
831
 *
832
 * Input / Output argument to the DRM_VMW_CREATE_SHADER Ioctl.
833
 */
834
struct drm_vmw_shader_create_arg {
835
        enum drm_vmw_shader_type shader_type;
836
        uint32_t size;
837
        uint32_t buffer_handle;
838
        uint32_t shader_handle;
839
        uint64_t offset;
840
};
841
 
842
/*************************************************************************/
/**
 * DRM_VMW_UNREF_SHADER - Unreferences a shader
 *
 * Destroys a user-space reference to a shader, optionally destroying
 * it.
 */

/**
 * struct drm_vmw_shader_arg
 *
 * @handle: Handle identifying the shader to destroy.
 * @pad64: Explicit padding for 64-bit alignment.
 *
 * Input argument to the DRM_VMW_UNREF_SHADER ioctl.
 */
struct drm_vmw_shader_arg {
	uint32_t handle;
	uint32_t pad64;
};

862
/*************************************************************************/
/**
 * DRM_VMW_GB_SURFACE_CREATE - Create a host guest-backed surface.
 *
 * Allocates a surface handle and queues a create surface command
 * for the host on the first use of the surface. The surface ID can
 * be used as the surface ID in commands referencing the surface.
 */

/**
 * enum drm_vmw_surface_flags
 *
 * @drm_vmw_surface_flag_shareable:     Whether the surface is shareable
 * @drm_vmw_surface_flag_scanout:       Whether the surface is a scanout
 *                                      surface.
 * @drm_vmw_surface_flag_create_buffer: Create a backup buffer if none is
 *                                      given.
 */
enum drm_vmw_surface_flags {
	drm_vmw_surface_flag_shareable = (1 << 0),
	drm_vmw_surface_flag_scanout = (1 << 1),
	drm_vmw_surface_flag_create_buffer = (1 << 2)
};

886
/**
887
 * struct drm_vmw_gb_surface_create_req
888
 *
889
 * @svga3d_flags:     SVGA3d surface flags for the device.
890
 * @format:           SVGA3d format.
891
 * @mip_level:        Number of mip levels for all faces.
892
 * @drm_surface_flags Flags as described above.
893
 * @multisample_count Future use. Set to 0.
894
 * @autogen_filter    Future use. Set to 0.
895
 * @buffer_handle     Buffer handle of backup buffer. SVGA3D_INVALID_ID
896
 *                    if none.
897
 * @base_size         Size of the base mip level for all faces.
898
 *
899
 * Input argument to the  DRM_VMW_GB_SURFACE_CREATE Ioctl.
900
 * Part of output argument for the DRM_VMW_GB_SURFACE_REF Ioctl.
901
 */
902
struct drm_vmw_gb_surface_create_req {
903
        uint32_t svga3d_flags;
904
        uint32_t format;
905
        uint32_t mip_levels;
906
        enum drm_vmw_surface_flags drm_surface_flags;
907
        uint32_t multisample_count;
908
        uint32_t autogen_filter;
909
        uint32_t buffer_handle;
910
        uint32_t pad64;
911
        struct drm_vmw_size base_size;
912
};
913
 
914
/**
 * struct drm_vmw_gb_surface_create_rep
 *
 * @handle:            Surface handle.
 * @backup_size:       Size of backup buffers for this surface.
 * @buffer_handle:     Handle of backup buffer. SVGA3D_INVALID_ID if none.
 * @buffer_size:       Actual size of the buffer identified by
 *                     @buffer_handle
 * @buffer_map_handle: Offset into device address space for the buffer
 *                     identified by @buffer_handle.
 *
 * Part of output argument for the DRM_VMW_GB_SURFACE_REF ioctl.
 * Output argument for the DRM_VMW_GB_SURFACE_CREATE ioctl.
 */
struct drm_vmw_gb_surface_create_rep {
	uint32_t handle;
	uint32_t backup_size;
	uint32_t buffer_handle;
	uint32_t buffer_size;
	uint64_t buffer_map_handle;
};

936
/**
 * union drm_vmw_gb_surface_create_arg
 *
 * @req: Input argument as described above
 *       (struct drm_vmw_gb_surface_create_req).
 * @rep: Output argument as described above
 *       (struct drm_vmw_gb_surface_create_rep).
 *
 * Argument to the DRM_VMW_GB_SURFACE_CREATE ioctl.
 */
union drm_vmw_gb_surface_create_arg {
        struct drm_vmw_gb_surface_create_rep rep;
        struct drm_vmw_gb_surface_create_req req;
};
948
 
949
/*************************************************************************/
/**
 * DRM_VMW_GB_SURFACE_REF - Reference a host surface.
 *
 * Puts a reference on a host surface with a given handle, as previously
 * returned by the DRM_VMW_GB_SURFACE_CREATE ioctl.
 * A reference will make sure the surface isn't destroyed while we hold
 * it and will allow the calling client to use the surface handle in
 * the command stream.
 *
 * On successful return, the Ioctl returns the surface information given
 * to and returned from the DRM_VMW_GB_SURFACE_CREATE ioctl.
 */
962
 
963
/**
 * struct drm_vmw_gb_surface_ref_rep
 *
 * @creq: The data used as input when the surface was created, as described
 *        above at "struct drm_vmw_gb_surface_create_req".
 * @crep: Additional data output when the surface was created, as described
 *        above at "struct drm_vmw_gb_surface_create_rep".
 *
 * Output argument to the DRM_VMW_GB_SURFACE_REF ioctl.
 */
struct drm_vmw_gb_surface_ref_rep {
        struct drm_vmw_gb_surface_create_req creq;
        struct drm_vmw_gb_surface_create_rep crep;
};
977
 
978
/**
 * union drm_vmw_gb_surface_reference_arg
 *
 * @req: Input data as described above at "struct drm_vmw_surface_arg"
 *       (declared earlier in this header; carries the surface handle).
 * @rep: Output data as described above at
 *       "struct drm_vmw_gb_surface_ref_rep".
 *
 * Argument to the DRM_VMW_GB_SURFACE_REF Ioctl.
 */
union drm_vmw_gb_surface_reference_arg {
        struct drm_vmw_gb_surface_ref_rep rep;
        struct drm_vmw_surface_arg req;
};
990
 
991
 
992
/*************************************************************************/
/**
 * DRM_VMW_SYNCCPU - Sync a DMA buffer / MOB for CPU access.
 *
 * Idles any previously submitted GPU operations on the buffer and
 * by default blocks command submissions that reference the buffer.
 * If the file descriptor used to grab a blocking CPU sync is closed, the
 * CPU sync is released.
 * The flags argument indicates how the grab / release operation should be
 * performed:
 */
1003
 
1004
/**
 * enum drm_vmw_synccpu_flags - Synccpu flags:
 *
 * @drm_vmw_synccpu_read: Sync for read. If sync is done for read only, it's a
 * hint to the kernel to allow command submissions that reference the buffer
 * for read-only.
 * @drm_vmw_synccpu_write: Sync for write. Block all command submissions
 * referencing this buffer.
 * @drm_vmw_synccpu_dontblock: Don't wait for GPU idle, but rather return
 * -EBUSY should the buffer be busy.
 * @drm_vmw_synccpu_allow_cs: Allow command submission that touches the buffer
 * while the buffer is synced for CPU. This is similar to the GEM bo idle
 * behavior.
 */
enum drm_vmw_synccpu_flags {
        drm_vmw_synccpu_read = (1 << 0),
        drm_vmw_synccpu_write = (1 << 1),
        drm_vmw_synccpu_dontblock = (1 << 2),
        drm_vmw_synccpu_allow_cs = (1 << 3)
};
1024
 
1025
/**
 * enum drm_vmw_synccpu_op - Synccpu operations:
 *
 * @drm_vmw_synccpu_grab:    Grab the buffer for CPU operations.
 * @drm_vmw_synccpu_release: Release a previous grab.
 */
enum drm_vmw_synccpu_op {
        drm_vmw_synccpu_grab,
        drm_vmw_synccpu_release
};
1035
 
1036
/**
 * struct drm_vmw_synccpu_arg
 *
 * @op:     The synccpu operation as described above
 *          (enum drm_vmw_synccpu_op).
 * @flags:  Flags as described above (enum drm_vmw_synccpu_flags).
 * @handle: Handle identifying the buffer object.
 * @pad64:  Explicit padding — NOTE(review): presumably pads the struct to a
 *          64-bit multiple for identical 32/64-bit userspace layout; confirm.
 *
 * Argument to the DRM_VMW_SYNCCPU ioctl.
 */
struct drm_vmw_synccpu_arg {
        enum drm_vmw_synccpu_op op;
        enum drm_vmw_synccpu_flags flags;
        uint32_t handle;
        uint32_t pad64;
};
1049
 
1050
#endif

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.