1 /**************************************************************************
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
28 #ifndef __VMWGFX_DRM_H__
29 #define __VMWGFX_DRM_H__
/* Hardware limits used to size ioctl argument arrays below. */
#define DRM_VMW_MAX_SURFACE_FACES 6
#define DRM_VMW_MAX_MIP_LEVELS 24

/* Size of the name buffer in union drm_vmw_extension_arg. */
#define DRM_VMW_EXT_NAME_LEN 128

/* Driver-private ioctl ordinals (offsets from the driver ioctl base). */
#define DRM_VMW_GET_PARAM 0
#define DRM_VMW_ALLOC_DMABUF 1
#define DRM_VMW_UNREF_DMABUF 2
#define DRM_VMW_CURSOR_BYPASS 3
/* Guarded by DRM_VMW_PARAM_NUM_STREAMS != 0 */
#define DRM_VMW_CONTROL_STREAM 4
#define DRM_VMW_CLAIM_STREAM 5
#define DRM_VMW_UNREF_STREAM 6
/* Guarded by DRM_VMW_PARAM_3D == 1 */
#define DRM_VMW_CREATE_CONTEXT 7
#define DRM_VMW_UNREF_CONTEXT 8
#define DRM_VMW_CREATE_SURFACE 9
#define DRM_VMW_UNREF_SURFACE 10
#define DRM_VMW_REF_SURFACE 11
#define DRM_VMW_EXECBUF 12
#define DRM_VMW_GET_3D_CAP 13
#define DRM_VMW_FENCE_WAIT 14
#define DRM_VMW_FENCE_SIGNALED 15
#define DRM_VMW_FENCE_UNREF 16
#define DRM_VMW_FENCE_EVENT 17
58 /*************************************************************************/
60 * DRM_VMW_GET_PARAM - get device information.
62 * DRM_VMW_PARAM_FIFO_OFFSET:
63 * Offset to use to map the first page of the FIFO read-only.
64 * The fifo is mapped using the mmap() system call on the drm device.
66 * DRM_VMW_PARAM_OVERLAY_IOCTL:
67 * Does the driver support the overlay ioctl.
/* Parameter codes for the DRM_VMW_GET_PARAM ioctl. */
#define DRM_VMW_PARAM_NUM_STREAMS 0
#define DRM_VMW_PARAM_NUM_FREE_STREAMS 1
#define DRM_VMW_PARAM_3D 2
#define DRM_VMW_PARAM_HW_CAPS 3
#define DRM_VMW_PARAM_FIFO_CAPS 4
#define DRM_VMW_PARAM_MAX_FB_SIZE 5
#define DRM_VMW_PARAM_FIFO_HW_VERSION 6
79 * struct drm_vmw_getparam_arg
81 * @value: Returned value. //Out
82 * @param: Parameter to query. //In.
84 * Argument to the DRM_VMW_GET_PARAM Ioctl.
/* Argument to the DRM_VMW_GET_PARAM ioctl.
 * NOTE(review): truncated members reconstructed from the vmwgfx UAPI ABI —
 * verify against the upstream header.
 */
struct drm_vmw_getparam_arg {
	uint64_t value;		/* Out: value of the queried parameter */
	uint32_t param;		/* In: DRM_VMW_PARAM_* code to query */
	uint32_t pad64;		/* Pad struct size to a 64-bit multiple */
};
93 /*************************************************************************/
95 * DRM_VMW_EXTENSION - Query device extensions.
99 * struct drm_vmw_extension_rep
101 * @exists: The queried extension exists.
102 * @driver_ioctl_offset: Ioctl number of the first ioctl in the extension.
103 * @driver_sarea_offset: Offset to any space in the DRI SAREA
104 * used by the extension.
105 * @major: Major version number of the extension.
106 * @minor: Minor version number of the extension.
107 * @pl: Patch level version number of the extension.
109 * Output argument to the DRM_VMW_EXTENSION Ioctl.
/* Output argument to the DRM_VMW_EXTENSION ioctl (see comment above).
 * NOTE(review): truncated members reconstructed from the vmwgfx UAPI ABI.
 */
struct drm_vmw_extension_rep {
	int32_t exists;			/* The queried extension exists */
	uint32_t driver_ioctl_offset;	/* First ioctl number of the extension */
	uint32_t driver_sarea_offset;	/* Offset to extension space in the DRI SAREA */
	uint32_t major;			/* Major version number */
	uint32_t minor;			/* Minor version number */
	uint32_t pl;			/* Patch level version number */
	uint32_t pad64;			/* Pad struct size to a 64-bit multiple */
};
123 * union drm_vmw_extension_arg
125 * @extension - Ascii name of the extension to be queried. //In
126 * @rep - Reply as defined above. //Out
128 * Argument to the DRM_VMW_EXTENSION Ioctl.
131 union drm_vmw_extension_arg {
132 char extension[DRM_VMW_EXT_NAME_LEN];
133 struct drm_vmw_extension_rep rep;
136 /*************************************************************************/
138 * DRM_VMW_CREATE_CONTEXT - Create a host context.
140 * Allocates a device unique context id, and queues a create context command
141 * for the host. Does not wait for host completion.
145 * struct drm_vmw_context_arg
147 * @cid: Device unique context ID.
149 * Output argument to the DRM_VMW_CREATE_CONTEXT Ioctl.
150 * Input argument to the DRM_VMW_UNREF_CONTEXT Ioctl.
/* Out argument to DRM_VMW_CREATE_CONTEXT; in argument to DRM_VMW_UNREF_CONTEXT. */
struct drm_vmw_context_arg {
	int32_t cid;	/* Device unique context ID */
	uint32_t pad64;	/* Pad struct size to a 64-bit multiple */
};
158 /*************************************************************************/
160 * DRM_VMW_UNREF_CONTEXT - Create a host context.
162 * Frees a global context id, and queues a destroy host command for the host.
163 * Does not wait for host completion. The context ID can be used directly
164 * in the command stream and shows up as the same context ID on the host.
167 /*************************************************************************/
169 * DRM_VMW_CREATE_SURFACE - Create a host suface.
171 * Allocates a device unique surface id, and queues a create surface command
172 * for the host. Does not wait for host completion. The surface ID can be
173 * used directly in the command stream and shows up as the same surface
178 * struct drm_wmv_surface_create_req
180 * @flags: Surface flags as understood by the host.
181 * @format: Surface format as understood by the host.
182 * @mip_levels: Number of mip levels for each face.
183 * An unused face should have 0 encoded.
 * @size_addr: Address of a user-space array of struct drm_vmw_size
185 * cast to an uint64_t for 32-64 bit compatibility.
186 * The size of the array should equal the total number of mipmap levels.
187 * @shareable: Boolean whether other clients (as identified by file descriptors)
188 * may reference this surface.
189 * @scanout: Boolean whether the surface is intended to be used as a
192 * Input data to the DRM_VMW_CREATE_SURFACE Ioctl.
193 * Output data from the DRM_VMW_REF_SURFACE Ioctl.
196 struct drm_vmw_surface_create_req {
199 uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
206 * struct drm_wmv_surface_arg
208 * @sid: Surface id of created surface or surface to destroy or reference.
210 * Output data from the DRM_VMW_CREATE_SURFACE Ioctl.
211 * Input argument to the DRM_VMW_UNREF_SURFACE Ioctl.
212 * Input argument to the DRM_VMW_REF_SURFACE Ioctl.
/* Out from DRM_VMW_CREATE_SURFACE; in to DRM_VMW_UNREF_SURFACE / DRM_VMW_REF_SURFACE. */
struct drm_vmw_surface_arg {
	int32_t sid;	/* Surface id to create, destroy or reference */
	uint32_t pad64;	/* Pad struct size to a 64-bit multiple */
};
221 * struct drm_vmw_size ioctl.
223 * @width - mip level width
224 * @height - mip level height
225 * @depth - mip level depth
227 * Description of a mip level.
228 * Input data to the DRM_WMW_CREATE_SURFACE Ioctl.
/* Description of a single mip level; input to DRM_VMW_CREATE_SURFACE. */
struct drm_vmw_size {
	uint32_t width;		/* Mip level width */
	uint32_t height;	/* Mip level height */
	uint32_t depth;		/* Mip level depth */
	uint32_t pad64;		/* Pad struct size to a 64-bit multiple */
};
239 * union drm_vmw_surface_create_arg
241 * @rep: Output data as described above.
242 * @req: Input data as described above.
244 * Argument to the DRM_VMW_CREATE_SURFACE Ioctl.
247 union drm_vmw_surface_create_arg {
248 struct drm_vmw_surface_arg rep;
249 struct drm_vmw_surface_create_req req;
252 /*************************************************************************/
254 * DRM_VMW_REF_SURFACE - Reference a host surface.
 * Puts a reference on a host surface with a given sid, as previously
257 * returned by the DRM_VMW_CREATE_SURFACE ioctl.
258 * A reference will make sure the surface isn't destroyed while we hold
259 * it and will allow the calling client to use the surface ID in the command
262 * On successful return, the Ioctl returns the surface information given
263 * in the DRM_VMW_CREATE_SURFACE ioctl.
267 * union drm_vmw_surface_reference_arg
269 * @rep: Output data as described above.
270 * @req: Input data as described above.
272 * Argument to the DRM_VMW_REF_SURFACE Ioctl.
275 union drm_vmw_surface_reference_arg {
276 struct drm_vmw_surface_create_req rep;
277 struct drm_vmw_surface_arg req;
280 /*************************************************************************/
282 * DRM_VMW_UNREF_SURFACE - Unreference a host surface.
284 * Clear a reference previously put on a host surface.
285 * When all references are gone, including the one implicitly placed
287 * a destroy surface command will be queued for the host.
288 * Does not wait for completion.
291 /*************************************************************************/
295 * Submit a command buffer for execution on the host, and return a
296 * fence seqno that when signaled, indicates that the command buffer has
301 * struct drm_vmw_execbuf_arg
303 * @commands: User-space address of a command buffer cast to an uint64_t.
304 * @command-size: Size in bytes of the command buffer.
305 * @throttle-us: Sleep until software is less than @throttle_us
306 * microseconds ahead of hardware. The driver may round this value
307 * to the nearest kernel tick.
308 * @fence_rep: User-space address of a struct drm_vmw_fence_rep cast to an
310 * @version: Allows expanding the execbuf ioctl parameters without breaking
311 * backwards compatibility, since user-space will always tell the kernel
312 * which version it uses.
313 * @flags: Execbuf flags. None currently.
315 * Argument to the DRM_VMW_EXECBUF Ioctl.
/* Interface version of struct drm_vmw_execbuf_arg; user-space reports the
 * version it uses, so the ioctl can grow without breaking compatibility.
 */
#define DRM_VMW_EXECBUF_VERSION 0
/* Argument to the DRM_VMW_EXECBUF ioctl (see comment above).
 * NOTE(review): truncated members reconstructed from the vmwgfx UAPI ABI.
 */
struct drm_vmw_execbuf_arg {
	uint64_t commands;	/* User-space command buffer pointer, cast to u64 */
	uint32_t command_size;	/* Size in bytes of the command buffer */
	uint32_t throttle_us;	/* Throttle target in microseconds */
	uint64_t fence_rep;	/* User-space struct drm_vmw_fence_rep pointer, cast to u64 */
	uint32_t version;	/* DRM_VMW_EXECBUF_VERSION used by the caller */
	uint32_t flags;		/* Execbuf flags; none currently defined */
};
330 * struct drm_vmw_fence_rep
332 * @handle: Fence object handle for fence associated with a command submission.
333 * @mask: Fence flags relevant for this fence object.
334 * @seqno: Fence sequence number in fifo. A fence object with a lower
335 * seqno will signal the EXEC flag before a fence object with a higher
336 * seqno. This can be used by user-space to avoid kernel calls to determine
337 * whether a fence has signaled the EXEC flag. Note that @seqno will
339 * @passed_seqno: The highest seqno number processed by the hardware
340 * so far. This can be used to mark user-space fence objects as signaled, and
341 * to determine whether a fence seqno might be stale.
342 * @error: This member should've been set to -EFAULT on submission.
343 * The following actions should be take on completion:
344 * error == -EFAULT: Fence communication failed. The host is synchronized.
345 * Use the last fence id read from the FIFO fence register.
346 * error != 0 && error != -EFAULT:
347 * Fence submission failed. The host is synchronized. Use the fence_seq member.
348 * error == 0: All is OK, The host may not be synchronized.
349 * Use the fence_seq member.
351 * Input / Output data to the DRM_VMW_EXECBUF Ioctl.
/* Fence info returned through the execbuf @fence_rep pointer (see comment above).
 * NOTE(review): truncated members reconstructed from the vmwgfx UAPI ABI.
 */
struct drm_vmw_fence_rep {
	uint32_t handle;	/* Fence object handle for this submission */
	uint32_t mask;		/* Fence flags relevant for this fence object */
	uint32_t seqno;		/* Fence sequence number in fifo */
	uint32_t passed_seqno;	/* Highest seqno processed by hardware so far */
	uint32_t pad64;		/* Align @error / struct size to 64 bits */
	int32_t error;		/* See the error-handling rules in the comment above */
};
363 /*************************************************************************/
365 * DRM_VMW_ALLOC_DMABUF
367 * Allocate a DMA buffer that is visible also to the host.
368 * NOTE: The buffer is
369 * identified by a handle and an offset, which are private to the guest, but
370 * useable in the command stream. The guest kernel may translate these
371 * and patch up the command stream accordingly. In the future, the offset may
372 * be zero at all times, or it may disappear from the interface before it is
375 * The DMA buffer may stay user-space mapped in the guest at all times,
376 * and is thus suitable for sub-allocation.
378 * DMA buffers are mapped using the mmap() syscall on the drm device.
382 * struct drm_vmw_alloc_dmabuf_req
384 * @size: Required minimum size of the buffer.
386 * Input data to the DRM_VMW_ALLOC_DMABUF Ioctl.
/* Input data to the DRM_VMW_ALLOC_DMABUF ioctl. */
struct drm_vmw_alloc_dmabuf_req {
	uint32_t size;	/* Required minimum size of the buffer */
	uint32_t pad64;	/* Pad struct size to a 64-bit multiple */
};
395 * struct drm_vmw_dmabuf_rep
397 * @map_handle: Offset to use in the mmap() call used to map the buffer.
398 * @handle: Handle unique to this buffer. Used for unreferencing.
399 * @cur_gmr_id: GMR id to use in the command stream when this buffer is
 * referenced. See note above.
401 * @cur_gmr_offset: Offset to use in the command stream when this buffer is
402 * referenced. See note above.
404 * Output data from the DRM_VMW_ALLOC_DMABUF Ioctl.
/* Output data from the DRM_VMW_ALLOC_DMABUF ioctl.
 * NOTE(review): truncated members reconstructed from the vmwgfx UAPI ABI.
 */
struct drm_vmw_dmabuf_rep {
	uint64_t map_handle;	/* Offset for the mmap() call that maps the buffer */
	uint32_t handle;	/* Buffer handle, used for unreferencing */
	uint32_t cur_gmr_id;	/* GMR id for command-stream references */
	uint32_t cur_gmr_offset;	/* Offset for command-stream references */
	uint32_t pad64;		/* Pad struct size to a 64-bit multiple */
};
416 * union drm_vmw_dmabuf_arg
418 * @req: Input data as described above.
419 * @rep: Output data as described above.
421 * Argument to the DRM_VMW_ALLOC_DMABUF Ioctl.
424 union drm_vmw_alloc_dmabuf_arg {
425 struct drm_vmw_alloc_dmabuf_req req;
426 struct drm_vmw_dmabuf_rep rep;
429 /*************************************************************************/
431 * DRM_VMW_UNREF_DMABUF - Free a DMA buffer.
436 * struct drm_vmw_unref_dmabuf_arg
438 * @handle: Handle indicating what buffer to free. Obtained from the
439 * DRM_VMW_ALLOC_DMABUF Ioctl.
441 * Argument to the DRM_VMW_UNREF_DMABUF Ioctl.
/* Argument to the DRM_VMW_UNREF_DMABUF ioctl. */
struct drm_vmw_unref_dmabuf_arg {
	uint32_t handle;	/* Buffer handle obtained from DRM_VMW_ALLOC_DMABUF */
	uint32_t pad64;		/* Pad struct size to a 64-bit multiple */
};
449 /*************************************************************************/
451 * DRM_VMW_CONTROL_STREAM - Control overlays, aka streams.
453 * This IOCTL controls the overlay units of the svga device.
454 * The SVGA overlay units does not work like regular hardware units in
 * that they do not automatically read back the contents of the given dma
456 * buffer. But instead only read back for each call to this ioctl, and
457 * at any point between this call being made and a following call that
458 * either changes the buffer or disables the stream.
462 * struct drm_vmw_rect
464 * Defines a rectangle. Used in the overlay ioctl to define
465 * source and destination rectangle.
/* Rectangle used by the overlay ioctl for source and destination regions.
 * x/y are signed (destination x and y may be negative, see comment below).
 */
struct drm_vmw_rect {
	int32_t x;
	int32_t y;
	uint32_t w;
	uint32_t h;
};
476 * struct drm_vmw_control_stream_arg
 * @stream_id: Stream to control
479 * @enabled: If false all following arguments are ignored.
480 * @handle: Handle to buffer for getting data from.
481 * @format: Format of the overlay as understood by the host.
482 * @width: Width of the overlay.
483 * @height: Height of the overlay.
484 * @size: Size of the overlay in bytes.
485 * @pitch: Array of pitches, the two last are only used for YUV12 formats.
486 * @offset: Offset from start of dma buffer to overlay.
487 * @src: Source rect, must be within the defined area above.
488 * @dst: Destination rect, x and y may be negative.
490 * Argument to the DRM_VMW_CONTROL_STREAM Ioctl.
493 struct drm_vmw_control_stream_arg {
509 struct drm_vmw_rect src;
510 struct drm_vmw_rect dst;
513 /*************************************************************************/
515 * DRM_VMW_CURSOR_BYPASS - Give extra information about cursor bypass.
/* Flag bits for struct drm_vmw_cursor_bypass_arg::flags. */
#define DRM_VMW_CURSOR_BYPASS_ALL (1 << 0)
#define DRM_VMW_CURSOR_BYPASS_FLAGS (1)
523 * struct drm_vmw_cursor_bypass_arg
526 * @crtc_id: Crtc id, only used if DMR_CURSOR_BYPASS_ALL isn't passed.
527 * @xpos: X position of cursor.
528 * @ypos: Y position of cursor.
532 * Argument to the DRM_VMW_CURSOR_BYPASS Ioctl.
/* Argument to the DRM_VMW_CURSOR_BYPASS ioctl.
 * NOTE(review): truncated members reconstructed from the vmwgfx UAPI ABI.
 */
struct drm_vmw_cursor_bypass_arg {
	uint32_t flags;		/* DRM_VMW_CURSOR_BYPASS_* flags */
	uint32_t crtc_id;	/* Only used if DRM_VMW_CURSOR_BYPASS_ALL isn't passed */
	int32_t xpos;		/* X position of cursor */
	int32_t ypos;		/* Y position of cursor */
	int32_t xhot;		/* Cursor hotspot X — TODO confirm upstream */
	int32_t yhot;		/* Cursor hotspot Y — TODO confirm upstream */
};
544 /*************************************************************************/
546 * DRM_VMW_CLAIM_STREAM - Claim a single stream.
550 * struct drm_vmw_context_arg
552 * @stream_id: Device unique context ID.
554 * Output argument to the DRM_VMW_CREATE_CONTEXT Ioctl.
555 * Input argument to the DRM_VMW_UNREF_CONTEXT Ioctl.
/* Out argument to DRM_VMW_CLAIM_STREAM; in argument to DRM_VMW_UNREF_STREAM. */
struct drm_vmw_stream_arg {
	uint32_t stream_id;	/* Device unique stream ID */
	uint32_t pad64;		/* Pad struct size to a 64-bit multiple */
};
563 /*************************************************************************/
565 * DRM_VMW_UNREF_STREAM - Unclaim a stream.
567 * Return a single stream that was claimed by this process. Also makes
568 * sure that the stream has been stopped.
571 /*************************************************************************/
575 * Read 3D capabilities from the FIFO
580 * struct drm_vmw_get_3d_cap_arg
582 * @buffer: Pointer to a buffer for capability data, cast to an uint64_t
583 * @size: Max size to copy
585 * Input argument to the DRM_VMW_GET_3D_CAP_IOCTL
/* Input argument to the DRM_VMW_GET_3D_CAP ioctl.
 * NOTE(review): truncated members reconstructed from the vmwgfx UAPI ABI.
 */
struct drm_vmw_get_3d_cap_arg {
	uint64_t buffer;	/* User-space capability buffer pointer, cast to u64 */
	uint32_t max_size;	/* Max number of bytes to copy */
	uint32_t pad64;		/* Pad struct size to a 64-bit multiple */
};
595 /*************************************************************************/
597 * DRM_VMW_UPDATE_LAYOUT - Update layout
599 * Updates the preferred modes and connection status for connectors. The
 * command consists of one drm_vmw_update_layout_arg pointing out an array
601 * of num_outputs drm_vmw_rect's.
605 * struct drm_vmw_update_layout_arg
607 * @num_outputs: number of active
608 * @rects: pointer to array of drm_vmw_rect
610 * Input argument to the DRM_VMW_UPDATE_LAYOUT Ioctl.
/* Input argument to the DRM_VMW_UPDATE_LAYOUT ioctl.
 * NOTE(review): truncated members reconstructed from the vmwgfx UAPI ABI.
 */
struct drm_vmw_update_layout_arg {
	uint32_t num_outputs;	/* Number of active outputs */
	uint32_t pad64;		/* Align @rects to 64 bits */
	uint64_t rects;		/* User-space drm_vmw_rect array pointer, cast to u64 */
};
620 /*************************************************************************/
624 * Waits for a fence object to signal. The wait is interruptible, so that
625 * signals may be delivered during the interrupt. The wait may timeout,
626 * in which case the calls returns -EBUSY. If the wait is restarted,
627 * that is restarting without resetting @cookie_valid to zero,
628 * the timeout is computed from the first call.
630 * The flags argument to the DRM_VMW_FENCE_WAIT ioctl indicates what to wait
632 * DRM_VMW_FENCE_FLAG_EXEC: All commands ahead of the fence in the command
635 * DRM_VMW_FENCE_FLAG_QUERY: All query results resulting from query finish
637 * in the buffer given to the EXECBUF ioctl returning the fence object handle
638 * are available to user-space.
640 * DRM_VMW_WAIT_OPTION_UNREF: If this wait option is given, and the
 * fence wait ioctl returns 0, the fence object has been unreferenced after
/* Fence flag bits (see the DRM_VMW_FENCE_WAIT description above). */
#define DRM_VMW_FENCE_FLAG_EXEC (1 << 0)
#define DRM_VMW_FENCE_FLAG_QUERY (1 << 1)

/* Wait option: unreference the fence object when the wait ioctl returns 0. */
#define DRM_VMW_WAIT_OPTION_UNREF (1 << 0)
651 * struct drm_vmw_fence_wait_arg
653 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
654 * @cookie_valid: Must be reset to 0 on first call. Left alone on restart.
655 * @kernel_cookie: Set to 0 on first call. Left alone on restart.
656 * @timeout_us: Wait timeout in microseconds. 0 for indefinite timeout.
657 * @lazy: Set to 1 if timing is not critical. Allow more than a kernel tick
659 * @flags: Fence flags to wait on.
660 * @wait_options: Options that control the behaviour of the wait ioctl.
662 * Input argument to the DRM_VMW_FENCE_WAIT ioctl.
/* Input argument to the DRM_VMW_FENCE_WAIT ioctl (see comment above).
 * NOTE(review): truncated members reconstructed from the vmwgfx UAPI ABI.
 */
struct drm_vmw_fence_wait_arg {
	uint32_t handle;	/* Fence object handle from DRM_VMW_EXECBUF */
	int32_t cookie_valid;	/* Must be reset to 0 on first call; left alone on restart */
	uint64_t kernel_cookie;	/* Set to 0 on first call; left alone on restart */
	uint64_t timeout_us;	/* Wait timeout in microseconds; 0 for indefinite */
	int32_t lazy;		/* Set to 1 if timing is not critical */
	int32_t flags;		/* DRM_VMW_FENCE_FLAG_* to wait on */
	int32_t wait_options;	/* DRM_VMW_WAIT_OPTION_* controlling wait behaviour */
	int32_t pad64;		/* Pad struct size to a 64-bit multiple */
};
676 /*************************************************************************/
678 * DRM_VMW_FENCE_SIGNALED
680 * Checks if a fence object is signaled..
684 * struct drm_vmw_fence_signaled_arg
686 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
687 * @flags: Fence object flags input to DRM_VMW_FENCE_SIGNALED ioctl
688 * @signaled: Out: Flags signaled.
689 * @sequence: Out: Highest sequence passed so far. Can be used to signal the
690 * EXEC flag of user-space fence objects.
692 * Input/Output argument to the DRM_VMW_FENCE_SIGNALED and DRM_VMW_FENCE_UNREF
/* Input/output argument to the DRM_VMW_FENCE_SIGNALED ioctl (see comment above).
 * NOTE(review): truncated members reconstructed from the vmwgfx UAPI ABI.
 */
struct drm_vmw_fence_signaled_arg {
	uint32_t handle;	/* Fence object handle from DRM_VMW_EXECBUF */
	uint32_t flags;		/* In: fence object flags to check */
	int32_t signaled;	/* Out: whether the fence has signaled */
	uint32_t passed_seqno;	/* Out: highest seqno passed so far */
	uint32_t signaled_flags;	/* Out: flags signaled */
	uint32_t pad64;		/* Pad struct size to a 64-bit multiple */
};
705 /*************************************************************************/
707 * DRM_VMW_FENCE_UNREF
709 * Unreferences a fence object, and causes it to be destroyed if there are no
710 * other references to it.
715 * struct drm_vmw_fence_arg
717 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
719 * Input/Output argument to the DRM_VMW_FENCE_UNREF ioctl..
722 struct drm_vmw_fence_arg {