/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/module.h>
#include <linux/console.h>

#include "drmP.h"
#include "vmwgfx_drv.h"
#include "ttm/ttm_placement.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_object.h"
#include "ttm/ttm_module.h"

#define VMWGFX_DRIVER_NAME "vmwgfx"
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
#define VMWGFX_CHIP_SVGAII 0
#define VMW_FB_RESERVATION 0

/**
 * Fully encoded drm commands. Might move to vmw_drm.h
 */

#define DRM_IOCTL_VMW_GET_PARAM \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM, \
		 struct drm_vmw_getparam_arg)
#define DRM_IOCTL_VMW_ALLOC_DMABUF \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF, \
		 union drm_vmw_alloc_dmabuf_arg)
#define DRM_IOCTL_VMW_UNREF_DMABUF \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF, \
		struct drm_vmw_unref_dmabuf_arg)
#define DRM_IOCTL_VMW_CURSOR_BYPASS \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS, \
		struct drm_vmw_cursor_bypass_arg)

#define DRM_IOCTL_VMW_CONTROL_STREAM \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM, \
		struct drm_vmw_control_stream_arg)
#define DRM_IOCTL_VMW_CLAIM_STREAM \
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM, \
		struct drm_vmw_stream_arg)
#define DRM_IOCTL_VMW_UNREF_STREAM \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM, \
		struct drm_vmw_stream_arg)

#define DRM_IOCTL_VMW_CREATE_CONTEXT \
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT, \
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_UNREF_CONTEXT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT, \
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_CREATE_SURFACE \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE, \
		 union drm_vmw_surface_create_arg)
#define DRM_IOCTL_VMW_UNREF_SURFACE \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE, \
		struct drm_vmw_surface_arg)
#define DRM_IOCTL_VMW_REF_SURFACE \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE, \
		 union drm_vmw_surface_reference_arg)
#define DRM_IOCTL_VMW_EXECBUF \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF, \
		struct drm_vmw_execbuf_arg)
#define DRM_IOCTL_VMW_GET_3D_CAP \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP, \
		struct drm_vmw_get_3d_cap_arg)
#define DRM_IOCTL_VMW_FENCE_WAIT \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \
		 struct drm_vmw_fence_wait_arg)
#define DRM_IOCTL_VMW_FENCE_SIGNALED \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED, \
		 struct drm_vmw_fence_signaled_arg)
#define DRM_IOCTL_VMW_FENCE_UNREF \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF, \
		struct drm_vmw_fence_arg)
#define DRM_IOCTL_VMW_FENCE_EVENT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT, \
		struct drm_vmw_fence_event_arg)
#define DRM_IOCTL_VMW_PRESENT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT, \
		struct drm_vmw_present_arg)
#define DRM_IOCTL_VMW_PRESENT_READBACK \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK, \
		struct drm_vmw_present_readback_arg)
#define DRM_IOCTL_VMW_UPDATE_LAYOUT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \
		struct drm_vmw_update_layout_arg)

/**
 * The core DRM version of this macro doesn't account for
 * DRM_COMMAND_BASE.
 */

#define VMW_IOCTL_DEF(ioctl, func, flags) \
	[DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_##ioctl, flags, func, DRM_IOCTL_##ioctl}

/**
 * Ioctl definitions.
 */

static struct drm_ioctl_desc vmw_ioctls[] = {
	VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
		      vmw_kms_cursor_bypass_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),

	VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),

	VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
		      vmw_fence_obj_signaled_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_FENCE_EVENT,
		      vmw_fence_event_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),

	/* These allow direct access to the framebuffers; mark as master only. */
	VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
		      DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
		      vmw_present_readback_ioctl,
		      DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
		      vmw_kms_update_layout_ioctl,
		      DRM_MASTER | DRM_UNLOCKED),
};

static struct pci_device_id vmw_pci_id_list[] = {
	{0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
	{0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);

static int enable_fbdev;

static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static void vmw_master_init(struct vmw_master *);
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr);

MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
module_param_named(enable_fbdev, enable_fbdev, int, 0600);

static void vmw_print_capabilities(uint32_t capabilities)
{
	DRM_INFO("Capabilities:\n");
	if (capabilities & SVGA_CAP_RECT_COPY)
		DRM_INFO("  Rect copy.\n");
	if (capabilities & SVGA_CAP_CURSOR)
		DRM_INFO("  Cursor.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS)
		DRM_INFO("  Cursor bypass.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
		DRM_INFO("  Cursor bypass 2.\n");
	if (capabilities & SVGA_CAP_8BIT_EMULATION)
		DRM_INFO("  8bit emulation.\n");
	if (capabilities & SVGA_CAP_ALPHA_CURSOR)
		DRM_INFO("  Alpha cursor.\n");
	if (capabilities & SVGA_CAP_3D)
		DRM_INFO("  3D.\n");
	if (capabilities & SVGA_CAP_EXTENDED_FIFO)
		DRM_INFO("  Extended Fifo.\n");
	if (capabilities & SVGA_CAP_MULTIMON)
		DRM_INFO("  Multimon.\n");
	if (capabilities & SVGA_CAP_PITCHLOCK)
		DRM_INFO("  Pitchlock.\n");
	if (capabilities & SVGA_CAP_IRQMASK)
		DRM_INFO("  Irq mask.\n");
	if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
		DRM_INFO("  Display Topology.\n");
	if (capabilities & SVGA_CAP_GMR)
		DRM_INFO("  Guest memory regions.\n");
	if (capabilities & SVGA_CAP_TRACES)
		DRM_INFO("  Traces.\n");
	if (capabilities & SVGA_CAP_GMR2)
		DRM_INFO("  GMR2.\n");
	if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
		DRM_INFO("  Screen Object 2.\n");
}

/**
 * vmw_dummy_query_bo_prepare - Initialize a query result structure at
 * the start of a buffer object.
 *
 * @dev_priv: The device private structure.
 *
 * This function will idle the buffer using an uninterruptible wait, then
 * map the first page and initialize a pending occlusion query result
 * structure. Finally it will unmap the buffer.
 *
 * TODO: Since we're only mapping a single page, we should optimize the map
 * to use kmap_atomic / iomap_atomic.
 */
static void vmw_dummy_query_bo_prepare(struct vmw_private *dev_priv)
{
	struct ttm_bo_kmap_obj map;
	volatile SVGA3dQueryResult *result;
	bool dummy;
	int ret;
	struct ttm_bo_device *bdev = &dev_priv->bdev;
	struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;

	ttm_bo_reserve(bo, false, false, false, 0);
	spin_lock(&bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, false, false);
	spin_unlock(&bdev->fence_lock);
	if (unlikely(ret != 0))
		(void) vmw_fallback_wait(dev_priv, false, true, 0, false,
					 10*HZ);

	ret = ttm_bo_kmap(bo, 0, 1, &map);
	if (likely(ret == 0)) {
		result = ttm_kmap_obj_virtual(&map, &dummy);
		result->totalSize = sizeof(*result);
		result->state = SVGA3D_QUERYSTATE_PENDING;
		result->result32 = 0xff;
		ttm_bo_kunmap(&map);
	} else
		DRM_ERROR("Dummy query buffer map failed.\n");
	ttm_bo_unreserve(bo);
}

/**
 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
 *
 * @dev_priv: A device private structure.
 *
 * This function creates a small buffer object that holds the query
 * result for dummy queries emitted as query barriers.
 * No interruptible waits are done within this function.
 *
 * Returns an error if bo creation fails.
 */
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
	return ttm_bo_create(&dev_priv->bdev,
			     PAGE_SIZE,
			     ttm_bo_type_device,
			     &vmw_vram_sys_placement,
			     0, 0, false, NULL,
			     &dev_priv->dummy_query_bo);
}

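/*
 * vmw_request_device - Bring the device up for command submission.
 *
 * Initializes the FIFO, brings up the fence manager FIFO state and
 * creates and prepares the dummy query buffer object. On failure the
 * fence manager and FIFO are taken down again.
 */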
static int vmw_request_device(struct vmw_private *dev_priv)
{
	int ret;

	ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Unable to initialize FIFO.\n");
		return ret;
	}
	vmw_fence_fifo_up(dev_priv->fman);
	ret = vmw_dummy_query_bo_create(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_query_bo;
	vmw_dummy_query_bo_prepare(dev_priv);

	return 0;

out_no_query_bo:
	vmw_fence_fifo_down(dev_priv->fman);
	vmw_fifo_release(dev_priv, &dev_priv->fifo);
	return ret;
}

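/*
 * vmw_release_device - Undo what vmw_request_device did.
 *
 * Unreferences the dummy query buffer object and takes down the fence
 * manager FIFO state and the FIFO itself.
 */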
static void vmw_release_device(struct vmw_private *dev_priv)
{
	/*
	 * Previous destructions should've released
	 * the pinned bo.
	 */

	BUG_ON(dev_priv->pinned_bo != NULL);

	ttm_bo_unref(&dev_priv->dummy_query_bo);
	vmw_fence_fifo_down(dev_priv->fman);
	vmw_fifo_release(dev_priv, &dev_priv->fifo);
}

/**
 * Increase the 3d resource refcount.
 * If the count was previously zero, initialize the fifo, switching to svga
 * mode. Note that the master holds a ref as well, and may request an
 * explicit switch to svga mode if fb is not running, using @unhide_svga.
 */
int vmw_3d_resource_inc(struct vmw_private *dev_priv,
			bool unhide_svga)
{
	int ret = 0;

	mutex_lock(&dev_priv->release_mutex);
	if (unlikely(dev_priv->num_3d_resources++ == 0)) {
		ret = vmw_request_device(dev_priv);
		if (unlikely(ret != 0))
			--dev_priv->num_3d_resources;
	} else if (unhide_svga) {
		mutex_lock(&dev_priv->hw_mutex);
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  vmw_read(dev_priv, SVGA_REG_ENABLE) &
			  ~SVGA_REG_ENABLE_HIDE);
		mutex_unlock(&dev_priv->hw_mutex);
	}

	mutex_unlock(&dev_priv->release_mutex);
	return ret;
}

/**
 * Decrease the 3d resource refcount.
 * If the count reaches zero, disable the fifo, switching to vga mode.
 * Note that the master holds a refcount as well, and may request an
 * explicit switch to vga mode when it releases its refcount to account
 * for the situation of an X server vt switch to VGA with 3d resources
 * active.
 */
void vmw_3d_resource_dec(struct vmw_private *dev_priv,
			 bool hide_svga)
{
	int32_t n3d;

	mutex_lock(&dev_priv->release_mutex);
	if (unlikely(--dev_priv->num_3d_resources == 0))
		vmw_release_device(dev_priv);
	else if (hide_svga) {
		mutex_lock(&dev_priv->hw_mutex);
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  vmw_read(dev_priv, SVGA_REG_ENABLE) |
			  SVGA_REG_ENABLE_HIDE);
		mutex_unlock(&dev_priv->hw_mutex);
	}

	n3d = (int32_t) dev_priv->num_3d_resources;
	mutex_unlock(&dev_priv->release_mutex);

	BUG_ON(n3d < 0);
}

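/*
 * vmw_driver_load - Main initialization entry point, called when the
 * drm device is loaded.
 *
 * Probes the SVGA device, reads its capabilities, maps MMIO, sets up the
 * TTM memory managers, fence manager, KMS and overlay code and, optionally,
 * the fbdev emulation. All steps are unwound in reverse order on failure.
 */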
static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
	struct vmw_private *dev_priv;
	int ret;
	uint32_t svga_id;

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (unlikely(dev_priv == NULL)) {
		DRM_ERROR("Failed allocating a device private struct.\n");
		return -ENOMEM;
	}
	memset(dev_priv, 0, sizeof(*dev_priv));

	dev_priv->dev = dev;
	dev_priv->vmw_chipset = chipset;
	dev_priv->last_read_seqno = (uint32_t) -100;
	mutex_init(&dev_priv->hw_mutex);
	mutex_init(&dev_priv->cmdbuf_mutex);
	mutex_init(&dev_priv->release_mutex);
	rwlock_init(&dev_priv->resource_lock);
	idr_init(&dev_priv->context_idr);
	idr_init(&dev_priv->surface_idr);
	idr_init(&dev_priv->stream_idr);
	mutex_init(&dev_priv->init_mutex);
	init_waitqueue_head(&dev_priv->fence_queue);
	init_waitqueue_head(&dev_priv->fifo_queue);
	dev_priv->fence_queue_waiters = 0;
	atomic_set(&dev_priv->fifo_queue_waiters, 0);
	INIT_LIST_HEAD(&dev_priv->surface_lru);
	dev_priv->used_memory_size = 0;

	dev_priv->io_start = pci_resource_start(dev->pdev, 0);
	dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
	dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);

	dev_priv->enable_fb = enable_fbdev;

	mutex_lock(&dev_priv->hw_mutex);

	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	svga_id = vmw_read(dev_priv, SVGA_REG_ID);
	if (svga_id != SVGA_ID_2) {
		ret = -ENOSYS;
		DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
		mutex_unlock(&dev_priv->hw_mutex);
		goto out_err0;
	}

	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);

	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
	dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);
	if (dev_priv->capabilities & SVGA_CAP_GMR) {
		dev_priv->max_gmr_descriptors =
			vmw_read(dev_priv,
				 SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH);
		dev_priv->max_gmr_ids =
			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
	}
	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		dev_priv->max_gmr_pages =
			vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
		dev_priv->memory_size =
			vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
		dev_priv->memory_size -= dev_priv->vram_size;
	} else {
		/*
		 * An arbitrary limit of 512MiB on surface
		 * memory. But all HWV8 hardware supports GMR2.
		 */
		dev_priv->memory_size = 512*1024*1024;
	}

	mutex_unlock(&dev_priv->hw_mutex);

	vmw_print_capabilities(dev_priv->capabilities);

	if (dev_priv->capabilities & SVGA_CAP_GMR) {
		DRM_INFO("Max GMR ids is %u\n",
			 (unsigned)dev_priv->max_gmr_ids);
		DRM_INFO("Max GMR descriptors is %u\n",
			 (unsigned)dev_priv->max_gmr_descriptors);
	}
	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		DRM_INFO("Max number of GMR pages is %u\n",
			 (unsigned)dev_priv->max_gmr_pages);
		DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
			 (unsigned)dev_priv->memory_size / 1024);
	}
	DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
		 dev_priv->vram_start, dev_priv->vram_size / 1024);
	DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
		 dev_priv->mmio_start, dev_priv->mmio_size / 1024);

	ret = vmw_ttm_global_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;

	vmw_master_init(&dev_priv->fbdev_master);
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	dev_priv->active_master = &dev_priv->fbdev_master;

	ret = ttm_bo_device_init(&dev_priv->bdev,
				 dev_priv->bo_global_ref.ref.object,
				 &vmw_bo_driver, VMWGFX_FILE_PAGE_OFFSET,
				 false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
		goto out_err1;
	}

	dev_priv->mmio_mtrr = drm_mtrr_add(dev_priv->mmio_start,
					   dev_priv->mmio_size, DRM_MTRR_WC);

	dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start,
					 dev_priv->mmio_size);

	if (unlikely(dev_priv->mmio_virt == NULL)) {
		ret = -ENOMEM;
		DRM_ERROR("Failed mapping MMIO.\n");
		goto out_err3;
	}

	/* Need mmio memory to check for fifo pitchlock cap. */
	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
	    !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
	    !vmw_fifo_have_pitchlock(dev_priv)) {
		ret = -ENOSYS;
		DRM_ERROR("Hardware has no pitchlock\n");
		goto out_err4;
	}

	dev_priv->tdev = ttm_object_device_init
		(dev_priv->mem_global_ref.object, 12);

	if (unlikely(dev_priv->tdev == NULL)) {
		DRM_ERROR("Unable to initialize TTM object management.\n");
		ret = -ENOMEM;
		goto out_err4;
	}

	dev->dev_private = dev_priv;

	ret = pci_request_regions(dev->pdev, "vmwgfx probe");
	dev_priv->stealth = (ret != 0);
	if (dev_priv->stealth) {
		/*
		 * Request at least the mmio PCI resource.
		 */

		DRM_INFO("It appears like vesafb is loaded. "
			 "Ignore above error if any.\n");
		ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
			goto out_no_device;
		}
	}

	dev_priv->fman = vmw_fence_manager_init(dev_priv);
	if (unlikely(dev_priv->fman == NULL))
		goto out_no_fman;

	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
			     (dev_priv->vram_size >> PAGE_SHIFT));
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
		goto out_no_vram;
	}

	dev_priv->has_gmr = true;
	if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
			   dev_priv->max_gmr_ids) != 0) {
		DRM_INFO("No GMR memory available. "
			 "Graphics memory resources are very limited.\n");
		dev_priv->has_gmr = false;
	}

	/* Need to start the fifo to check if we can do screen objects */
	ret = vmw_3d_resource_inc(dev_priv, true);
	if (unlikely(ret != 0))
		goto out_no_fifo;
	vmw_kms_save_vga(dev_priv);

	/* Start kms and overlay systems, needs fifo. */
	ret = vmw_kms_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_kms;
	vmw_overlay_init(dev_priv);

	/* 3D Depends on Screen Objects being used. */
	DRM_INFO("Detected %sdevice 3D availability.\n",
		 vmw_fifo_have_3d(dev_priv) ?
		 "" : "no ");

	/* We might be done with the fifo now */
	if (dev_priv->enable_fb) {
		vmw_fb_init(dev_priv);
	} else {
		vmw_kms_restore_vga(dev_priv);
		vmw_3d_resource_dec(dev_priv, true);
	}

	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
		ret = drm_irq_install(dev);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed installing irq: %d\n", ret);
			goto out_no_irq;
		}
	}

	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
	register_pm_notifier(&dev_priv->pm_nb);

	return 0;

out_no_irq:
	if (dev_priv->enable_fb)
		vmw_fb_close(dev_priv);
	vmw_overlay_close(dev_priv);
	vmw_kms_close(dev_priv);
out_no_kms:
	/* We still have a 3D resource reference held */
	if (dev_priv->enable_fb) {
		vmw_kms_restore_vga(dev_priv);
		vmw_3d_resource_dec(dev_priv, false);
	}
out_no_fifo:
	if (dev_priv->has_gmr)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_no_vram:
	vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);
out_no_device:
	ttm_object_device_release(&dev_priv->tdev);
out_err4:
	iounmap(dev_priv->mmio_virt);
out_err3:
	drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
		     dev_priv->mmio_size, DRM_MTRR_WC);
	(void)ttm_bo_device_release(&dev_priv->bdev);
out_err1:
	vmw_ttm_global_release(dev_priv);
out_err0:
	idr_destroy(&dev_priv->surface_idr);
	idr_destroy(&dev_priv->context_idr);
	idr_destroy(&dev_priv->stream_idr);
	kfree(dev_priv);
	return ret;
}

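/*
 * vmw_driver_unload - Tear down everything set up by vmw_driver_load,
 * in roughly the reverse order.
 */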
static int vmw_driver_unload(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);

	unregister_pm_notifier(&dev_priv->pm_nb);

	if (dev_priv->ctx.cmd_bounce)
		vfree(dev_priv->ctx.cmd_bounce);
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		drm_irq_uninstall(dev_priv->dev);
	if (dev_priv->enable_fb) {
		vmw_fb_close(dev_priv);
		vmw_kms_restore_vga(dev_priv);
		vmw_3d_resource_dec(dev_priv, false);
	}
	vmw_kms_close(dev_priv);
	vmw_overlay_close(dev_priv);

	if (dev_priv->has_gmr)
		(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);

	vmw_fence_manager_takedown(dev_priv->fman);
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);

	ttm_object_device_release(&dev_priv->tdev);
	iounmap(dev_priv->mmio_virt);
	drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
		     dev_priv->mmio_size, DRM_MTRR_WC);
	(void)ttm_bo_device_release(&dev_priv->bdev);
	vmw_ttm_global_release(dev_priv);
	idr_destroy(&dev_priv->surface_idr);
	idr_destroy(&dev_priv->context_idr);
	idr_destroy(&dev_priv->stream_idr);

	kfree(dev_priv);

	return 0;
}

static void vmw_postclose(struct drm_device *dev,
			  struct drm_file *file_priv)
{
	struct vmw_fpriv *vmw_fp;

	vmw_fp = vmw_fpriv(file_priv);
	ttm_object_file_release(&vmw_fp->tfile);
	if (vmw_fp->locked_master)
		drm_master_put(&vmw_fp->locked_master);
	kfree(vmw_fp);
}

static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp;
	int ret = -ENOMEM;

	vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
	if (unlikely(vmw_fp == NULL))
		return ret;

	vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
	if (unlikely(vmw_fp->tfile == NULL))
		goto out_no_tfile;

	file_priv->driver_priv = vmw_fp;

	if (unlikely(dev_priv->bdev.dev_mapping == NULL))
		dev_priv->bdev.dev_mapping =
			file_priv->filp->f_path.dentry->d_inode->i_mapping;

	return 0;

out_no_tfile:
	kfree(vmw_fp);
	return ret;
}

static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
			       unsigned long arg)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	unsigned int nr = DRM_IOCTL_NR(cmd);

	/*
	 * Do extra checking on driver private ioctls.
	 */

	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
	    && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
		struct drm_ioctl_desc *ioctl =
			&vmw_ioctls[nr - DRM_COMMAND_BASE];

		if (unlikely(ioctl->cmd_drv != cmd)) {
			DRM_ERROR("Invalid command format, ioctl %d\n",
				  nr - DRM_COMMAND_BASE);
			return -EINVAL;
		}
	}

	return drm_ioctl(filp, cmd, arg);
}

static int vmw_firstopen(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	dev_priv->is_opened = true;

	return 0;
}

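/*
 * vmw_lastclose - Turn off all CRTCs when the last file handle is closed,
 * unless this is the implicit lastclose issued at driver unload time.
 */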
static void vmw_lastclose(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_crtc *crtc;
	struct drm_mode_set set;
	int ret;

	/*
	 * Do nothing on the lastclose call from drm_unload.
	 */

	if (!dev_priv->is_opened)
		return;

	dev_priv->is_opened = false;
	set.x = 0;
	set.y = 0;
	set.fb = NULL;
	set.mode = NULL;
	set.connectors = NULL;
	set.num_connectors = 0;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		set.crtc = crtc;
		ret = crtc->funcs->set_config(&set);
		WARN_ON(ret != 0);
	}
}

static void vmw_master_init(struct vmw_master *vmaster)
{
	ttm_lock_init(&vmaster->lock);
	INIT_LIST_HEAD(&vmaster->fb_surf);
	mutex_init(&vmaster->fb_surf_mutex);
}

static int vmw_master_create(struct drm_device *dev,
			     struct drm_master *master)
{
	struct vmw_master *vmaster;

	vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
	if (unlikely(vmaster == NULL))
		return -ENOMEM;

	vmw_master_init(vmaster);
	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
	master->driver_priv = vmaster;

	return 0;
}

static void vmw_master_destroy(struct drm_device *dev,
			       struct drm_master *master)
{
	struct vmw_master *vmaster = vmw_master(master);

	master->driver_priv = NULL;
	kfree(vmaster);
}

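/*
 * vmw_master_set - Called when a new master takes over.
 *
 * If fbdev is not enabled, take a 3D resource reference and disable
 * command traces, then hand the ttm lock over from the previously active
 * (fbdev) master to the new master.
 */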
static int vmw_master_set(struct drm_device *dev,
			  struct drm_file *file_priv,
			  bool from_open)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *active = dev_priv->active_master;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret = 0;

	if (!dev_priv->enable_fb) {
		ret = vmw_3d_resource_inc(dev_priv, true);
		if (unlikely(ret != 0))
			return ret;
		vmw_kms_save_vga(dev_priv);
		mutex_lock(&dev_priv->hw_mutex);
		vmw_write(dev_priv, SVGA_REG_TRACES, 0);
		mutex_unlock(&dev_priv->hw_mutex);
	}

	if (active) {
		BUG_ON(active != &dev_priv->fbdev_master);
		ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
		if (unlikely(ret != 0))
			goto out_no_active_lock;

		ttm_lock_set_kill(&active->lock, true, SIGTERM);
		ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Unable to clean VRAM on "
				  "master drop.\n");
		}

		dev_priv->active_master = NULL;
	}

	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
	if (!from_open) {
		ttm_vt_unlock(&vmaster->lock);
		BUG_ON(vmw_fp->locked_master != file_priv->master);
		drm_master_put(&vmw_fp->locked_master);
	}

	dev_priv->active_master = vmaster;

	return 0;

out_no_active_lock:
	if (!dev_priv->enable_fb) {
		mutex_lock(&dev_priv->hw_mutex);
		vmw_write(dev_priv, SVGA_REG_TRACES, 1);
		mutex_unlock(&dev_priv->hw_mutex);
		vmw_kms_restore_vga(dev_priv);
		vmw_3d_resource_dec(dev_priv, true);
	}
	return ret;
}

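/*
 * vmw_master_drop - Called when the current master is dropped, for
 * example at VT switch.
 *
 * Locks out further TTM access for the dropping master, releases the
 * pinned query buffer object and hands control back to the fbdev master.
 */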
static void vmw_master_drop(struct drm_device *dev,
			    struct drm_file *file_priv,
			    bool from_release)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	/*
	 * Make sure the master doesn't disappear while we have
	 * it locked.
	 */

	vmw_fp->locked_master = drm_master_get(file_priv->master);
	ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
	vmw_execbuf_release_pinned_bo(dev_priv, false, 0);

	if (unlikely((ret != 0))) {
		DRM_ERROR("Unable to lock TTM at VT switch.\n");
		drm_master_put(&vmw_fp->locked_master);
	}

	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);

	if (!dev_priv->enable_fb) {
		ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
		if (unlikely(ret != 0))
			DRM_ERROR("Unable to clean VRAM on master drop.\n");
		mutex_lock(&dev_priv->hw_mutex);
		vmw_write(dev_priv, SVGA_REG_TRACES, 1);
		mutex_unlock(&dev_priv->hw_mutex);
		vmw_kms_restore_vga(dev_priv);
		vmw_3d_resource_dec(dev_priv, true);
	}

	dev_priv->active_master = &dev_priv->fbdev_master;
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	ttm_vt_unlock(&dev_priv->fbdev_master.lock);

	if (dev_priv->enable_fb)
		vmw_fb_on(dev_priv);
}

static void vmw_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	drm_put_dev(dev);
}

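/*
 * vmwgfx_pm_notifier - PM notifier callback.
 *
 * On suspend or hibernation prepare, take the suspend lock and swap out
 * all buffer objects; drop the lock again on resume / restore.
 */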
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr)
{
	struct vmw_private *dev_priv =
		container_of(nb, struct vmw_private, pm_nb);
	struct vmw_master *vmaster = dev_priv->active_master;

	switch (val) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		ttm_suspend_lock(&vmaster->lock);

		/*
		 * This empties VRAM and unbinds all GMR bindings.
		 * Buffer contents are moved to swappable memory.
		 */
		vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
		ttm_bo_swapout_all(&dev_priv->bdev);

		break;
	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
	case PM_POST_RESTORE:
		ttm_suspend_unlock(&vmaster->lock);

		break;
	case PM_RESTORE_PREPARE:
		break;
	default:
		break;
	}
	return 0;
}

/**
 * These might not be needed with the virtual SVGA device.
 */

static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	if (dev_priv->num_3d_resources != 0) {
		DRM_INFO("Can't suspend or hibernate "
			 "while 3D resources are active.\n");
		return -EBUSY;
	}

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}

static int vmw_pci_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	return pci_enable_device(pdev);
}

static int vmw_pm_suspend(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct pm_message dummy;

	dummy.event = 0;

	return vmw_pci_suspend(pdev, dummy);
}

static int vmw_pm_resume(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);

	return vmw_pci_resume(pdev);
}

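/*
 * vmw_pm_prepare - Drop the fbdev 3D reference and refuse to suspend or
 * hibernate while other 3D resources are still active.
 */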
static int vmw_pm_prepare(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	/*
	 * Release 3d reference held by fbdev and potentially
	 * stop fifo.
	 */
	dev_priv->suspended = true;
	if (dev_priv->enable_fb)
		vmw_3d_resource_dec(dev_priv, true);

	if (dev_priv->num_3d_resources != 0) {

		DRM_INFO("Can't suspend or hibernate "
			 "while 3D resources are active.\n");

		if (dev_priv->enable_fb)
			vmw_3d_resource_inc(dev_priv, true);
		dev_priv->suspended = false;
		return -EBUSY;
	}

	return 0;
}

static void vmw_pm_complete(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	mutex_lock(&dev_priv->hw_mutex);
	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	(void) vmw_read(dev_priv, SVGA_REG_ID);
	mutex_unlock(&dev_priv->hw_mutex);

	/*
	 * Reclaim 3d reference held by fbdev and potentially
	 * start fifo.
	 */
	if (dev_priv->enable_fb)
		vmw_3d_resource_inc(dev_priv, false);

	dev_priv->suspended = false;
}

static const struct dev_pm_ops vmw_pm_ops = {
	.prepare = vmw_pm_prepare,
	.complete = vmw_pm_complete,
	.suspend = vmw_pm_suspend,
	.resume = vmw_pm_resume,
};

static struct drm_driver driver = {
	.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
	DRIVER_MODESET,
	.load = vmw_driver_load,
	.unload = vmw_driver_unload,
	.firstopen = vmw_firstopen,
	.lastclose = vmw_lastclose,
	.irq_preinstall = vmw_irq_preinstall,
	.irq_postinstall = vmw_irq_postinstall,
	.irq_uninstall = vmw_irq_uninstall,
	.irq_handler = vmw_irq_handler,
	.get_vblank_counter = vmw_get_vblank_counter,
	.enable_vblank = vmw_enable_vblank,
	.disable_vblank = vmw_disable_vblank,
	.reclaim_buffers_locked = NULL,
	.ioctls = vmw_ioctls,
	.num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls),
	.dma_quiescent = NULL,	/*vmw_dma_quiescent, */
	.master_create = vmw_master_create,
	.master_destroy = vmw_master_destroy,
	.master_set = vmw_master_set,
	.master_drop = vmw_master_drop,
	.open = vmw_driver_open,
	.postclose = vmw_postclose,

	.dumb_create = vmw_dumb_create,
	.dumb_map_offset = vmw_dumb_map_offset,
	.dumb_destroy = vmw_dumb_destroy,

	.fops = {
		 .owner = THIS_MODULE,
		 .open = drm_open,
		 .release = drm_release,
		 .unlocked_ioctl = vmw_unlocked_ioctl,
		 .mmap = vmw_mmap,
		 .poll = vmw_fops_poll,
		 .read = vmw_fops_read,
		 .fasync = drm_fasync,
#if defined(CONFIG_COMPAT)
		 .compat_ioctl = drm_compat_ioctl,
#endif
		 .llseek = noop_llseek,
	},
	.name = VMWGFX_DRIVER_NAME,
	.desc = VMWGFX_DRIVER_DESC,
	.date = VMWGFX_DRIVER_DATE,
	.major = VMWGFX_DRIVER_MAJOR,
	.minor = VMWGFX_DRIVER_MINOR,
	.patchlevel = VMWGFX_DRIVER_PATCHLEVEL
};

static struct pci_driver vmw_pci_driver = {
	.name = VMWGFX_DRIVER_NAME,
	.id_table = vmw_pci_id_list,
	.probe = vmw_probe,
	.remove = vmw_remove,
	.driver = {
		.pm = &vmw_pm_ops
	}
};

static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	return drm_get_pci_dev(pdev, ent, &driver);
}

static int __init vmwgfx_init(void)
{
	int ret;

#ifdef CONFIG_VGA_CONSOLE
	if (vgacon_text_force())
		return -EINVAL;
#endif

	ret = drm_pci_init(&driver, &vmw_pci_driver);
	if (ret)
		DRM_ERROR("Failed initializing DRM.\n");
	return ret;
}

static void __exit vmwgfx_exit(void)
{
	drm_pci_exit(&driver, &vmw_pci_driver);
}

module_init(vmwgfx_init);
module_exit(vmwgfx_exit);

MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
	       __stringify(VMWGFX_DRIVER_MINOR) "."
	       __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
	       "0");