/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "drmP.h"
#include "vmwgfx_drv.h"
#include "ttm/ttm_placement.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_object.h"
#include "ttm/ttm_module.h"

#define VMWGFX_DRIVER_NAME "vmwgfx"
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
#define VMWGFX_CHIP_SVGAII 0
#define VMW_FB_RESERVATION 0

/*
 * Fully encoded drm commands. Might move to vmw_drm.h
 */
#define DRM_IOCTL_VMW_GET_PARAM \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM, \
		 struct drm_vmw_getparam_arg)
#define DRM_IOCTL_VMW_ALLOC_DMABUF \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF, \
		 union drm_vmw_alloc_dmabuf_arg)
#define DRM_IOCTL_VMW_UNREF_DMABUF \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF, \
		struct drm_vmw_unref_dmabuf_arg)
#define DRM_IOCTL_VMW_CURSOR_BYPASS \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS, \
		struct drm_vmw_cursor_bypass_arg)

#define DRM_IOCTL_VMW_CONTROL_STREAM \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM, \
		struct drm_vmw_control_stream_arg)
#define DRM_IOCTL_VMW_CLAIM_STREAM \
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM, \
		struct drm_vmw_stream_arg)
#define DRM_IOCTL_VMW_UNREF_STREAM \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM, \
		struct drm_vmw_stream_arg)

#define DRM_IOCTL_VMW_CREATE_CONTEXT \
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT, \
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_UNREF_CONTEXT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT, \
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_CREATE_SURFACE \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE, \
		 union drm_vmw_surface_create_arg)
#define DRM_IOCTL_VMW_UNREF_SURFACE \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE, \
		struct drm_vmw_surface_arg)
#define DRM_IOCTL_VMW_REF_SURFACE \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE, \
		 union drm_vmw_surface_reference_arg)
#define DRM_IOCTL_VMW_EXECBUF \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF, \
		struct drm_vmw_execbuf_arg)
#define DRM_IOCTL_VMW_GET_3D_CAP \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP, \
		struct drm_vmw_get_3d_cap_arg)
#define DRM_IOCTL_VMW_FENCE_WAIT \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \
		 struct drm_vmw_fence_wait_arg)
#define DRM_IOCTL_VMW_FENCE_SIGNALED \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED, \
		 struct drm_vmw_fence_signaled_arg)
#define DRM_IOCTL_VMW_FENCE_UNREF \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF, \
		struct drm_vmw_fence_arg)
#define DRM_IOCTL_VMW_PRESENT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT, \
		struct drm_vmw_present_arg)
#define DRM_IOCTL_VMW_PRESENT_READBACK \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK, \
		struct drm_vmw_present_readback_arg)

/*
 * The core DRM version of this macro doesn't account for
 * the driver-private command number that vmw_unlocked_ioctl()
 * checks against, so it is stored here in the cmd_drv field as well.
 */
#define VMW_IOCTL_DEF(ioctl, func, flags) \
	[DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_##ioctl, flags, func, DRM_IOCTL_##ioctl}

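/*
 * Ioctl definitions. The table index is the ioctl number relative to
 * DRM_COMMAND_BASE, as encoded by VMW_IOCTL_DEF above.
 */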
static struct drm_ioctl_desc vmw_ioctls[] = {
	VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
		      vmw_kms_cursor_bypass_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),

	VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),

	VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
		      vmw_fence_obj_signaled_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),

	/* These allow direct access to the framebuffers; mark as master only. */
	VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
		      DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
		      vmw_present_readback_ioctl,
		      DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
};

static struct pci_device_id vmw_pci_id_list[] = {
	{0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
	{0, 0, 0}
};

static int enable_fbdev;

static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static void vmw_master_init(struct vmw_master *);
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr);

MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
module_param_named(enable_fbdev, enable_fbdev, int, 0600);

static void vmw_print_capabilities(uint32_t capabilities)
{
	DRM_INFO("Capabilities:\n");
	if (capabilities & SVGA_CAP_RECT_COPY)
		DRM_INFO(" Rect copy.\n");
	if (capabilities & SVGA_CAP_CURSOR)
		DRM_INFO(" Cursor.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS)
		DRM_INFO(" Cursor bypass.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
		DRM_INFO(" Cursor bypass 2.\n");
	if (capabilities & SVGA_CAP_8BIT_EMULATION)
		DRM_INFO(" 8bit emulation.\n");
	if (capabilities & SVGA_CAP_ALPHA_CURSOR)
		DRM_INFO(" Alpha cursor.\n");
	if (capabilities & SVGA_CAP_3D)
		DRM_INFO(" 3D.\n");
	if (capabilities & SVGA_CAP_EXTENDED_FIFO)
		DRM_INFO(" Extended Fifo.\n");
	if (capabilities & SVGA_CAP_MULTIMON)
		DRM_INFO(" Multimon.\n");
	if (capabilities & SVGA_CAP_PITCHLOCK)
		DRM_INFO(" Pitchlock.\n");
	if (capabilities & SVGA_CAP_IRQMASK)
		DRM_INFO(" Irq mask.\n");
	if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
		DRM_INFO(" Display Topology.\n");
	if (capabilities & SVGA_CAP_GMR)
		DRM_INFO(" Guest memory regions (GMR).\n");
	if (capabilities & SVGA_CAP_TRACES)
		DRM_INFO(" Traces.\n");
	if (capabilities & SVGA_CAP_GMR2)
		DRM_INFO(" GMR2.\n");
	if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
		DRM_INFO(" Screen Object 2.\n");
}

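/*
 * vmw_request_device - Bring up the command FIFO and fence processing.
 * Called when the 3d resource count goes from zero to one, see
 * vmw_3d_resource_inc().
 */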
static int vmw_request_device(struct vmw_private *dev_priv)
{
	int ret;

	ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Unable to initialize FIFO.\n");
		return ret;
	}
	vmw_fence_fifo_up(dev_priv->fman);

	return 0;
}

static void vmw_release_device(struct vmw_private *dev_priv)
{
	vmw_fence_fifo_down(dev_priv->fman);
	vmw_fifo_release(dev_priv, &dev_priv->fifo);
}

/**
 * Increase the 3d resource refcount.
 * If the count was previously zero, initialize the fifo, switching to svga
 * mode. Note that the master holds a ref as well, and may request an
 * explicit switch to svga mode if fb is not running, using @unhide_svga.
 */
int vmw_3d_resource_inc(struct vmw_private *dev_priv,
			bool unhide_svga)
{
	int ret = 0;

	mutex_lock(&dev_priv->release_mutex);
	if (unlikely(dev_priv->num_3d_resources++ == 0)) {
		ret = vmw_request_device(dev_priv);
		if (unlikely(ret != 0))
			--dev_priv->num_3d_resources;
	} else if (unhide_svga) {
		mutex_lock(&dev_priv->hw_mutex);
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  vmw_read(dev_priv, SVGA_REG_ENABLE) &
			  ~SVGA_REG_ENABLE_HIDE);
		mutex_unlock(&dev_priv->hw_mutex);
	}

	mutex_unlock(&dev_priv->release_mutex);
	return ret;
}

/**
 * Decrease the 3d resource refcount.
 * If the count reaches zero, disable the fifo, switching to vga mode.
 * Note that the master holds a refcount as well, and may request an
 * explicit switch to vga mode when it releases its refcount to account
 * for the situation of an X server vt switch to VGA with 3d resources
 * active.
 */
void vmw_3d_resource_dec(struct vmw_private *dev_priv,
			 bool hide_svga)
{
	int32_t n3d;

	mutex_lock(&dev_priv->release_mutex);
	if (unlikely(--dev_priv->num_3d_resources == 0))
		vmw_release_device(dev_priv);
	else if (hide_svga) {
		mutex_lock(&dev_priv->hw_mutex);
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  vmw_read(dev_priv, SVGA_REG_ENABLE) |
			  SVGA_REG_ENABLE_HIDE);
		mutex_unlock(&dev_priv->hw_mutex);
	}

	n3d = (int32_t) dev_priv->num_3d_resources;
	mutex_unlock(&dev_priv->release_mutex);

	BUG_ON(n3d < 0);
}

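/*
 * vmw_driver_load - Main driver initialization.
 * Reads device capabilities, sets up the TTM memory managers, the MMIO
 * mapping, the fence manager, KMS, overlays and (optionally) the fbdev
 * emulation and irq handling.
 */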
static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
	struct vmw_private *dev_priv;
	int ret;
	uint32_t svga_id;

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (unlikely(dev_priv == NULL)) {
		DRM_ERROR("Failed allocating a device private struct.\n");
		return -ENOMEM;
	}
	memset(dev_priv, 0, sizeof(*dev_priv));

	dev_priv->dev = dev;
	dev_priv->vmw_chipset = chipset;
	dev_priv->last_read_seqno = (uint32_t) -100;
	mutex_init(&dev_priv->hw_mutex);
	mutex_init(&dev_priv->cmdbuf_mutex);
	mutex_init(&dev_priv->release_mutex);
	rwlock_init(&dev_priv->resource_lock);
	idr_init(&dev_priv->context_idr);
	idr_init(&dev_priv->surface_idr);
	idr_init(&dev_priv->stream_idr);
	mutex_init(&dev_priv->init_mutex);
	init_waitqueue_head(&dev_priv->fence_queue);
	init_waitqueue_head(&dev_priv->fifo_queue);
	dev_priv->fence_queue_waiters = 0;
	atomic_set(&dev_priv->fifo_queue_waiters, 0);

	dev_priv->io_start = pci_resource_start(dev->pdev, 0);
	dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
	dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);

	dev_priv->enable_fb = enable_fbdev;

	mutex_lock(&dev_priv->hw_mutex);

	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	svga_id = vmw_read(dev_priv, SVGA_REG_ID);
	if (svga_id != SVGA_ID_2) {
		ret = -ENOSYS;
		DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
		mutex_unlock(&dev_priv->hw_mutex);
		goto out_err0;
	}

	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);

	if (dev_priv->capabilities & SVGA_CAP_GMR) {
		dev_priv->max_gmr_descriptors =
			vmw_read(dev_priv,
				 SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH);
		dev_priv->max_gmr_ids =
			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
	}
	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		dev_priv->max_gmr_pages =
			vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
		dev_priv->memory_size =
			vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
	}

	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
	dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);

	mutex_unlock(&dev_priv->hw_mutex);

	vmw_print_capabilities(dev_priv->capabilities);

	if (dev_priv->capabilities & SVGA_CAP_GMR) {
		DRM_INFO("Max GMR ids is %u\n",
			 (unsigned)dev_priv->max_gmr_ids);
		DRM_INFO("Max GMR descriptors is %u\n",
			 (unsigned)dev_priv->max_gmr_descriptors);
	}
	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		DRM_INFO("Max number of GMR pages is %u\n",
			 (unsigned)dev_priv->max_gmr_pages);
		DRM_INFO("Max dedicated hypervisor graphics memory is %u\n",
			 (unsigned)dev_priv->memory_size);
	}
	DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
		 dev_priv->vram_start, dev_priv->vram_size / 1024);
	DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
		 dev_priv->mmio_start, dev_priv->mmio_size / 1024);

	ret = vmw_ttm_global_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;

	vmw_master_init(&dev_priv->fbdev_master);
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	dev_priv->active_master = &dev_priv->fbdev_master;

	ret = ttm_bo_device_init(&dev_priv->bdev,
				 dev_priv->bo_global_ref.ref.object,
				 &vmw_bo_driver, VMWGFX_FILE_PAGE_OFFSET,
				 false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
		goto out_err1;
	}

	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
			     (dev_priv->vram_size >> PAGE_SHIFT));
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
		goto out_err2;
	}

	dev_priv->has_gmr = true;
	if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
			   dev_priv->max_gmr_ids) != 0) {
		DRM_INFO("No GMR memory available. "
			 "Graphics memory resources are very limited.\n");
		dev_priv->has_gmr = false;
	}

	dev_priv->mmio_mtrr = drm_mtrr_add(dev_priv->mmio_start,
					   dev_priv->mmio_size, DRM_MTRR_WC);

	dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start,
					 dev_priv->mmio_size);

	if (unlikely(dev_priv->mmio_virt == NULL)) {
		ret = -ENOMEM;
		DRM_ERROR("Failed mapping MMIO.\n");
		goto out_err3;
	}

	/* Need mmio memory to check for fifo pitchlock cap. */
	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
	    !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
	    !vmw_fifo_have_pitchlock(dev_priv)) {
		ret = -ENOSYS;
		DRM_ERROR("Hardware has no pitchlock\n");
		goto out_err4;
	}

	dev_priv->tdev = ttm_object_device_init
		(dev_priv->mem_global_ref.object, 12);

	if (unlikely(dev_priv->tdev == NULL)) {
		DRM_ERROR("Unable to initialize TTM object management.\n");
		ret = -ENOMEM;
		goto out_err4;
	}

	dev->dev_private = dev_priv;

	ret = pci_request_regions(dev->pdev, "vmwgfx probe");
	dev_priv->stealth = (ret != 0);
	if (dev_priv->stealth) {
		/*
		 * Request at least the mmio PCI resource.
		 */
		DRM_INFO("It appears like vesafb is loaded. "
			 "Ignore above error if any.\n");
		ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
			goto out_no_device;
		}
	}

	dev_priv->fman = vmw_fence_manager_init(dev_priv);
	if (unlikely(dev_priv->fman == NULL))
		goto out_no_fman;

	/* Need to start the fifo to check if we can do screen objects. */
	ret = vmw_3d_resource_inc(dev_priv, true);
	if (unlikely(ret != 0))
		goto out_no_fifo;
	vmw_kms_save_vga(dev_priv);

	/* Start kms and overlay systems, needs fifo. */
	ret = vmw_kms_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_kms;
	vmw_overlay_init(dev_priv);

	/* 3D depends on Screen Objects being used. */
	DRM_INFO("%s", vmw_fifo_have_3d(dev_priv) ?
		 "Detected device 3D availability.\n" :
		 "Detected no device 3D availability.\n");

	/* We might be done with the fifo now. */
	if (dev_priv->enable_fb) {
		vmw_fb_init(dev_priv);
	} else {
		vmw_kms_restore_vga(dev_priv);
		vmw_3d_resource_dec(dev_priv, true);
	}

	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
		ret = drm_irq_install(dev);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed installing irq: %d\n", ret);
			goto out_no_irq;
		}
	}

	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
	register_pm_notifier(&dev_priv->pm_nb);

	return 0;

out_no_irq:
	if (dev_priv->enable_fb)
		vmw_fb_close(dev_priv);
	vmw_overlay_close(dev_priv);
	vmw_kms_close(dev_priv);
out_no_kms:
	/* We still have a 3D resource reference held. */
	if (dev_priv->enable_fb) {
		vmw_kms_restore_vga(dev_priv);
		vmw_3d_resource_dec(dev_priv, false);
	}
out_no_fifo:
	vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);
out_no_device:
	ttm_object_device_release(&dev_priv->tdev);
out_err4:
	iounmap(dev_priv->mmio_virt);
out_err3:
	drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
		     dev_priv->mmio_size, DRM_MTRR_WC);
	if (dev_priv->has_gmr)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_err2:
	(void)ttm_bo_device_release(&dev_priv->bdev);
out_err1:
	vmw_ttm_global_release(dev_priv);
out_err0:
	idr_destroy(&dev_priv->surface_idr);
	idr_destroy(&dev_priv->context_idr);
	idr_destroy(&dev_priv->stream_idr);
	kfree(dev_priv);
	return ret;
}

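/*
 * vmw_driver_unload - Tear down the device in roughly the reverse order
 * of vmw_driver_load().
 */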
static int vmw_driver_unload(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);

	unregister_pm_notifier(&dev_priv->pm_nb);

	if (dev_priv->ctx.cmd_bounce)
		vfree(dev_priv->ctx.cmd_bounce);
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		drm_irq_uninstall(dev_priv->dev);
	if (dev_priv->enable_fb) {
		vmw_fb_close(dev_priv);
		vmw_kms_restore_vga(dev_priv);
		vmw_3d_resource_dec(dev_priv, false);
	}
	vmw_kms_close(dev_priv);
	vmw_overlay_close(dev_priv);
	vmw_fence_manager_takedown(dev_priv->fman);
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);

	ttm_object_device_release(&dev_priv->tdev);
	iounmap(dev_priv->mmio_virt);
	drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
		     dev_priv->mmio_size, DRM_MTRR_WC);
	if (dev_priv->has_gmr)
		(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
	(void)ttm_bo_device_release(&dev_priv->bdev);
	vmw_ttm_global_release(dev_priv);
	idr_destroy(&dev_priv->surface_idr);
	idr_destroy(&dev_priv->context_idr);
	idr_destroy(&dev_priv->stream_idr);

	kfree(dev_priv);

	return 0;
}

static void vmw_postclose(struct drm_device *dev,
			  struct drm_file *file_priv)
{
	struct vmw_fpriv *vmw_fp;

	vmw_fp = vmw_fpriv(file_priv);
	ttm_object_file_release(&vmw_fp->tfile);
	if (vmw_fp->locked_master)
		drm_master_put(&vmw_fp->locked_master);
	kfree(vmw_fp);
}

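/*
 * vmw_driver_open - Per-file open hook. Allocates the per-file private
 * data and the TTM object file used for resource lookups.
 */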
static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp;
	int ret = -ENOMEM;

	vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
	if (unlikely(vmw_fp == NULL))
		return ret;

	vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
	if (unlikely(vmw_fp->tfile == NULL))
		goto out_no_tfile;

	file_priv->driver_priv = vmw_fp;

	if (unlikely(dev_priv->bdev.dev_mapping == NULL))
		dev_priv->bdev.dev_mapping =
			file_priv->filp->f_path.dentry->d_inode->i_mapping;

	return 0;

out_no_tfile:
	kfree(vmw_fp);
	return ret;
}

static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
			       unsigned long arg)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	unsigned int nr = DRM_IOCTL_NR(cmd);

	/*
	 * Do extra checking on driver private ioctls.
	 */

	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
	    && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
		struct drm_ioctl_desc *ioctl =
			&vmw_ioctls[nr - DRM_COMMAND_BASE];

		if (unlikely(ioctl->cmd_drv != cmd)) {
			DRM_ERROR("Invalid command format, ioctl %d\n",
				  nr - DRM_COMMAND_BASE);
			return -EINVAL;
		}
	}

	return drm_ioctl(filp, cmd, arg);
}

static int vmw_firstopen(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);

	dev_priv->is_opened = true;

	return 0;
}

static void vmw_lastclose(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_crtc *crtc;
	struct drm_mode_set set;
	int ret;

	/*
	 * Do nothing on the lastclose call from drm_unload.
	 */

	if (!dev_priv->is_opened)
		return;

	dev_priv->is_opened = false;
	set.x = 0;
	set.y = 0;
	set.fb = NULL;
	set.mode = NULL;
	set.connectors = NULL;
	set.num_connectors = 0;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		set.crtc = crtc;
		ret = crtc->funcs->set_config(&set);
		WARN_ON(ret != 0);
	}
}

static void vmw_master_init(struct vmw_master *vmaster)
{
	ttm_lock_init(&vmaster->lock);
	INIT_LIST_HEAD(&vmaster->fb_surf);
	mutex_init(&vmaster->fb_surf_mutex);
}

static int vmw_master_create(struct drm_device *dev,
			     struct drm_master *master)
{
	struct vmw_master *vmaster;

	vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
	if (unlikely(vmaster == NULL))
		return -ENOMEM;

	vmw_master_init(vmaster);
	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
	master->driver_priv = vmaster;

	return 0;
}

static void vmw_master_destroy(struct drm_device *dev,
			       struct drm_master *master)
{
	struct vmw_master *vmaster = vmw_master(master);

	master->driver_priv = NULL;
	kfree(vmaster);
}

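/*
 * vmw_master_set - A new master takes over. If fbdev is not enabled,
 * start the fifo and disable command traces; then hand the ttm lock
 * over from the previously active (fbdev) master to the new one.
 */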
static int vmw_master_set(struct drm_device *dev,
			  struct drm_file *file_priv,
			  bool from_open)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *active = dev_priv->active_master;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret = 0;

	if (!dev_priv->enable_fb) {
		ret = vmw_3d_resource_inc(dev_priv, true);
		if (unlikely(ret != 0))
			return ret;
		vmw_kms_save_vga(dev_priv);
		mutex_lock(&dev_priv->hw_mutex);
		vmw_write(dev_priv, SVGA_REG_TRACES, 0);
		mutex_unlock(&dev_priv->hw_mutex);
	}

	if (active) {
		BUG_ON(active != &dev_priv->fbdev_master);
		ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
		if (unlikely(ret != 0))
			goto out_no_active_lock;

		ttm_lock_set_kill(&active->lock, true, SIGTERM);
		ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Unable to clean VRAM on "
				  "master drop.\n");
		}

		dev_priv->active_master = NULL;
	}

	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
	if (!from_open) {
		ttm_vt_unlock(&vmaster->lock);
		BUG_ON(vmw_fp->locked_master != file_priv->master);
		drm_master_put(&vmw_fp->locked_master);
	}

	dev_priv->active_master = vmaster;

	return 0;

out_no_active_lock:
	if (!dev_priv->enable_fb) {
		mutex_lock(&dev_priv->hw_mutex);
		vmw_write(dev_priv, SVGA_REG_TRACES, 1);
		mutex_unlock(&dev_priv->hw_mutex);
		vmw_kms_restore_vga(dev_priv);
		vmw_3d_resource_dec(dev_priv, true);
	}
	return ret;
}

static void vmw_master_drop(struct drm_device *dev,
			    struct drm_file *file_priv,
			    bool from_release)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	/*
	 * Make sure the master doesn't disappear while we have
	 * it locked.
	 */

	vmw_fp->locked_master = drm_master_get(file_priv->master);
	ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
	if (unlikely((ret != 0))) {
		DRM_ERROR("Unable to lock TTM at VT switch.\n");
		drm_master_put(&vmw_fp->locked_master);
	}

	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);

	if (!dev_priv->enable_fb) {
		ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
		if (unlikely(ret != 0))
			DRM_ERROR("Unable to clean VRAM on master drop.\n");
		mutex_lock(&dev_priv->hw_mutex);
		vmw_write(dev_priv, SVGA_REG_TRACES, 1);
		mutex_unlock(&dev_priv->hw_mutex);
		vmw_kms_restore_vga(dev_priv);
		vmw_3d_resource_dec(dev_priv, true);
	}

	dev_priv->active_master = &dev_priv->fbdev_master;
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	ttm_vt_unlock(&dev_priv->fbdev_master.lock);

	if (dev_priv->enable_fb)
		vmw_fb_on(dev_priv);
}

static void vmw_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	drm_put_dev(dev);
}

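/*
 * PM notifier: on suspend/hibernate prepare, take the ttm suspend lock
 * and swap out all buffer objects; release the lock again on the
 * post-suspend/restore notifications.
 */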
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr)
{
	struct vmw_private *dev_priv =
		container_of(nb, struct vmw_private, pm_nb);
	struct vmw_master *vmaster = dev_priv->active_master;

	switch (val) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		ttm_suspend_lock(&vmaster->lock);

		/*
		 * This empties VRAM and unbinds all GMR bindings.
		 * Buffer contents are moved to swappable memory.
		 */
		ttm_bo_swapout_all(&dev_priv->bdev);

		break;
	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
	case PM_POST_RESTORE:
		ttm_suspend_unlock(&vmaster->lock);

		break;
	case PM_RESTORE_PREPARE:
		break;
	default:
		break;
	}
	return 0;
}

/*
 * These might not be needed with the virtual SVGA device.
 */

static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	if (dev_priv->num_3d_resources != 0) {
		DRM_INFO("Can't suspend or hibernate "
			 "while 3D resources are active.\n");
		return -EBUSY;
	}

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}

static int vmw_pci_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	return pci_enable_device(pdev);
}

static int vmw_pm_suspend(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct pm_message dummy;

	dummy.event = 0;

	return vmw_pci_suspend(pdev, dummy);
}

static int vmw_pm_resume(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);

	return vmw_pci_resume(pdev);
}

static int vmw_pm_prepare(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	/*
	 * Release 3d reference held by fbdev and potentially
	 * stop the fifo.
	 */
	dev_priv->suspended = true;
	if (dev_priv->enable_fb)
		vmw_3d_resource_dec(dev_priv, true);

	if (dev_priv->num_3d_resources != 0) {

		DRM_INFO("Can't suspend or hibernate "
			 "while 3D resources are active.\n");

		if (dev_priv->enable_fb)
			vmw_3d_resource_inc(dev_priv, true);
		dev_priv->suspended = false;
		return -EBUSY;
	}

	return 0;
}

static void vmw_pm_complete(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	/*
	 * Reclaim 3d reference held by fbdev and potentially
	 * start the fifo.
	 */
	if (dev_priv->enable_fb)
		vmw_3d_resource_inc(dev_priv, false);

	dev_priv->suspended = false;
}

static const struct dev_pm_ops vmw_pm_ops = {
	.prepare = vmw_pm_prepare,
	.complete = vmw_pm_complete,
	.suspend = vmw_pm_suspend,
	.resume = vmw_pm_resume,
};

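/*
 * DRM driver description: entry points, ioctl table and file operations
 * for the vmwgfx device.
 */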
static struct drm_driver driver = {
	.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
	DRIVER_MODESET,
	.load = vmw_driver_load,
	.unload = vmw_driver_unload,
	.firstopen = vmw_firstopen,
	.lastclose = vmw_lastclose,
	.irq_preinstall = vmw_irq_preinstall,
	.irq_postinstall = vmw_irq_postinstall,
	.irq_uninstall = vmw_irq_uninstall,
	.irq_handler = vmw_irq_handler,
	.get_vblank_counter = vmw_get_vblank_counter,
	.reclaim_buffers_locked = NULL,
	.ioctls = vmw_ioctls,
	.num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls),
	.dma_quiescent = NULL,	/*vmw_dma_quiescent, */
	.master_create = vmw_master_create,
	.master_destroy = vmw_master_destroy,
	.master_set = vmw_master_set,
	.master_drop = vmw_master_drop,
	.open = vmw_driver_open,
	.postclose = vmw_postclose,

	.fops = {
		.owner = THIS_MODULE,
		.open = drm_open,
		.release = drm_release,
		.unlocked_ioctl = vmw_unlocked_ioctl,
		.mmap = vmw_mmap,
		.fasync = drm_fasync,
#if defined(CONFIG_COMPAT)
		.compat_ioctl = drm_compat_ioctl,
#endif
		.llseek = noop_llseek,
	},
	.name = VMWGFX_DRIVER_NAME,
	.desc = VMWGFX_DRIVER_DESC,
	.date = VMWGFX_DRIVER_DATE,
	.major = VMWGFX_DRIVER_MAJOR,
	.minor = VMWGFX_DRIVER_MINOR,
	.patchlevel = VMWGFX_DRIVER_PATCHLEVEL
};

static struct pci_driver vmw_pci_driver = {
	.name = VMWGFX_DRIVER_NAME,
	.id_table = vmw_pci_id_list,
	.probe = vmw_probe,
	.remove = vmw_remove,
	.driver = {
		.pm = &vmw_pm_ops
	}
};

static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	return drm_get_pci_dev(pdev, ent, &driver);
}

static int __init vmwgfx_init(void)
{
	int ret;

	ret = drm_pci_init(&driver, &vmw_pci_driver);
	if (ret)
		DRM_ERROR("Failed initializing DRM.\n");
	return ret;
}

static void __exit vmwgfx_exit(void)
{
	drm_pci_exit(&driver, &vmw_pci_driver);
}

module_init(vmwgfx_init);
module_exit(vmwgfx_exit);

MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
	       __stringify(VMWGFX_DRIVER_MINOR) "."
	       __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
	       "0");