vmwgfx: Switch to VGA when we drop master and vmwgfx fbdev is not active
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c (pandora-kernel.git)
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "drmP.h"
#include "vmwgfx_drv.h"
#include "ttm/ttm_placement.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_object.h"
#include "ttm/ttm_module.h"
#define VMWGFX_DRIVER_NAME "vmwgfx"
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
#define VMWGFX_CHIP_SVGAII 0
#define VMW_FB_RESERVATION 0

/**
 * Fully encoded drm commands. Might move to vmw_drm.h
 */

#define DRM_IOCTL_VMW_GET_PARAM                                 \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM,          \
                 struct drm_vmw_getparam_arg)
#define DRM_IOCTL_VMW_ALLOC_DMABUF                              \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF,       \
                 union drm_vmw_alloc_dmabuf_arg)
#define DRM_IOCTL_VMW_UNREF_DMABUF                              \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF,        \
                struct drm_vmw_unref_dmabuf_arg)
#define DRM_IOCTL_VMW_CURSOR_BYPASS                             \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS,       \
                struct drm_vmw_cursor_bypass_arg)

#define DRM_IOCTL_VMW_CONTROL_STREAM                            \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM,      \
                struct drm_vmw_control_stream_arg)
#define DRM_IOCTL_VMW_CLAIM_STREAM                              \
        DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM,        \
                struct drm_vmw_stream_arg)
#define DRM_IOCTL_VMW_UNREF_STREAM                              \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM,        \
                struct drm_vmw_stream_arg)

#define DRM_IOCTL_VMW_CREATE_CONTEXT                            \
        DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT,      \
                struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_UNREF_CONTEXT                             \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT,       \
                struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_CREATE_SURFACE                            \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE,     \
                 union drm_vmw_surface_create_arg)
#define DRM_IOCTL_VMW_UNREF_SURFACE                             \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE,       \
                struct drm_vmw_surface_arg)
#define DRM_IOCTL_VMW_REF_SURFACE                               \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE,        \
                 union drm_vmw_surface_reference_arg)
#define DRM_IOCTL_VMW_EXECBUF                                   \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF,             \
                struct drm_vmw_execbuf_arg)
#define DRM_IOCTL_VMW_FIFO_DEBUG                                \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FIFO_DEBUG,         \
                 struct drm_vmw_fifo_debug_arg)
#define DRM_IOCTL_VMW_FENCE_WAIT                                \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT,         \
                 struct drm_vmw_fence_wait_arg)
#define DRM_IOCTL_VMW_UPDATE_LAYOUT                             \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT,      \
                 struct drm_vmw_update_layout_arg)

/**
 * The core DRM version of this macro doesn't account for
 * DRM_COMMAND_BASE.
 */

#define VMW_IOCTL_DEF(ioctl, func, flags) \
  [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_##ioctl, flags, func, DRM_IOCTL_##ioctl}

/**
 * Ioctl definitions.
 */

static struct drm_ioctl_desc vmw_ioctls[] = {
        VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
                      vmw_kms_cursor_bypass_ioctl,
                      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),

        VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
                      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
                      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
                      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),

        VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_FIFO_DEBUG, vmw_fifo_debug_ioctl,
                      DRM_AUTH | DRM_ROOT_ONLY | DRM_MASTER | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_wait_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT, vmw_kms_update_layout_ioctl,
                      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED)
};

static struct pci_device_id vmw_pci_id_list[] = {
        {0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
        {0, 0, 0}
};

static int enable_fbdev;

static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static void vmw_master_init(struct vmw_master *);
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
                              void *ptr);

MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
module_param_named(enable_fbdev, enable_fbdev, int, 0600);
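/*
 * Illustrative usage (not part of the original source): the parameter is
 * exposed under the module name, so the vmwgfx fbdev layer can be enabled
 * with "modprobe vmwgfx enable_fbdev=1", or with "vmwgfx.enable_fbdev=1"
 * on the kernel command line when the driver is built in.
 */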

static void vmw_print_capabilities(uint32_t capabilities)
{
        DRM_INFO("Capabilities:\n");
        if (capabilities & SVGA_CAP_RECT_COPY)
                DRM_INFO("  Rect copy.\n");
        if (capabilities & SVGA_CAP_CURSOR)
                DRM_INFO("  Cursor.\n");
        if (capabilities & SVGA_CAP_CURSOR_BYPASS)
                DRM_INFO("  Cursor bypass.\n");
        if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
                DRM_INFO("  Cursor bypass 2.\n");
        if (capabilities & SVGA_CAP_8BIT_EMULATION)
                DRM_INFO("  8bit emulation.\n");
        if (capabilities & SVGA_CAP_ALPHA_CURSOR)
                DRM_INFO("  Alpha cursor.\n");
        if (capabilities & SVGA_CAP_3D)
                DRM_INFO("  3D.\n");
        if (capabilities & SVGA_CAP_EXTENDED_FIFO)
                DRM_INFO("  Extended Fifo.\n");
        if (capabilities & SVGA_CAP_MULTIMON)
                DRM_INFO("  Multimon.\n");
        if (capabilities & SVGA_CAP_PITCHLOCK)
                DRM_INFO("  Pitchlock.\n");
        if (capabilities & SVGA_CAP_IRQMASK)
                DRM_INFO("  Irq mask.\n");
        if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
                DRM_INFO("  Display Topology.\n");
        if (capabilities & SVGA_CAP_GMR)
                DRM_INFO("  GMR.\n");
        if (capabilities & SVGA_CAP_TRACES)
                DRM_INFO("  Traces.\n");
}

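/**
 * vmw_request_device - Bring the device up for accelerated use.
 *
 * Currently this only initializes the FIFO, which implicitly switches
 * the device to SVGA mode. Called when the 3D resource refcount goes
 * from zero to one.
 */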
static int vmw_request_device(struct vmw_private *dev_priv)
{
        int ret;

        ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Unable to initialize FIFO.\n");
                return ret;
        }

        return 0;
}

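/**
 * vmw_release_device - Counterpart of vmw_request_device. Tears down the
 * FIFO when the last 3D resource reference is dropped.
 */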
static void vmw_release_device(struct vmw_private *dev_priv)
{
        vmw_fifo_release(dev_priv, &dev_priv->fifo);
}

/**
 * Increase the 3d resource refcount.
 * If the count was previously zero, initialize the fifo, switching to svga
 * mode. Note that the master holds a ref as well, and may request an
 * explicit switch to svga mode if fb is not running, using @unhide_svga.
 */
int vmw_3d_resource_inc(struct vmw_private *dev_priv,
                        bool unhide_svga)
{
        int ret = 0;

        mutex_lock(&dev_priv->release_mutex);
        if (unlikely(dev_priv->num_3d_resources++ == 0)) {
                ret = vmw_request_device(dev_priv);
                if (unlikely(ret != 0))
                        --dev_priv->num_3d_resources;
        } else if (unhide_svga) {
                mutex_lock(&dev_priv->hw_mutex);
                vmw_write(dev_priv, SVGA_REG_ENABLE,
                          vmw_read(dev_priv, SVGA_REG_ENABLE) &
                          ~SVGA_REG_ENABLE_HIDE);
                mutex_unlock(&dev_priv->hw_mutex);
        }

        mutex_unlock(&dev_priv->release_mutex);
        return ret;
}

/**
 * Decrease the 3d resource refcount.
 * If the count reaches zero, disable the fifo, switching to vga mode.
 * Note that the master holds a refcount as well, and may request an
 * explicit switch to vga mode when it releases its refcount to account
 * for the situation of an X server vt switch to VGA with 3d resources
 * active.
 */
void vmw_3d_resource_dec(struct vmw_private *dev_priv,
                         bool hide_svga)
{
        int32_t n3d;

        mutex_lock(&dev_priv->release_mutex);
        if (unlikely(--dev_priv->num_3d_resources == 0))
                vmw_release_device(dev_priv);
        else if (hide_svga) {
                mutex_lock(&dev_priv->hw_mutex);
                vmw_write(dev_priv, SVGA_REG_ENABLE,
                          vmw_read(dev_priv, SVGA_REG_ENABLE) |
                          SVGA_REG_ENABLE_HIDE);
                mutex_unlock(&dev_priv->hw_mutex);
        }

        n3d = (int32_t) dev_priv->num_3d_resources;
        mutex_unlock(&dev_priv->release_mutex);

        BUG_ON(n3d < 0);
}

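/**
 * vmw_driver_load - Main load-time entry point.
 *
 * Negotiates the SVGA device version, reads capabilities, sets up TTM,
 * maps MMIO, initializes KMS and overlays, and optionally brings up the
 * vmwgfx fbdev layer when enable_fbdev is set. Errors unwind in reverse
 * order through the labels at the bottom of the function.
 */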
static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
        struct vmw_private *dev_priv;
        int ret;
        uint32_t svga_id;

        dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
        if (unlikely(dev_priv == NULL)) {
                DRM_ERROR("Failed allocating a device private struct.\n");
                return -ENOMEM;
        }

        dev_priv->dev = dev;
        dev_priv->vmw_chipset = chipset;
        dev_priv->last_read_sequence = (uint32_t) -100;
        mutex_init(&dev_priv->hw_mutex);
        mutex_init(&dev_priv->cmdbuf_mutex);
        mutex_init(&dev_priv->release_mutex);
        rwlock_init(&dev_priv->resource_lock);
        idr_init(&dev_priv->context_idr);
        idr_init(&dev_priv->surface_idr);
        idr_init(&dev_priv->stream_idr);
        mutex_init(&dev_priv->init_mutex);
        init_waitqueue_head(&dev_priv->fence_queue);
        init_waitqueue_head(&dev_priv->fifo_queue);
        atomic_set(&dev_priv->fence_queue_waiters, 0);
        atomic_set(&dev_priv->fifo_queue_waiters, 0);

        dev_priv->io_start = pci_resource_start(dev->pdev, 0);
        dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
        dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);

        dev_priv->enable_fb = enable_fbdev;

        mutex_lock(&dev_priv->hw_mutex);

        vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
        svga_id = vmw_read(dev_priv, SVGA_REG_ID);
        if (svga_id != SVGA_ID_2) {
                ret = -ENOSYS;
                DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
                mutex_unlock(&dev_priv->hw_mutex);
                goto out_err0;
        }

        dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);

        if (dev_priv->capabilities & SVGA_CAP_GMR) {
                dev_priv->max_gmr_descriptors =
                        vmw_read(dev_priv,
                                 SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH);
                dev_priv->max_gmr_ids =
                        vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
        }

        dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
        dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
        dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
        dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);

        mutex_unlock(&dev_priv->hw_mutex);

        vmw_print_capabilities(dev_priv->capabilities);

        if (dev_priv->capabilities & SVGA_CAP_GMR) {
                DRM_INFO("Max GMR ids is %u\n",
                         (unsigned)dev_priv->max_gmr_ids);
                DRM_INFO("Max GMR descriptors is %u\n",
                         (unsigned)dev_priv->max_gmr_descriptors);
        }
        DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
                 dev_priv->vram_start, dev_priv->vram_size / 1024);
        DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
                 dev_priv->mmio_start, dev_priv->mmio_size / 1024);

        ret = vmw_ttm_global_init(dev_priv);
        if (unlikely(ret != 0))
                goto out_err0;

        vmw_master_init(&dev_priv->fbdev_master);
        ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
        dev_priv->active_master = &dev_priv->fbdev_master;

        ret = ttm_bo_device_init(&dev_priv->bdev,
                                 dev_priv->bo_global_ref.ref.object,
                                 &vmw_bo_driver, VMWGFX_FILE_PAGE_OFFSET,
                                 false);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed initializing TTM buffer object driver.\n");
                goto out_err1;
        }

        ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
                             (dev_priv->vram_size >> PAGE_SHIFT));
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed initializing memory manager for VRAM.\n");
                goto out_err2;
        }

        dev_priv->has_gmr = true;
        if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
                           dev_priv->max_gmr_ids) != 0) {
                DRM_INFO("No GMR memory available. "
                         "Graphics memory resources are very limited.\n");
                dev_priv->has_gmr = false;
        }

        dev_priv->mmio_mtrr = drm_mtrr_add(dev_priv->mmio_start,
                                           dev_priv->mmio_size, DRM_MTRR_WC);

        dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start,
                                         dev_priv->mmio_size);

        if (unlikely(dev_priv->mmio_virt == NULL)) {
                ret = -ENOMEM;
                DRM_ERROR("Failed mapping MMIO.\n");
                goto out_err3;
        }

        /* Need mmio memory to check for fifo pitchlock cap. */
        if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
            !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
            !vmw_fifo_have_pitchlock(dev_priv)) {
                ret = -ENOSYS;
                DRM_ERROR("Hardware has no pitchlock\n");
                goto out_err4;
        }

        dev_priv->tdev = ttm_object_device_init
            (dev_priv->mem_global_ref.object, 12);

        if (unlikely(dev_priv->tdev == NULL)) {
                DRM_ERROR("Unable to initialize TTM object management.\n");
                ret = -ENOMEM;
                goto out_err4;
        }

        dev->dev_private = dev_priv;

        ret = pci_request_regions(dev->pdev, "vmwgfx probe");
        dev_priv->stealth = (ret != 0);
        if (dev_priv->stealth) {
                /**
                 * Request at least the mmio PCI resource.
                 */

                DRM_INFO("It appears vesafb is loaded. "
                         "Ignore the above error, if any.\n");
                ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
                        goto out_no_device;
                }
        }
        ret = vmw_kms_init(dev_priv);
        if (unlikely(ret != 0))
                goto out_no_kms;
        vmw_overlay_init(dev_priv);
        if (dev_priv->enable_fb) {
                ret = vmw_3d_resource_inc(dev_priv, false);
                if (unlikely(ret != 0))
                        goto out_no_fifo;
                vmw_kms_save_vga(dev_priv);
                vmw_fb_init(dev_priv);
                DRM_INFO("%s", vmw_fifo_have_3d(dev_priv) ?
                         "Detected device 3D availability.\n" :
                         "Detected no device 3D availability.\n");
        } else {
                DRM_INFO("Delayed 3D detection since we're not "
                         "running the device in SVGA mode yet.\n");
        }

        if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
                ret = drm_irq_install(dev);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Failed installing irq: %d\n", ret);
                        goto out_no_irq;
                }
        }

        dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
        register_pm_notifier(&dev_priv->pm_nb);

        return 0;

out_no_irq:
        if (dev_priv->enable_fb) {
                vmw_fb_close(dev_priv);
                vmw_kms_restore_vga(dev_priv);
                vmw_3d_resource_dec(dev_priv, false);
        }
out_no_fifo:
        vmw_overlay_close(dev_priv);
        vmw_kms_close(dev_priv);
out_no_kms:
        if (dev_priv->stealth)
                pci_release_region(dev->pdev, 2);
        else
                pci_release_regions(dev->pdev);
out_no_device:
        ttm_object_device_release(&dev_priv->tdev);
out_err4:
        iounmap(dev_priv->mmio_virt);
out_err3:
        drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
                     dev_priv->mmio_size, DRM_MTRR_WC);
        if (dev_priv->has_gmr)
                (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
        (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_err2:
        (void)ttm_bo_device_release(&dev_priv->bdev);
out_err1:
        vmw_ttm_global_release(dev_priv);
out_err0:
        idr_destroy(&dev_priv->surface_idr);
        idr_destroy(&dev_priv->context_idr);
        idr_destroy(&dev_priv->stream_idr);
        kfree(dev_priv);
        return ret;
}

static int vmw_driver_unload(struct drm_device *dev)
{
        struct vmw_private *dev_priv = vmw_priv(dev);

        unregister_pm_notifier(&dev_priv->pm_nb);

        if (dev_priv->ctx.cmd_bounce)
                vfree(dev_priv->ctx.cmd_bounce);
        if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
                drm_irq_uninstall(dev_priv->dev);
        if (dev_priv->enable_fb) {
                vmw_fb_close(dev_priv);
                vmw_kms_restore_vga(dev_priv);
                vmw_3d_resource_dec(dev_priv, false);
        }
        vmw_kms_close(dev_priv);
        vmw_overlay_close(dev_priv);
        if (dev_priv->stealth)
                pci_release_region(dev->pdev, 2);
        else
                pci_release_regions(dev->pdev);

        ttm_object_device_release(&dev_priv->tdev);
        iounmap(dev_priv->mmio_virt);
        drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
                     dev_priv->mmio_size, DRM_MTRR_WC);
        if (dev_priv->has_gmr)
                (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
        (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
        (void)ttm_bo_device_release(&dev_priv->bdev);
        vmw_ttm_global_release(dev_priv);
        idr_destroy(&dev_priv->surface_idr);
        idr_destroy(&dev_priv->context_idr);
        idr_destroy(&dev_priv->stream_idr);

        kfree(dev_priv);

        return 0;
}

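/**
 * vmw_postclose - Per-file cleanup. Releases the TTM object file and any
 * master reference still held from a VT switch.
 */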
static void vmw_postclose(struct drm_device *dev,
                          struct drm_file *file_priv)
{
        struct vmw_fpriv *vmw_fp;

        vmw_fp = vmw_fpriv(file_priv);
        ttm_object_file_release(&vmw_fp->tfile);
        if (vmw_fp->locked_master)
                drm_master_put(&vmw_fp->locked_master);
        kfree(vmw_fp);
}

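/**
 * vmw_driver_open - Per-file setup. Allocates the driver-private file
 * struct and its TTM object file, and lazily records the device address
 * space mapping on the first open.
 */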
static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_fpriv *vmw_fp;
        int ret = -ENOMEM;

        vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
        if (unlikely(vmw_fp == NULL))
                return ret;

        vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
        if (unlikely(vmw_fp->tfile == NULL))
                goto out_no_tfile;

        file_priv->driver_priv = vmw_fp;

        if (unlikely(dev_priv->bdev.dev_mapping == NULL))
                dev_priv->bdev.dev_mapping =
                        file_priv->filp->f_path.dentry->d_inode->i_mapping;

        return 0;

out_no_tfile:
        kfree(vmw_fp);
        return ret;
}

static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
                               unsigned long arg)
{
        struct drm_file *file_priv = filp->private_data;
        struct drm_device *dev = file_priv->minor->dev;
        unsigned int nr = DRM_IOCTL_NR(cmd);

        /*
         * Do extra checking on driver private ioctls.
         */

        if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
            && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
                struct drm_ioctl_desc *ioctl =
                    &vmw_ioctls[nr - DRM_COMMAND_BASE];

                if (unlikely(ioctl->cmd_drv != cmd)) {
                        DRM_ERROR("Invalid command format, ioctl %d\n",
                                  nr - DRM_COMMAND_BASE);
                        return -EINVAL;
                }
        }

        return drm_ioctl(filp, cmd, arg);
}

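/**
 * vmw_firstopen - Flag that the device has been opened, so that
 * vmw_lastclose can distinguish a real last close from the one issued
 * during driver unload.
 */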
static int vmw_firstopen(struct drm_device *dev)
{
        struct vmw_private *dev_priv = vmw_priv(dev);

        dev_priv->is_opened = true;

        return 0;
}

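/**
 * vmw_lastclose - Turn off all CRTCs when the last user closes the
 * device, leaving the hardware in a clean state for the next master.
 */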
static void vmw_lastclose(struct drm_device *dev)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct drm_crtc *crtc;
        struct drm_mode_set set;
        int ret;

        /**
         * Do nothing on the lastclose call from drm_unload.
         */

        if (!dev_priv->is_opened)
                return;

        dev_priv->is_opened = false;
        set.x = 0;
        set.y = 0;
        set.fb = NULL;
        set.mode = NULL;
        set.connectors = NULL;
        set.num_connectors = 0;

        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                set.crtc = crtc;
                ret = crtc->funcs->set_config(&set);
                WARN_ON(ret != 0);
        }
}

static void vmw_master_init(struct vmw_master *vmaster)
{
        ttm_lock_init(&vmaster->lock);
        INIT_LIST_HEAD(&vmaster->fb_surf);
        mutex_init(&vmaster->fb_surf_mutex);
}

static int vmw_master_create(struct drm_device *dev,
                             struct drm_master *master)
{
        struct vmw_master *vmaster;

        vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
        if (unlikely(vmaster == NULL))
                return -ENOMEM;

        vmw_master_init(vmaster);
        ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
        master->driver_priv = vmaster;

        return 0;
}

static void vmw_master_destroy(struct drm_device *dev,
                               struct drm_master *master)
{
        struct vmw_master *vmaster = vmw_master(master);

        master->driver_priv = NULL;
        kfree(vmaster);
}

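/**
 * vmw_master_set - A new master has been set.
 *
 * If the vmwgfx fbdev layer is not active, take an extra 3D resource
 * reference on behalf of the master and switch the device to SVGA mode.
 * The previous master, if any (always the fbdev master here), is locked
 * out and VRAM is evicted before the new master takes over.
 */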
static int vmw_master_set(struct drm_device *dev,
                          struct drm_file *file_priv,
                          bool from_open)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
        struct vmw_master *active = dev_priv->active_master;
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        int ret = 0;

        if (!dev_priv->enable_fb) {
                ret = vmw_3d_resource_inc(dev_priv, true);
                if (unlikely(ret != 0))
                        return ret;
                vmw_kms_save_vga(dev_priv);
                mutex_lock(&dev_priv->hw_mutex);
                vmw_write(dev_priv, SVGA_REG_TRACES, 0);
                mutex_unlock(&dev_priv->hw_mutex);
        }

        if (active) {
                BUG_ON(active != &dev_priv->fbdev_master);
                ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
                if (unlikely(ret != 0))
                        goto out_no_active_lock;

                ttm_lock_set_kill(&active->lock, true, SIGTERM);
                ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Unable to clean VRAM on "
                                  "master takeover.\n");
                }

                dev_priv->active_master = NULL;
        }

        ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
        if (!from_open) {
                ttm_vt_unlock(&vmaster->lock);
                BUG_ON(vmw_fp->locked_master != file_priv->master);
                drm_master_put(&vmw_fp->locked_master);
        }

        dev_priv->active_master = vmaster;

        return 0;

out_no_active_lock:
        if (!dev_priv->enable_fb) {
                mutex_lock(&dev_priv->hw_mutex);
                vmw_write(dev_priv, SVGA_REG_TRACES, 1);
                mutex_unlock(&dev_priv->hw_mutex);
                vmw_kms_restore_vga(dev_priv);
                vmw_3d_resource_dec(dev_priv, true);
        }
        return ret;
}

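/**
 * vmw_master_drop - The current master is dropped at VT switch or release.
 *
 * This implements the behavior named in the commit subject: when the
 * vmwgfx fbdev layer is not active, the master's 3D resource reference
 * is dropped and the device is switched back to VGA, so that a generic
 * framebuffer driver (for example vesafb, cf. the stealth handling in
 * vmw_driver_load) can drive the display. With fbdev active, the fbdev
 * master simply resumes as the active master instead.
 */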
static void vmw_master_drop(struct drm_device *dev,
                            struct drm_file *file_priv,
                            bool from_release)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        int ret;

        /**
         * Make sure the master doesn't disappear while we have
         * it locked.
         */

        vmw_fp->locked_master = drm_master_get(file_priv->master);
        ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
        vmw_kms_idle_workqueues(vmaster);

        if (unlikely(ret != 0)) {
                DRM_ERROR("Unable to lock TTM at VT switch.\n");
                drm_master_put(&vmw_fp->locked_master);
        }

        ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);

        if (!dev_priv->enable_fb) {
                ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
                if (unlikely(ret != 0))
                        DRM_ERROR("Unable to clean VRAM on master drop.\n");
                mutex_lock(&dev_priv->hw_mutex);
                vmw_write(dev_priv, SVGA_REG_TRACES, 1);
                mutex_unlock(&dev_priv->hw_mutex);
                vmw_kms_restore_vga(dev_priv);
                vmw_3d_resource_dec(dev_priv, true);
        }

        dev_priv->active_master = &dev_priv->fbdev_master;
        ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
        ttm_vt_unlock(&dev_priv->fbdev_master.lock);

        if (dev_priv->enable_fb)
                vmw_fb_on(dev_priv);
}

static void vmw_remove(struct pci_dev *pdev)
{
        struct drm_device *dev = pci_get_drvdata(pdev);

        drm_put_dev(dev);
}

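/**
 * vmwgfx_pm_notifier - React to suspend/hibernate transitions.
 *
 * On PM_SUSPEND_PREPARE / PM_HIBERNATION_PREPARE, take the TTM suspend
 * lock and swap out all buffer objects so their contents survive in
 * system memory. The lock is released again on the corresponding
 * PM_POST_* events.
 */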
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
                              void *ptr)
{
        struct vmw_private *dev_priv =
                container_of(nb, struct vmw_private, pm_nb);
        struct vmw_master *vmaster = dev_priv->active_master;

        switch (val) {
        case PM_HIBERNATION_PREPARE:
        case PM_SUSPEND_PREPARE:
                ttm_suspend_lock(&vmaster->lock);

                /**
                 * This empties VRAM and unbinds all GMR bindings.
                 * Buffer contents are moved to swappable memory.
                 */
                ttm_bo_swapout_all(&dev_priv->bdev);

                break;
        case PM_POST_HIBERNATION:
        case PM_POST_SUSPEND:
        case PM_POST_RESTORE:
                ttm_suspend_unlock(&vmaster->lock);

                break;
        case PM_RESTORE_PREPARE:
                break;
        default:
                break;
        }
        return 0;
}

/**
 * These might not be needed with the virtual SVGA device.
 */

static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct drm_device *dev = pci_get_drvdata(pdev);
        struct vmw_private *dev_priv = vmw_priv(dev);

        if (dev_priv->num_3d_resources != 0) {
                DRM_INFO("Can't suspend or hibernate "
                         "while 3D resources are active.\n");
                return -EBUSY;
        }

        pci_save_state(pdev);
        pci_disable_device(pdev);
        pci_set_power_state(pdev, PCI_D3hot);
        return 0;
}

static int vmw_pci_resume(struct pci_dev *pdev)
{
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
        return pci_enable_device(pdev);
}

static int vmw_pm_suspend(struct device *kdev)
{
        struct pci_dev *pdev = to_pci_dev(kdev);
        struct pm_message dummy;

        dummy.event = 0;

        return vmw_pci_suspend(pdev, dummy);
}

static int vmw_pm_resume(struct device *kdev)
{
        struct pci_dev *pdev = to_pci_dev(kdev);

        return vmw_pci_resume(pdev);
}

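/**
 * vmw_pm_prepare - Refuse to suspend while 3D resources are in use.
 *
 * The fbdev 3D reference is dropped first; if any other references
 * remain after that, the suspend is aborted with -EBUSY and the fbdev
 * reference is reacquired.
 */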
static int vmw_pm_prepare(struct device *kdev)
{
        struct pci_dev *pdev = to_pci_dev(kdev);
        struct drm_device *dev = pci_get_drvdata(pdev);
        struct vmw_private *dev_priv = vmw_priv(dev);

        /**
         * Release 3d reference held by fbdev and potentially
         * stop fifo.
         */
        dev_priv->suspended = true;
        if (dev_priv->enable_fb)
                vmw_3d_resource_dec(dev_priv, true);

        if (dev_priv->num_3d_resources != 0) {
                DRM_INFO("Can't suspend or hibernate "
                         "while 3D resources are active.\n");

                if (dev_priv->enable_fb)
                        vmw_3d_resource_inc(dev_priv, true);
                dev_priv->suspended = false;
                return -EBUSY;
        }

        return 0;
}

static void vmw_pm_complete(struct device *kdev)
{
        struct pci_dev *pdev = to_pci_dev(kdev);
        struct drm_device *dev = pci_get_drvdata(pdev);
        struct vmw_private *dev_priv = vmw_priv(dev);

        /**
         * Reclaim 3d reference held by fbdev and potentially
         * start fifo.
         */
        if (dev_priv->enable_fb)
                vmw_3d_resource_inc(dev_priv, false);

        dev_priv->suspended = false;
}

static const struct dev_pm_ops vmw_pm_ops = {
        .prepare = vmw_pm_prepare,
        .complete = vmw_pm_complete,
        .suspend = vmw_pm_suspend,
        .resume = vmw_pm_resume,
};

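/**
 * The drm_driver description. DRIVER_MODESET makes this a KMS driver;
 * the legacy buffer-reclaim and DMA-quiescent hooks are left unset.
 */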
static struct drm_driver driver = {
        .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
        DRIVER_MODESET,
        .load = vmw_driver_load,
        .unload = vmw_driver_unload,
        .firstopen = vmw_firstopen,
        .lastclose = vmw_lastclose,
        .irq_preinstall = vmw_irq_preinstall,
        .irq_postinstall = vmw_irq_postinstall,
        .irq_uninstall = vmw_irq_uninstall,
        .irq_handler = vmw_irq_handler,
        .get_vblank_counter = vmw_get_vblank_counter,
        .reclaim_buffers_locked = NULL,
        .ioctls = vmw_ioctls,
        .num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls),
        .dma_quiescent = NULL,  /*vmw_dma_quiescent, */
        .master_create = vmw_master_create,
        .master_destroy = vmw_master_destroy,
        .master_set = vmw_master_set,
        .master_drop = vmw_master_drop,
        .open = vmw_driver_open,
        .postclose = vmw_postclose,
        .fops = {
                 .owner = THIS_MODULE,
                 .open = drm_open,
                 .release = drm_release,
                 .unlocked_ioctl = vmw_unlocked_ioctl,
                 .mmap = vmw_mmap,
                 .poll = drm_poll,
                 .fasync = drm_fasync,
#if defined(CONFIG_COMPAT)
                 .compat_ioctl = drm_compat_ioctl,
#endif
                 .llseek = noop_llseek,
        },
        .name = VMWGFX_DRIVER_NAME,
        .desc = VMWGFX_DRIVER_DESC,
        .date = VMWGFX_DRIVER_DATE,
        .major = VMWGFX_DRIVER_MAJOR,
        .minor = VMWGFX_DRIVER_MINOR,
        .patchlevel = VMWGFX_DRIVER_PATCHLEVEL
};

static struct pci_driver vmw_pci_driver = {
        .name = VMWGFX_DRIVER_NAME,
        .id_table = vmw_pci_id_list,
        .probe = vmw_probe,
        .remove = vmw_remove,
        .driver = {
                .pm = &vmw_pm_ops
        }
};

static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        return drm_get_pci_dev(pdev, ent, &driver);
}

static int __init vmwgfx_init(void)
{
        int ret;

        ret = drm_pci_init(&driver, &vmw_pci_driver);
        if (ret)
                DRM_ERROR("Failed initializing DRM.\n");
        return ret;
}

static void __exit vmwgfx_exit(void)
{
        drm_pci_exit(&driver, &vmw_pci_driver);
}

module_init(vmwgfx_init);
module_exit(vmwgfx_exit);

MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
               __stringify(VMWGFX_DRIVER_MINOR) "."
               __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
               "0");