drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/module.h>

#include "drmP.h"
#include "vmwgfx_drv.h"
#include "ttm/ttm_placement.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_object.h"
#include "ttm/ttm_module.h"

#define VMWGFX_DRIVER_NAME "vmwgfx"
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
#define VMWGFX_CHIP_SVGAII 0
#define VMW_FB_RESERVATION 0

/**
 * Fully encoded drm commands. Might move to vmw_drm.h
 */

#define DRM_IOCTL_VMW_GET_PARAM                                 \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM,          \
                 struct drm_vmw_getparam_arg)
#define DRM_IOCTL_VMW_ALLOC_DMABUF                              \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF,       \
                 union drm_vmw_alloc_dmabuf_arg)
#define DRM_IOCTL_VMW_UNREF_DMABUF                              \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF,        \
                struct drm_vmw_unref_dmabuf_arg)
#define DRM_IOCTL_VMW_CURSOR_BYPASS                             \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS,       \
                struct drm_vmw_cursor_bypass_arg)

#define DRM_IOCTL_VMW_CONTROL_STREAM                            \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM,      \
                struct drm_vmw_control_stream_arg)
#define DRM_IOCTL_VMW_CLAIM_STREAM                              \
        DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM,        \
                struct drm_vmw_stream_arg)
#define DRM_IOCTL_VMW_UNREF_STREAM                              \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM,        \
                struct drm_vmw_stream_arg)

#define DRM_IOCTL_VMW_CREATE_CONTEXT                            \
        DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT,      \
                struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_UNREF_CONTEXT                             \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT,       \
                struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_CREATE_SURFACE                            \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE,     \
                 union drm_vmw_surface_create_arg)
#define DRM_IOCTL_VMW_UNREF_SURFACE                             \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE,       \
                struct drm_vmw_surface_arg)
#define DRM_IOCTL_VMW_REF_SURFACE                               \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE,        \
                 union drm_vmw_surface_reference_arg)
#define DRM_IOCTL_VMW_EXECBUF                                   \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF,             \
                struct drm_vmw_execbuf_arg)
#define DRM_IOCTL_VMW_GET_3D_CAP                                \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP,          \
                struct drm_vmw_get_3d_cap_arg)
#define DRM_IOCTL_VMW_FENCE_WAIT                                \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT,         \
                 struct drm_vmw_fence_wait_arg)
#define DRM_IOCTL_VMW_FENCE_SIGNALED                            \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED,     \
                 struct drm_vmw_fence_signaled_arg)
#define DRM_IOCTL_VMW_FENCE_UNREF                               \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF,         \
                struct drm_vmw_fence_arg)
#define DRM_IOCTL_VMW_FENCE_EVENT                               \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT,         \
                struct drm_vmw_fence_event_arg)
#define DRM_IOCTL_VMW_PRESENT                                   \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT,             \
                struct drm_vmw_present_arg)
#define DRM_IOCTL_VMW_PRESENT_READBACK                          \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK,    \
                struct drm_vmw_present_readback_arg)
#define DRM_IOCTL_VMW_UPDATE_LAYOUT                             \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT,       \
                struct drm_vmw_update_layout_arg)

/**
 * The core DRM version of this macro doesn't account for
 * DRM_COMMAND_BASE.
 */

#define VMW_IOCTL_DEF(ioctl, func, flags) \
  [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_##ioctl, flags, func, DRM_IOCTL_##ioctl}
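
/*
 * As a sketch of the expansion: DRM_IOCTL_NR() recovers the
 * DRM_COMMAND_BASE + DRM_VMW_* number encoded above, so the entry for
 * VMW_GET_PARAM roughly becomes
 *
 *   [DRM_VMW_GET_PARAM] = {DRM_VMW_GET_PARAM, flags,
 *                          vmw_getparam_ioctl, DRM_IOCTL_VMW_GET_PARAM}
 *
 * indexing the descriptor table by the driver-private ioctl number.
 */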

/**
 * Ioctl definitions.
 */

static struct drm_ioctl_desc vmw_ioctls[] = {
        VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
                      vmw_kms_cursor_bypass_ioctl,
                      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),

        VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
                      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
                      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
                      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),

        VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
                      vmw_fence_obj_signaled_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_FENCE_EVENT,
                      vmw_fence_event_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),

        /* These allow direct access to the framebuffers; mark as master only. */
        VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
                      DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
                      vmw_present_readback_ioctl,
                      DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
                      vmw_kms_update_layout_ioctl,
                      DRM_MASTER | DRM_UNLOCKED),
};

static struct pci_device_id vmw_pci_id_list[] = {
        {0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
        {0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);

static int enable_fbdev;

static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static void vmw_master_init(struct vmw_master *);
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
                              void *ptr);

MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
module_param_named(enable_fbdev, enable_fbdev, int, 0600);

static void vmw_print_capabilities(uint32_t capabilities)
{
        DRM_INFO("Capabilities:\n");
        if (capabilities & SVGA_CAP_RECT_COPY)
                DRM_INFO("  Rect copy.\n");
        if (capabilities & SVGA_CAP_CURSOR)
                DRM_INFO("  Cursor.\n");
        if (capabilities & SVGA_CAP_CURSOR_BYPASS)
                DRM_INFO("  Cursor bypass.\n");
        if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
                DRM_INFO("  Cursor bypass 2.\n");
        if (capabilities & SVGA_CAP_8BIT_EMULATION)
                DRM_INFO("  8bit emulation.\n");
        if (capabilities & SVGA_CAP_ALPHA_CURSOR)
                DRM_INFO("  Alpha cursor.\n");
        if (capabilities & SVGA_CAP_3D)
                DRM_INFO("  3D.\n");
        if (capabilities & SVGA_CAP_EXTENDED_FIFO)
                DRM_INFO("  Extended Fifo.\n");
        if (capabilities & SVGA_CAP_MULTIMON)
                DRM_INFO("  Multimon.\n");
        if (capabilities & SVGA_CAP_PITCHLOCK)
                DRM_INFO("  Pitchlock.\n");
        if (capabilities & SVGA_CAP_IRQMASK)
                DRM_INFO("  Irq mask.\n");
        if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
                DRM_INFO("  Display Topology.\n");
        if (capabilities & SVGA_CAP_GMR)
                DRM_INFO("  GMR.\n");
        if (capabilities & SVGA_CAP_TRACES)
                DRM_INFO("  Traces.\n");
        if (capabilities & SVGA_CAP_GMR2)
                DRM_INFO("  GMR2.\n");
        if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
                DRM_INFO("  Screen Object 2.\n");
}

/**
 * vmw_dummy_query_bo_prepare - Initialize a query result structure at
 * the start of a buffer object.
 *
 * @dev_priv: The device private structure.
 *
 * This function will idle the buffer using an uninterruptible wait, then
 * map the first page and initialize a pending occlusion query result
 * structure. Finally it will unmap the buffer.
 *
 * TODO: Since we're only mapping a single page, we should optimize the map
 * to use kmap_atomic / iomap_atomic.
 */
static void vmw_dummy_query_bo_prepare(struct vmw_private *dev_priv)
{
        struct ttm_bo_kmap_obj map;
        volatile SVGA3dQueryResult *result;
        bool dummy;
        int ret;
        struct ttm_bo_device *bdev = &dev_priv->bdev;
        struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;

        ttm_bo_reserve(bo, false, false, false, 0);
        spin_lock(&bdev->fence_lock);
        ret = ttm_bo_wait(bo, false, false, false);
        spin_unlock(&bdev->fence_lock);
        if (unlikely(ret != 0))
                (void) vmw_fallback_wait(dev_priv, false, true, 0, false,
                                         10*HZ);

        ret = ttm_bo_kmap(bo, 0, 1, &map);
        if (likely(ret == 0)) {
                result = ttm_kmap_obj_virtual(&map, &dummy);
                result->totalSize = sizeof(*result);
                result->state = SVGA3D_QUERYSTATE_PENDING;
                result->result32 = 0xff;
                ttm_bo_kunmap(&map);
        } else
                DRM_ERROR("Dummy query buffer map failed.\n");
        ttm_bo_unreserve(bo);
}

/**
 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
 *
 * @dev_priv: A device private structure.
 *
 * This function creates a small buffer object that holds the query
 * result for dummy queries emitted as query barriers.
 * No interruptible waits are done within this function.
 *
 * Returns an error if bo creation fails.
 */
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
        return ttm_bo_create(&dev_priv->bdev,
                             PAGE_SIZE,
                             ttm_bo_type_device,
                             &vmw_vram_sys_placement,
                             0, 0, false, NULL,
                             &dev_priv->dummy_query_bo);
}

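/**
 * vmw_request_device - Bring the device up for 3D use.
 *
 * @dev_priv: Pointer to a struct vmw_private.
 *
 * Initializes the fifo, enables the fence manager and creates and
 * prepares the dummy query buffer, tearing down again on failure.
 */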
static int vmw_request_device(struct vmw_private *dev_priv)
{
        int ret;

        ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Unable to initialize FIFO.\n");
                return ret;
        }
        vmw_fence_fifo_up(dev_priv->fman);
        ret = vmw_dummy_query_bo_create(dev_priv);
        if (unlikely(ret != 0))
                goto out_no_query_bo;
        vmw_dummy_query_bo_prepare(dev_priv);

        return 0;

out_no_query_bo:
        vmw_fence_fifo_down(dev_priv->fman);
        vmw_fifo_release(dev_priv, &dev_priv->fifo);
        return ret;
}

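/**
 * vmw_release_device - Release the resources set up by
 * vmw_request_device, taking the device out of 3D use.
 *
 * @dev_priv: Pointer to a struct vmw_private.
 */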
static void vmw_release_device(struct vmw_private *dev_priv)
{
        /*
         * Previous destructions should've released
         * the pinned bo.
         */

        BUG_ON(dev_priv->pinned_bo != NULL);

        ttm_bo_unref(&dev_priv->dummy_query_bo);
        vmw_fence_fifo_down(dev_priv->fman);
        vmw_fifo_release(dev_priv, &dev_priv->fifo);
}

/**
 * vmw_3d_resource_inc - Increase the 3d resource refcount.
 *
 * If the count was previously zero, initialize the fifo, switching to svga
 * mode. Note that the master holds a ref as well, and may request an
 * explicit switch to svga mode if fb is not running, using @unhide_svga.
 */
int vmw_3d_resource_inc(struct vmw_private *dev_priv,
                        bool unhide_svga)
{
        int ret = 0;

        mutex_lock(&dev_priv->release_mutex);
        if (unlikely(dev_priv->num_3d_resources++ == 0)) {
                ret = vmw_request_device(dev_priv);
                if (unlikely(ret != 0))
                        --dev_priv->num_3d_resources;
        } else if (unhide_svga) {
                mutex_lock(&dev_priv->hw_mutex);
                vmw_write(dev_priv, SVGA_REG_ENABLE,
                          vmw_read(dev_priv, SVGA_REG_ENABLE) &
                          ~SVGA_REG_ENABLE_HIDE);
                mutex_unlock(&dev_priv->hw_mutex);
        }

        mutex_unlock(&dev_priv->release_mutex);
        return ret;
}

/**
 * vmw_3d_resource_dec - Decrease the 3d resource refcount.
 *
 * If the count reaches zero, disable the fifo, switching to vga mode.
 * Note that the master holds a refcount as well, and may request an
 * explicit switch to vga mode when it releases its refcount to account
 * for the situation of an X server vt switch to VGA with 3d resources
 * active.
 */
void vmw_3d_resource_dec(struct vmw_private *dev_priv,
                         bool hide_svga)
{
        int32_t n3d;

        mutex_lock(&dev_priv->release_mutex);
        if (unlikely(--dev_priv->num_3d_resources == 0))
                vmw_release_device(dev_priv);
        else if (hide_svga) {
                mutex_lock(&dev_priv->hw_mutex);
                vmw_write(dev_priv, SVGA_REG_ENABLE,
                          vmw_read(dev_priv, SVGA_REG_ENABLE) |
                          SVGA_REG_ENABLE_HIDE);
                mutex_unlock(&dev_priv->hw_mutex);
        }

        n3d = (int32_t) dev_priv->num_3d_resources;
        mutex_unlock(&dev_priv->release_mutex);

        BUG_ON(n3d < 0);
}

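/**
 * vmw_driver_load - Set up the driver at load time.
 *
 * @dev: Pointer to the drm device.
 * @chipset: Chipset id from the matching pci id table entry.
 *
 * Allocates the device private structure, verifies the SVGA id and
 * reads the device capabilities, then brings up TTM, the MMIO mapping,
 * the fence manager, the memory managers, kms and overlay, and
 * optionally fbdev, before installing the irq handler and pm notifier.
 */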
static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
        struct vmw_private *dev_priv;
        int ret;
        uint32_t svga_id;

        dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
        if (unlikely(dev_priv == NULL)) {
                DRM_ERROR("Failed allocating a device private struct.\n");
                return -ENOMEM;
        }

        dev_priv->dev = dev;
        dev_priv->vmw_chipset = chipset;
        dev_priv->last_read_seqno = (uint32_t) -100;
        mutex_init(&dev_priv->hw_mutex);
        mutex_init(&dev_priv->cmdbuf_mutex);
        mutex_init(&dev_priv->release_mutex);
        rwlock_init(&dev_priv->resource_lock);
        idr_init(&dev_priv->context_idr);
        idr_init(&dev_priv->surface_idr);
        idr_init(&dev_priv->stream_idr);
        mutex_init(&dev_priv->init_mutex);
        init_waitqueue_head(&dev_priv->fence_queue);
        init_waitqueue_head(&dev_priv->fifo_queue);
        dev_priv->fence_queue_waiters = 0;
        atomic_set(&dev_priv->fifo_queue_waiters, 0);
        INIT_LIST_HEAD(&dev_priv->surface_lru);
        dev_priv->used_memory_size = 0;

        dev_priv->io_start = pci_resource_start(dev->pdev, 0);
        dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
        dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);

        dev_priv->enable_fb = enable_fbdev;

        mutex_lock(&dev_priv->hw_mutex);

        vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
        svga_id = vmw_read(dev_priv, SVGA_REG_ID);
        if (svga_id != SVGA_ID_2) {
                ret = -ENOSYS;
                DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
                mutex_unlock(&dev_priv->hw_mutex);
                goto out_err0;
        }

        dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);

        dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
        dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
        dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
        dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);
        if (dev_priv->capabilities & SVGA_CAP_GMR) {
                dev_priv->max_gmr_descriptors =
                        vmw_read(dev_priv,
                                 SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH);
                dev_priv->max_gmr_ids =
                        vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
        }
        if (dev_priv->capabilities & SVGA_CAP_GMR2) {
                dev_priv->max_gmr_pages =
                        vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
                dev_priv->memory_size =
                        vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
                dev_priv->memory_size -= dev_priv->vram_size;
        } else {
                /*
                 * An arbitrary limit of 512MiB on surface
                 * memory. But all HWV8 hardware supports GMR2.
                 */
                dev_priv->memory_size = 512*1024*1024;
        }

        mutex_unlock(&dev_priv->hw_mutex);

        vmw_print_capabilities(dev_priv->capabilities);

        if (dev_priv->capabilities & SVGA_CAP_GMR) {
                DRM_INFO("Max GMR ids is %u\n",
                         (unsigned)dev_priv->max_gmr_ids);
                DRM_INFO("Max GMR descriptors is %u\n",
                         (unsigned)dev_priv->max_gmr_descriptors);
        }
        if (dev_priv->capabilities & SVGA_CAP_GMR2) {
                DRM_INFO("Max number of GMR pages is %u\n",
                         (unsigned)dev_priv->max_gmr_pages);
                DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
                         (unsigned)dev_priv->memory_size / 1024);
        }
        DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
                 dev_priv->vram_start, dev_priv->vram_size / 1024);
        DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
                 dev_priv->mmio_start, dev_priv->mmio_size / 1024);

        ret = vmw_ttm_global_init(dev_priv);
        if (unlikely(ret != 0))
                goto out_err0;

        vmw_master_init(&dev_priv->fbdev_master);
        ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
        dev_priv->active_master = &dev_priv->fbdev_master;

        ret = ttm_bo_device_init(&dev_priv->bdev,
                                 dev_priv->bo_global_ref.ref.object,
                                 &vmw_bo_driver, VMWGFX_FILE_PAGE_OFFSET,
                                 false);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed initializing TTM buffer object driver.\n");
                goto out_err1;
        }

        dev_priv->mmio_mtrr = drm_mtrr_add(dev_priv->mmio_start,
                                           dev_priv->mmio_size, DRM_MTRR_WC);

        dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start,
                                         dev_priv->mmio_size);

        if (unlikely(dev_priv->mmio_virt == NULL)) {
                ret = -ENOMEM;
                DRM_ERROR("Failed mapping MMIO.\n");
                goto out_err3;
        }

        /* Need mmio memory to check for fifo pitchlock cap. */
        if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
            !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
            !vmw_fifo_have_pitchlock(dev_priv)) {
                ret = -ENOSYS;
                DRM_ERROR("Hardware has no pitchlock\n");
                goto out_err4;
        }

        dev_priv->tdev = ttm_object_device_init
            (dev_priv->mem_global_ref.object, 12);

        if (unlikely(dev_priv->tdev == NULL)) {
                DRM_ERROR("Unable to initialize TTM object management.\n");
                ret = -ENOMEM;
                goto out_err4;
        }

        dev->dev_private = dev_priv;

        ret = pci_request_regions(dev->pdev, "vmwgfx probe");
        dev_priv->stealth = (ret != 0);
        if (dev_priv->stealth) {
                /*
                 * Request at least the mmio PCI resource.
                 */

                DRM_INFO("vesafb appears to be loaded. "
                         "Ignore the above error, if any.\n");
                ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
                        goto out_no_device;
                }
        }

        dev_priv->fman = vmw_fence_manager_init(dev_priv);
        if (unlikely(dev_priv->fman == NULL)) {
                ret = -ENOMEM;
                goto out_no_fman;
        }

        ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
                             (dev_priv->vram_size >> PAGE_SHIFT));
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed initializing memory manager for VRAM.\n");
                goto out_no_vram;
        }

        dev_priv->has_gmr = true;
        if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
                           dev_priv->max_gmr_ids) != 0) {
                DRM_INFO("No GMR memory available. "
                         "Graphics memory resources are very limited.\n");
                dev_priv->has_gmr = false;
        }

        /* Need to start the fifo to check if we can do screen objects. */
        ret = vmw_3d_resource_inc(dev_priv, true);
        if (unlikely(ret != 0))
                goto out_no_fifo;
        vmw_kms_save_vga(dev_priv);

        /* Start kms and overlay systems; both need the fifo. */
        ret = vmw_kms_init(dev_priv);
        if (unlikely(ret != 0))
                goto out_no_kms;
        vmw_overlay_init(dev_priv);

        /* 3D depends on screen objects being used. */
        DRM_INFO("Detected %sdevice 3D availability.\n",
                 vmw_fifo_have_3d(dev_priv) ?
                 "" : "no ");

        /* We might be done with the fifo now. */
        if (dev_priv->enable_fb) {
                vmw_fb_init(dev_priv);
        } else {
                vmw_kms_restore_vga(dev_priv);
                vmw_3d_resource_dec(dev_priv, true);
        }

        if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
                ret = drm_irq_install(dev);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Failed installing irq: %d\n", ret);
                        goto out_no_irq;
                }
        }

        dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
        register_pm_notifier(&dev_priv->pm_nb);

        return 0;

out_no_irq:
        if (dev_priv->enable_fb)
                vmw_fb_close(dev_priv);
        vmw_overlay_close(dev_priv);
        vmw_kms_close(dev_priv);
out_no_kms:
        /* We still have a 3D resource reference held. */
        if (dev_priv->enable_fb) {
                vmw_kms_restore_vga(dev_priv);
                vmw_3d_resource_dec(dev_priv, false);
        }
out_no_fifo:
        if (dev_priv->has_gmr)
                (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
        (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_no_vram:
        vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
        if (dev_priv->stealth)
                pci_release_region(dev->pdev, 2);
        else
                pci_release_regions(dev->pdev);
out_no_device:
        ttm_object_device_release(&dev_priv->tdev);
out_err4:
        iounmap(dev_priv->mmio_virt);
out_err3:
        drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
                     dev_priv->mmio_size, DRM_MTRR_WC);
        (void)ttm_bo_device_release(&dev_priv->bdev);
out_err1:
        vmw_ttm_global_release(dev_priv);
out_err0:
        idr_destroy(&dev_priv->surface_idr);
        idr_destroy(&dev_priv->context_idr);
        idr_destroy(&dev_priv->stream_idr);
        kfree(dev_priv);
        return ret;
}

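/**
 * vmw_driver_unload - Tear the driver down at unload time.
 *
 * @dev: Pointer to the drm device.
 *
 * Reverses vmw_driver_load, releasing resources in roughly the
 * opposite order in which they were acquired.
 */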
static int vmw_driver_unload(struct drm_device *dev)
{
        struct vmw_private *dev_priv = vmw_priv(dev);

        unregister_pm_notifier(&dev_priv->pm_nb);

        vfree(dev_priv->ctx.cmd_bounce);
        if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
                drm_irq_uninstall(dev_priv->dev);
        if (dev_priv->enable_fb) {
                vmw_fb_close(dev_priv);
                vmw_kms_restore_vga(dev_priv);
                vmw_3d_resource_dec(dev_priv, false);
        }
        vmw_kms_close(dev_priv);
        vmw_overlay_close(dev_priv);

        if (dev_priv->has_gmr)
                (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
        (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);

        vmw_fence_manager_takedown(dev_priv->fman);
        if (dev_priv->stealth)
                pci_release_region(dev->pdev, 2);
        else
                pci_release_regions(dev->pdev);

        ttm_object_device_release(&dev_priv->tdev);
        iounmap(dev_priv->mmio_virt);
        drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
                     dev_priv->mmio_size, DRM_MTRR_WC);
        (void)ttm_bo_device_release(&dev_priv->bdev);
        vmw_ttm_global_release(dev_priv);
        idr_destroy(&dev_priv->surface_idr);
        idr_destroy(&dev_priv->context_idr);
        idr_destroy(&dev_priv->stream_idr);

        kfree(dev_priv);

        return 0;
}

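/**
 * vmw_postclose - Clean up driver-private file state when a file
 * handle is closed.
 *
 * @dev: Pointer to the drm device.
 * @file_priv: Pointer to the drm file private being closed.
 */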
static void vmw_postclose(struct drm_device *dev,
                          struct drm_file *file_priv)
{
        struct vmw_fpriv *vmw_fp;

        vmw_fp = vmw_fpriv(file_priv);
        ttm_object_file_release(&vmw_fp->tfile);
        if (vmw_fp->locked_master)
                drm_master_put(&vmw_fp->locked_master);
        kfree(vmw_fp);
}

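/**
 * vmw_driver_open - Set up driver-private file state when a new file
 * handle is opened.
 *
 * @dev: Pointer to the drm device.
 * @file_priv: Pointer to the new drm file private.
 */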
static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_fpriv *vmw_fp;
        int ret = -ENOMEM;

        vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
        if (unlikely(vmw_fp == NULL))
                return ret;

        vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
        if (unlikely(vmw_fp->tfile == NULL))
                goto out_no_tfile;

        file_priv->driver_priv = vmw_fp;

        if (unlikely(dev_priv->bdev.dev_mapping == NULL))
                dev_priv->bdev.dev_mapping =
                        file_priv->filp->f_path.dentry->d_inode->i_mapping;

        return 0;

out_no_tfile:
        kfree(vmw_fp);
        return ret;
}

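/**
 * vmw_unlocked_ioctl - ioctl entry point.
 *
 * Verifies that driver-private ioctls carry the command encoding
 * registered in vmw_ioctls before handing them off to drm_ioctl().
 */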
static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
                               unsigned long arg)
{
        struct drm_file *file_priv = filp->private_data;
        struct drm_device *dev = file_priv->minor->dev;
        unsigned int nr = DRM_IOCTL_NR(cmd);

        /*
         * Do extra checking on driver private ioctls.
         */

        if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
            && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
                struct drm_ioctl_desc *ioctl =
                    &vmw_ioctls[nr - DRM_COMMAND_BASE];

                if (unlikely(ioctl->cmd_drv != cmd)) {
                        DRM_ERROR("Invalid command format, ioctl %d\n",
                                  nr - DRM_COMMAND_BASE);
                        return -EINVAL;
                }
        }

        return drm_ioctl(filp, cmd, arg);
}

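/**
 * vmw_firstopen - Note that the device has been opened, so that
 * vmw_lastclose can tell a real last close from the lastclose call
 * issued at driver unload.
 */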
static int vmw_firstopen(struct drm_device *dev)
{
        struct vmw_private *dev_priv = vmw_priv(dev);

        dev_priv->is_opened = true;

        return 0;
}

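/**
 * vmw_lastclose - Switch off all crtcs when the last file handle is
 * closed.
 */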
static void vmw_lastclose(struct drm_device *dev)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct drm_crtc *crtc;
        struct drm_mode_set set;
        int ret;

        /*
         * Do nothing on the lastclose call from drm_unload.
         */

        if (!dev_priv->is_opened)
                return;

        dev_priv->is_opened = false;
        set.x = 0;
        set.y = 0;
        set.fb = NULL;
        set.mode = NULL;
        set.connectors = NULL;
        set.num_connectors = 0;

        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                set.crtc = crtc;
                ret = crtc->funcs->set_config(&set);
                WARN_ON(ret != 0);
        }
}

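/**
 * vmw_master_init - Initialize a struct vmw_master.
 *
 * @vmaster: Pointer to the struct vmw_master to initialize.
 */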
static void vmw_master_init(struct vmw_master *vmaster)
{
        ttm_lock_init(&vmaster->lock);
        INIT_LIST_HEAD(&vmaster->fb_surf);
        mutex_init(&vmaster->fb_surf_mutex);
}

static int vmw_master_create(struct drm_device *dev,
                             struct drm_master *master)
{
        struct vmw_master *vmaster;

        vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
        if (unlikely(vmaster == NULL))
                return -ENOMEM;

        vmw_master_init(vmaster);
        ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
        master->driver_priv = vmaster;

        return 0;
}

static void vmw_master_destroy(struct drm_device *dev,
                               struct drm_master *master)
{
        struct vmw_master *vmaster = vmw_master(master);

        master->driver_priv = NULL;
        kfree(vmaster);
}

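/**
 * vmw_master_set - Called by drm when a new master takes over the
 * device.
 *
 * Starts the fifo if fbdev isn't holding a 3d resource reference,
 * suspends the previously active (fbdev) master and activates the
 * new one.
 */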
static int vmw_master_set(struct drm_device *dev,
                          struct drm_file *file_priv,
                          bool from_open)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
        struct vmw_master *active = dev_priv->active_master;
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        int ret = 0;

        if (!dev_priv->enable_fb) {
                ret = vmw_3d_resource_inc(dev_priv, true);
                if (unlikely(ret != 0))
                        return ret;
                vmw_kms_save_vga(dev_priv);
                mutex_lock(&dev_priv->hw_mutex);
                vmw_write(dev_priv, SVGA_REG_TRACES, 0);
                mutex_unlock(&dev_priv->hw_mutex);
        }

        if (active) {
                BUG_ON(active != &dev_priv->fbdev_master);
                ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
                if (unlikely(ret != 0))
                        goto out_no_active_lock;

                ttm_lock_set_kill(&active->lock, true, SIGTERM);
                ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Unable to clean VRAM on "
                                  "master drop.\n");
                }

                dev_priv->active_master = NULL;
        }

        ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
        if (!from_open) {
                ttm_vt_unlock(&vmaster->lock);
                BUG_ON(vmw_fp->locked_master != file_priv->master);
                drm_master_put(&vmw_fp->locked_master);
        }

        dev_priv->active_master = vmaster;

        return 0;

out_no_active_lock:
        if (!dev_priv->enable_fb) {
                mutex_lock(&dev_priv->hw_mutex);
                vmw_write(dev_priv, SVGA_REG_TRACES, 1);
                mutex_unlock(&dev_priv->hw_mutex);
                vmw_kms_restore_vga(dev_priv);
                vmw_3d_resource_dec(dev_priv, true);
        }
        return ret;
}

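/**
 * vmw_master_drop - Called by drm when the master is dropped, typically
 * at a vt switch or when the master file handle is released.
 *
 * Suspend-locks the dropped master, idles the fifo if fbdev isn't
 * holding a 3d resource reference, and hands the device back to the
 * fbdev master.
 */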
static void vmw_master_drop(struct drm_device *dev,
                            struct drm_file *file_priv,
                            bool from_release)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        int ret;

        /*
         * Make sure the master doesn't disappear while we have
         * it locked.
         */

        vmw_fp->locked_master = drm_master_get(file_priv->master);
        ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
        vmw_execbuf_release_pinned_bo(dev_priv, false, 0);

        if (unlikely(ret != 0)) {
                DRM_ERROR("Unable to lock TTM at VT switch.\n");
                drm_master_put(&vmw_fp->locked_master);
        }

        ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);

        if (!dev_priv->enable_fb) {
                ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
                if (unlikely(ret != 0))
                        DRM_ERROR("Unable to clean VRAM on master drop.\n");
                mutex_lock(&dev_priv->hw_mutex);
                vmw_write(dev_priv, SVGA_REG_TRACES, 1);
                mutex_unlock(&dev_priv->hw_mutex);
                vmw_kms_restore_vga(dev_priv);
                vmw_3d_resource_dec(dev_priv, true);
        }

        dev_priv->active_master = &dev_priv->fbdev_master;
        ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
        ttm_vt_unlock(&dev_priv->fbdev_master.lock);

        if (dev_priv->enable_fb)
                vmw_fb_on(dev_priv);
}

static void vmw_remove(struct pci_dev *pdev)
{
        struct drm_device *dev = pci_get_drvdata(pdev);

        drm_put_dev(dev);
}

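/**
 * vmwgfx_pm_notifier - Notifier callback for suspend and hibernation.
 *
 * Takes the ttm suspend lock and swaps out all buffer objects before
 * suspend, and drops the lock again once the system resumes.
 */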
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
                              void *ptr)
{
        struct vmw_private *dev_priv =
                container_of(nb, struct vmw_private, pm_nb);
        struct vmw_master *vmaster = dev_priv->active_master;

        switch (val) {
        case PM_HIBERNATION_PREPARE:
        case PM_SUSPEND_PREPARE:
                ttm_suspend_lock(&vmaster->lock);

                /*
                 * This empties VRAM and unbinds all GMR bindings.
                 * Buffer contents are moved to swappable memory.
                 */
                vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
                ttm_bo_swapout_all(&dev_priv->bdev);

                break;
        case PM_POST_HIBERNATION:
        case PM_POST_SUSPEND:
        case PM_POST_RESTORE:
                ttm_suspend_unlock(&vmaster->lock);

                break;
        case PM_RESTORE_PREPARE:
                break;
        default:
                break;
        }
        return 0;
}

/*
 * These might not be needed with the virtual SVGA device.
 */

static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct drm_device *dev = pci_get_drvdata(pdev);
        struct vmw_private *dev_priv = vmw_priv(dev);

        if (dev_priv->num_3d_resources != 0) {
                DRM_INFO("Can't suspend or hibernate "
                         "while 3D resources are active.\n");
                return -EBUSY;
        }

        pci_save_state(pdev);
        pci_disable_device(pdev);
        pci_set_power_state(pdev, PCI_D3hot);
        return 0;
}

static int vmw_pci_resume(struct pci_dev *pdev)
{
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
        return pci_enable_device(pdev);
}

static int vmw_pm_suspend(struct device *kdev)
{
        struct pci_dev *pdev = to_pci_dev(kdev);
        struct pm_message dummy;

        dummy.event = 0;

        return vmw_pci_suspend(pdev, dummy);
}

static int vmw_pm_resume(struct device *kdev)
{
        struct pci_dev *pdev = to_pci_dev(kdev);

        return vmw_pci_resume(pdev);
}

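/**
 * vmw_pm_prepare - Called before suspend or hibernation.
 *
 * Releases the 3d resource reference held by fbdev, and fails with
 * -EBUSY if other 3d resources are still active.
 */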
static int vmw_pm_prepare(struct device *kdev)
{
        struct pci_dev *pdev = to_pci_dev(kdev);
        struct drm_device *dev = pci_get_drvdata(pdev);
        struct vmw_private *dev_priv = vmw_priv(dev);

        /*
         * Release 3d reference held by fbdev and potentially
         * stop fifo.
         */
        dev_priv->suspended = true;
        if (dev_priv->enable_fb)
                vmw_3d_resource_dec(dev_priv, true);

        if (dev_priv->num_3d_resources != 0) {
                DRM_INFO("Can't suspend or hibernate "
                         "while 3D resources are active.\n");

                if (dev_priv->enable_fb)
                        vmw_3d_resource_inc(dev_priv, true);
                dev_priv->suspended = false;
                return -EBUSY;
        }

        return 0;
}

static void vmw_pm_complete(struct device *kdev)
{
        struct pci_dev *pdev = to_pci_dev(kdev);
        struct drm_device *dev = pci_get_drvdata(pdev);
        struct vmw_private *dev_priv = vmw_priv(dev);

        mutex_lock(&dev_priv->hw_mutex);
        vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
        (void) vmw_read(dev_priv, SVGA_REG_ID);
        mutex_unlock(&dev_priv->hw_mutex);

        /*
         * Reclaim 3d reference held by fbdev and potentially
         * start fifo.
         */
        if (dev_priv->enable_fb)
                vmw_3d_resource_inc(dev_priv, false);

        dev_priv->suspended = false;
}

static const struct dev_pm_ops vmw_pm_ops = {
        .prepare = vmw_pm_prepare,
        .complete = vmw_pm_complete,
        .suspend = vmw_pm_suspend,
        .resume = vmw_pm_resume,
};

static struct drm_driver driver = {
        .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
        DRIVER_MODESET,
        .load = vmw_driver_load,
        .unload = vmw_driver_unload,
        .firstopen = vmw_firstopen,
        .lastclose = vmw_lastclose,
        .irq_preinstall = vmw_irq_preinstall,
        .irq_postinstall = vmw_irq_postinstall,
        .irq_uninstall = vmw_irq_uninstall,
        .irq_handler = vmw_irq_handler,
        .get_vblank_counter = vmw_get_vblank_counter,
        .enable_vblank = vmw_enable_vblank,
        .disable_vblank = vmw_disable_vblank,
        .reclaim_buffers_locked = NULL,
        .ioctls = vmw_ioctls,
        .num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls),
        .dma_quiescent = NULL,  /*vmw_dma_quiescent, */
        .master_create = vmw_master_create,
        .master_destroy = vmw_master_destroy,
        .master_set = vmw_master_set,
        .master_drop = vmw_master_drop,
        .open = vmw_driver_open,
        .postclose = vmw_postclose,

        .dumb_create = vmw_dumb_create,
        .dumb_map_offset = vmw_dumb_map_offset,
        .dumb_destroy = vmw_dumb_destroy,

        .fops = {
                 .owner = THIS_MODULE,
                 .open = drm_open,
                 .release = drm_release,
                 .unlocked_ioctl = vmw_unlocked_ioctl,
                 .mmap = vmw_mmap,
                 .poll = vmw_fops_poll,
                 .read = vmw_fops_read,
                 .fasync = drm_fasync,
#if defined(CONFIG_COMPAT)
                 .compat_ioctl = drm_compat_ioctl,
#endif
                 .llseek = noop_llseek,
        },
        .name = VMWGFX_DRIVER_NAME,
        .desc = VMWGFX_DRIVER_DESC,
        .date = VMWGFX_DRIVER_DATE,
        .major = VMWGFX_DRIVER_MAJOR,
        .minor = VMWGFX_DRIVER_MINOR,
        .patchlevel = VMWGFX_DRIVER_PATCHLEVEL
};

static struct pci_driver vmw_pci_driver = {
        .name = VMWGFX_DRIVER_NAME,
        .id_table = vmw_pci_id_list,
        .probe = vmw_probe,
        .remove = vmw_remove,
        .driver = {
                .pm = &vmw_pm_ops
        }
};

static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        return drm_get_pci_dev(pdev, ent, &driver);
}

static int __init vmwgfx_init(void)
{
        int ret;

        ret = drm_pci_init(&driver, &vmw_pci_driver);
        if (ret)
                DRM_ERROR("Failed initializing DRM.\n");
        return ret;
}

static void __exit vmwgfx_exit(void)
{
        drm_pci_exit(&driver, &vmw_pci_driver);
}

module_init(vmwgfx_init);
module_exit(vmwgfx_exit);

MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
               __stringify(VMWGFX_DRIVER_MINOR) "."
               __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
               "0");