drm/vmwgfx: add MODULE_DEVICE_TABLE so vmwgfx loads at boot
[pandora-kernel.git] drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/module.h>

#include "drmP.h"
#include "vmwgfx_drv.h"
#include "ttm/ttm_placement.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_object.h"
#include "ttm/ttm_module.h"

#define VMWGFX_DRIVER_NAME "vmwgfx"
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
#define VMWGFX_CHIP_SVGAII 0
#define VMW_FB_RESERVATION 0

/**
 * Fully encoded drm commands. Might move to vmw_drm.h
 */

#define DRM_IOCTL_VMW_GET_PARAM                                 \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM,          \
                 struct drm_vmw_getparam_arg)
#define DRM_IOCTL_VMW_ALLOC_DMABUF                              \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF,       \
                union drm_vmw_alloc_dmabuf_arg)
#define DRM_IOCTL_VMW_UNREF_DMABUF                              \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF,        \
                struct drm_vmw_unref_dmabuf_arg)
#define DRM_IOCTL_VMW_CURSOR_BYPASS                             \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS,       \
                 struct drm_vmw_cursor_bypass_arg)

#define DRM_IOCTL_VMW_CONTROL_STREAM                            \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM,      \
                 struct drm_vmw_control_stream_arg)
#define DRM_IOCTL_VMW_CLAIM_STREAM                              \
        DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM,        \
                 struct drm_vmw_stream_arg)
#define DRM_IOCTL_VMW_UNREF_STREAM                              \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM,        \
                 struct drm_vmw_stream_arg)

#define DRM_IOCTL_VMW_CREATE_CONTEXT                            \
        DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT,      \
                struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_UNREF_CONTEXT                             \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT,       \
                struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_CREATE_SURFACE                            \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE,     \
                 union drm_vmw_surface_create_arg)
#define DRM_IOCTL_VMW_UNREF_SURFACE                             \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE,       \
                 struct drm_vmw_surface_arg)
#define DRM_IOCTL_VMW_REF_SURFACE                               \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE,        \
                 union drm_vmw_surface_reference_arg)
#define DRM_IOCTL_VMW_EXECBUF                                   \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF,             \
                struct drm_vmw_execbuf_arg)
#define DRM_IOCTL_VMW_GET_3D_CAP                                \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP,          \
                 struct drm_vmw_get_3d_cap_arg)
#define DRM_IOCTL_VMW_FENCE_WAIT                                \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT,         \
                 struct drm_vmw_fence_wait_arg)
#define DRM_IOCTL_VMW_FENCE_SIGNALED                            \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED,     \
                 struct drm_vmw_fence_signaled_arg)
#define DRM_IOCTL_VMW_FENCE_UNREF                               \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF,         \
                 struct drm_vmw_fence_arg)
#define DRM_IOCTL_VMW_FENCE_EVENT                               \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT,         \
                 struct drm_vmw_fence_event_arg)
#define DRM_IOCTL_VMW_PRESENT                                   \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT,             \
                 struct drm_vmw_present_arg)
#define DRM_IOCTL_VMW_PRESENT_READBACK                          \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK,    \
                 struct drm_vmw_present_readback_arg)
#define DRM_IOCTL_VMW_UPDATE_LAYOUT                             \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT,       \
                 struct drm_vmw_update_layout_arg)

/**
 * The core DRM version of this macro doesn't account for
 * DRM_COMMAND_BASE.
 */

#define VMW_IOCTL_DEF(ioctl, func, flags) \
  [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_##ioctl, flags, func, DRM_IOCTL_##ioctl}
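/*
 * For example, VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl, flags)
 * expands (roughly) to
 *
 *   [DRM_VMW_GET_PARAM] = {DRM_VMW_GET_PARAM, flags, vmw_getparam_ioctl,
 *                          DRM_IOCTL_VMW_GET_PARAM}
 *
 * i.e. the table below is indexed by the driver-private command number
 * with DRM_COMMAND_BASE already subtracted out.
 */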

/**
 * Ioctl definitions.
 */

static struct drm_ioctl_desc vmw_ioctls[] = {
        VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
                      vmw_kms_cursor_bypass_ioctl,
                      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),

        VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
                      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
                      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
                      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),

        VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
                      vmw_fence_obj_signaled_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_FENCE_EVENT,
                      vmw_fence_event_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),

        /* These allow direct access to the framebuffers; mark as master only. */
        VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
                      DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
                      vmw_present_readback_ioctl,
                      DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
                      vmw_kms_update_layout_ioctl,
                      DRM_MASTER | DRM_UNLOCKED),
};

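/*
 * PCI ids of the devices this driver binds to. Exporting the table with
 * MODULE_DEVICE_TABLE() below generates a modalias for the VMware SVGA II
 * device (PCI 15ad:0405), which is what lets udev/modprobe autoload
 * vmwgfx at boot when the device is discovered.
 */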
static struct pci_device_id vmw_pci_id_list[] = {
        {0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
        {0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);

static int enable_fbdev;

static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static void vmw_master_init(struct vmw_master *);
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
                              void *ptr);

MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
module_param_named(enable_fbdev, enable_fbdev, int, 0600);

static void vmw_print_capabilities(uint32_t capabilities)
{
        DRM_INFO("Capabilities:\n");
        if (capabilities & SVGA_CAP_RECT_COPY)
                DRM_INFO("  Rect copy.\n");
        if (capabilities & SVGA_CAP_CURSOR)
                DRM_INFO("  Cursor.\n");
        if (capabilities & SVGA_CAP_CURSOR_BYPASS)
                DRM_INFO("  Cursor bypass.\n");
        if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
                DRM_INFO("  Cursor bypass 2.\n");
        if (capabilities & SVGA_CAP_8BIT_EMULATION)
                DRM_INFO("  8bit emulation.\n");
        if (capabilities & SVGA_CAP_ALPHA_CURSOR)
                DRM_INFO("  Alpha cursor.\n");
        if (capabilities & SVGA_CAP_3D)
                DRM_INFO("  3D.\n");
        if (capabilities & SVGA_CAP_EXTENDED_FIFO)
                DRM_INFO("  Extended Fifo.\n");
        if (capabilities & SVGA_CAP_MULTIMON)
                DRM_INFO("  Multimon.\n");
        if (capabilities & SVGA_CAP_PITCHLOCK)
                DRM_INFO("  Pitchlock.\n");
        if (capabilities & SVGA_CAP_IRQMASK)
                DRM_INFO("  Irq mask.\n");
        if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
                DRM_INFO("  Display Topology.\n");
        if (capabilities & SVGA_CAP_GMR)
                DRM_INFO("  GMR.\n");
        if (capabilities & SVGA_CAP_TRACES)
                DRM_INFO("  Traces.\n");
        if (capabilities & SVGA_CAP_GMR2)
                DRM_INFO("  GMR2.\n");
        if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
                DRM_INFO("  Screen Object 2.\n");
}


/**
 * vmw_dummy_query_bo_prepare - Initialize a query result structure at
 * the start of a buffer object.
 *
 * @dev_priv: The device private structure.
 *
 * This function will idle the buffer using an uninterruptible wait, then
 * map the first page and initialize a pending occlusion query result
 * structure. Finally it will unmap the buffer.
 *
 * TODO: Since we're only mapping a single page, we should optimize the map
 * to use kmap_atomic / iomap_atomic.
 */
static void vmw_dummy_query_bo_prepare(struct vmw_private *dev_priv)
{
        struct ttm_bo_kmap_obj map;
        volatile SVGA3dQueryResult *result;
        bool dummy;
        int ret;
        struct ttm_bo_device *bdev = &dev_priv->bdev;
        struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;

        ttm_bo_reserve(bo, false, false, false, 0);
        spin_lock(&bdev->fence_lock);
        ret = ttm_bo_wait(bo, false, false, false);
        spin_unlock(&bdev->fence_lock);
        if (unlikely(ret != 0))
                (void) vmw_fallback_wait(dev_priv, false, true, 0, false,
                                         10*HZ);

        ret = ttm_bo_kmap(bo, 0, 1, &map);
        if (likely(ret == 0)) {
                result = ttm_kmap_obj_virtual(&map, &dummy);
                result->totalSize = sizeof(*result);
                result->state = SVGA3D_QUERYSTATE_PENDING;
                result->result32 = 0xff;
                ttm_bo_kunmap(&map);
        } else
                DRM_ERROR("Dummy query buffer map failed.\n");
        ttm_bo_unreserve(bo);
}


/**
 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
 *
 * @dev_priv: A device private structure.
 *
 * This function creates a small buffer object that holds the query
 * result for dummy queries emitted as query barriers.
 * No interruptible waits are done within this function.
 *
 * Returns an error if bo creation fails.
 */
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
        return ttm_bo_create(&dev_priv->bdev,
                             PAGE_SIZE,
                             ttm_bo_type_device,
                             &vmw_vram_sys_placement,
                             0, 0, false, NULL,
                             &dev_priv->dummy_query_bo);
}

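/*
 * vmw_request_device - bring up the device acceleration resources.
 *
 * Initializes the fifo, brings fence handling up and creates and
 * prepares the dummy query bo. Called when the 3D resource refcount
 * goes from zero to one.
 */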
static int vmw_request_device(struct vmw_private *dev_priv)
{
        int ret;

        ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Unable to initialize FIFO.\n");
                return ret;
        }
        vmw_fence_fifo_up(dev_priv->fman);
        ret = vmw_dummy_query_bo_create(dev_priv);
        if (unlikely(ret != 0))
                goto out_no_query_bo;
        vmw_dummy_query_bo_prepare(dev_priv);

        return 0;

out_no_query_bo:
        vmw_fence_fifo_down(dev_priv->fman);
        vmw_fifo_release(dev_priv, &dev_priv->fifo);
        return ret;
}

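/*
 * vmw_release_device - counterpart of vmw_request_device.
 *
 * Destroys the dummy query bo and takes fence handling and the fifo
 * down. Called when the 3D resource refcount drops back to zero.
 */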
static void vmw_release_device(struct vmw_private *dev_priv)
{
        /*
         * Previous destructions should've released
         * the pinned bo.
         */

        BUG_ON(dev_priv->pinned_bo != NULL);

        ttm_bo_unref(&dev_priv->dummy_query_bo);
        vmw_fence_fifo_down(dev_priv->fman);
        vmw_fifo_release(dev_priv, &dev_priv->fifo);
}

/**
 * Increase the 3d resource refcount.
 * If the count was previously zero, initialize the fifo, switching to svga
 * mode. Note that the master holds a ref as well, and may request an
 * explicit switch to svga mode if fb is not running, using @unhide_svga.
 */
int vmw_3d_resource_inc(struct vmw_private *dev_priv,
                        bool unhide_svga)
{
        int ret = 0;

        mutex_lock(&dev_priv->release_mutex);
        if (unlikely(dev_priv->num_3d_resources++ == 0)) {
                ret = vmw_request_device(dev_priv);
                if (unlikely(ret != 0))
                        --dev_priv->num_3d_resources;
        } else if (unhide_svga) {
                mutex_lock(&dev_priv->hw_mutex);
                vmw_write(dev_priv, SVGA_REG_ENABLE,
                          vmw_read(dev_priv, SVGA_REG_ENABLE) &
                          ~SVGA_REG_ENABLE_HIDE);
                mutex_unlock(&dev_priv->hw_mutex);
        }

        mutex_unlock(&dev_priv->release_mutex);
        return ret;
}

/**
 * Decrease the 3d resource refcount.
 * If the count reaches zero, disable the fifo, switching to vga mode.
 * Note that the master holds a refcount as well, and may request an
 * explicit switch to vga mode when it releases its refcount to account
 * for the situation of an X server vt switch to VGA with 3d resources
 * active.
 */
void vmw_3d_resource_dec(struct vmw_private *dev_priv,
                         bool hide_svga)
{
        int32_t n3d;

        mutex_lock(&dev_priv->release_mutex);
        if (unlikely(--dev_priv->num_3d_resources == 0))
                vmw_release_device(dev_priv);
        else if (hide_svga) {
                mutex_lock(&dev_priv->hw_mutex);
                vmw_write(dev_priv, SVGA_REG_ENABLE,
                          vmw_read(dev_priv, SVGA_REG_ENABLE) |
                          SVGA_REG_ENABLE_HIDE);
                mutex_unlock(&dev_priv->hw_mutex);
        }

        n3d = (int32_t) dev_priv->num_3d_resources;
        mutex_unlock(&dev_priv->release_mutex);

        BUG_ON(n3d < 0);
}

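/*
 * vmw_driver_load - main device probe path.
 *
 * Detects the SVGA device, reads its capability and memory registers,
 * then sets up TTM, the MMIO mapping, the fence manager, kms and
 * overlays, optionally the fbdev, and finally the irq and pm notifier.
 * Errors unwind in reverse order through the labels at the bottom.
 */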
static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
        struct vmw_private *dev_priv;
        int ret;
        uint32_t svga_id;

        dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
        if (unlikely(dev_priv == NULL)) {
                DRM_ERROR("Failed allocating a device private struct.\n");
                return -ENOMEM;
        }

        dev_priv->dev = dev;
        dev_priv->vmw_chipset = chipset;
        dev_priv->last_read_seqno = (uint32_t) -100;
        mutex_init(&dev_priv->hw_mutex);
        mutex_init(&dev_priv->cmdbuf_mutex);
        mutex_init(&dev_priv->release_mutex);
        rwlock_init(&dev_priv->resource_lock);
        idr_init(&dev_priv->context_idr);
        idr_init(&dev_priv->surface_idr);
        idr_init(&dev_priv->stream_idr);
        mutex_init(&dev_priv->init_mutex);
        init_waitqueue_head(&dev_priv->fence_queue);
        init_waitqueue_head(&dev_priv->fifo_queue);
        dev_priv->fence_queue_waiters = 0;
        atomic_set(&dev_priv->fifo_queue_waiters, 0);
        INIT_LIST_HEAD(&dev_priv->surface_lru);
        dev_priv->used_memory_size = 0;

        dev_priv->io_start = pci_resource_start(dev->pdev, 0);
        dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
        dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);

        dev_priv->enable_fb = enable_fbdev;

        mutex_lock(&dev_priv->hw_mutex);

        vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
        svga_id = vmw_read(dev_priv, SVGA_REG_ID);
        if (svga_id != SVGA_ID_2) {
                ret = -ENOSYS;
                DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
                mutex_unlock(&dev_priv->hw_mutex);
                goto out_err0;
        }

        dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);

        dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
        dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
        dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
        dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);
        if (dev_priv->capabilities & SVGA_CAP_GMR) {
                dev_priv->max_gmr_descriptors =
                        vmw_read(dev_priv,
                                 SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH);
                dev_priv->max_gmr_ids =
                        vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
        }
        if (dev_priv->capabilities & SVGA_CAP_GMR2) {
                dev_priv->max_gmr_pages =
                        vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
                dev_priv->memory_size =
                        vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
                dev_priv->memory_size -= dev_priv->vram_size;
        } else {
                /*
                 * An arbitrary limit of 512MiB on surface
                 * memory. But all HWV8 hardware supports GMR2.
                 */
                dev_priv->memory_size = 512*1024*1024;
        }

        mutex_unlock(&dev_priv->hw_mutex);

        vmw_print_capabilities(dev_priv->capabilities);

        if (dev_priv->capabilities & SVGA_CAP_GMR) {
                DRM_INFO("Max GMR ids is %u\n",
                         (unsigned)dev_priv->max_gmr_ids);
                DRM_INFO("Max GMR descriptors is %u\n",
                         (unsigned)dev_priv->max_gmr_descriptors);
        }
        if (dev_priv->capabilities & SVGA_CAP_GMR2) {
                DRM_INFO("Max number of GMR pages is %u\n",
                         (unsigned)dev_priv->max_gmr_pages);
                DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
                         (unsigned)dev_priv->memory_size / 1024);
        }
        DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
                 dev_priv->vram_start, dev_priv->vram_size / 1024);
        DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
                 dev_priv->mmio_start, dev_priv->mmio_size / 1024);

        ret = vmw_ttm_global_init(dev_priv);
        if (unlikely(ret != 0))
                goto out_err0;


        vmw_master_init(&dev_priv->fbdev_master);
        ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
        dev_priv->active_master = &dev_priv->fbdev_master;


        ret = ttm_bo_device_init(&dev_priv->bdev,
                                 dev_priv->bo_global_ref.ref.object,
                                 &vmw_bo_driver, VMWGFX_FILE_PAGE_OFFSET,
                                 false);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed initializing TTM buffer object driver.\n");
                goto out_err1;
        }

        ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
                             (dev_priv->vram_size >> PAGE_SHIFT));
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed initializing memory manager for VRAM.\n");
                goto out_err2;
        }

        dev_priv->has_gmr = true;
        if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
                           dev_priv->max_gmr_ids) != 0) {
                DRM_INFO("No GMR memory available. "
                         "Graphics memory resources are very limited.\n");
                dev_priv->has_gmr = false;
        }

        dev_priv->mmio_mtrr = drm_mtrr_add(dev_priv->mmio_start,
                                           dev_priv->mmio_size, DRM_MTRR_WC);

        dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start,
                                         dev_priv->mmio_size);

        if (unlikely(dev_priv->mmio_virt == NULL)) {
                ret = -ENOMEM;
                DRM_ERROR("Failed mapping MMIO.\n");
                goto out_err3;
        }

        /* Need mmio memory to check for fifo pitchlock cap. */
        if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
            !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
            !vmw_fifo_have_pitchlock(dev_priv)) {
                ret = -ENOSYS;
                DRM_ERROR("Hardware has no pitchlock\n");
                goto out_err4;
        }

        dev_priv->tdev = ttm_object_device_init
            (dev_priv->mem_global_ref.object, 12);

        if (unlikely(dev_priv->tdev == NULL)) {
                DRM_ERROR("Unable to initialize TTM object management.\n");
                ret = -ENOMEM;
                goto out_err4;
        }

        dev->dev_private = dev_priv;

        ret = pci_request_regions(dev->pdev, "vmwgfx probe");
        dev_priv->stealth = (ret != 0);
        if (dev_priv->stealth) {
                /**
                 * Request at least the mmio PCI resource.
                 */

                DRM_INFO("It appears like vesafb is loaded. "
                         "Ignore above error if any.\n");
                ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
                        goto out_no_device;
                }
        }

        dev_priv->fman = vmw_fence_manager_init(dev_priv);
        if (unlikely(dev_priv->fman == NULL))
                goto out_no_fman;

        /* Need to start the fifo to check if we can do screen objects */
        ret = vmw_3d_resource_inc(dev_priv, true);
        if (unlikely(ret != 0))
                goto out_no_fifo;
        vmw_kms_save_vga(dev_priv);

        /* Start kms and overlay systems, needs fifo. */
        ret = vmw_kms_init(dev_priv);
        if (unlikely(ret != 0))
                goto out_no_kms;
        vmw_overlay_init(dev_priv);

        /* 3D depends on screen objects being used. */
        DRM_INFO("Detected %sdevice 3D availability.\n",
                 vmw_fifo_have_3d(dev_priv) ?
                 "" : "no ");

        /* We might be done with the fifo now */
        if (dev_priv->enable_fb) {
                vmw_fb_init(dev_priv);
        } else {
                vmw_kms_restore_vga(dev_priv);
                vmw_3d_resource_dec(dev_priv, true);
        }

        if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
                ret = drm_irq_install(dev);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Failed installing irq: %d\n", ret);
                        goto out_no_irq;
                }
        }

        dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
        register_pm_notifier(&dev_priv->pm_nb);

        return 0;

out_no_irq:
        if (dev_priv->enable_fb)
                vmw_fb_close(dev_priv);
        vmw_overlay_close(dev_priv);
        vmw_kms_close(dev_priv);
out_no_kms:
        /* We still have a 3D resource reference held */
        if (dev_priv->enable_fb) {
                vmw_kms_restore_vga(dev_priv);
                vmw_3d_resource_dec(dev_priv, false);
        }
out_no_fifo:
        vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
        if (dev_priv->stealth)
                pci_release_region(dev->pdev, 2);
        else
                pci_release_regions(dev->pdev);
out_no_device:
        ttm_object_device_release(&dev_priv->tdev);
out_err4:
        iounmap(dev_priv->mmio_virt);
out_err3:
        drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
                     dev_priv->mmio_size, DRM_MTRR_WC);
        if (dev_priv->has_gmr)
                (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
        (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_err2:
        (void)ttm_bo_device_release(&dev_priv->bdev);
out_err1:
        vmw_ttm_global_release(dev_priv);
out_err0:
        idr_destroy(&dev_priv->surface_idr);
        idr_destroy(&dev_priv->context_idr);
        idr_destroy(&dev_priv->stream_idr);
        kfree(dev_priv);
        return ret;
}

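/*
 * vmw_driver_unload - undo vmw_driver_load, releasing resources in
 * roughly the reverse order they were acquired.
 */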
static int vmw_driver_unload(struct drm_device *dev)
{
        struct vmw_private *dev_priv = vmw_priv(dev);

        unregister_pm_notifier(&dev_priv->pm_nb);

        vfree(dev_priv->ctx.cmd_bounce);
        if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
                drm_irq_uninstall(dev_priv->dev);
        if (dev_priv->enable_fb) {
                vmw_fb_close(dev_priv);
                vmw_kms_restore_vga(dev_priv);
                vmw_3d_resource_dec(dev_priv, false);
        }
        vmw_kms_close(dev_priv);
        vmw_overlay_close(dev_priv);
        vmw_fence_manager_takedown(dev_priv->fman);
        if (dev_priv->stealth)
                pci_release_region(dev->pdev, 2);
        else
                pci_release_regions(dev->pdev);

        ttm_object_device_release(&dev_priv->tdev);
        iounmap(dev_priv->mmio_virt);
        drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
                     dev_priv->mmio_size, DRM_MTRR_WC);
        if (dev_priv->has_gmr)
                (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
        (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
        (void)ttm_bo_device_release(&dev_priv->bdev);
        vmw_ttm_global_release(dev_priv);
        idr_destroy(&dev_priv->surface_idr);
        idr_destroy(&dev_priv->context_idr);
        idr_destroy(&dev_priv->stream_idr);

        kfree(dev_priv);

        return 0;
}

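/*
 * vmw_postclose - per-file cleanup; releases the ttm object file and
 * any master reference still held after a vt switch.
 */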
static void vmw_postclose(struct drm_device *dev,
                          struct drm_file *file_priv)
{
        struct vmw_fpriv *vmw_fp;

        vmw_fp = vmw_fpriv(file_priv);
        ttm_object_file_release(&vmw_fp->tfile);
        if (vmw_fp->locked_master)
                drm_master_put(&vmw_fp->locked_master);
        kfree(vmw_fp);
}

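/*
 * vmw_driver_open - per-file setup; allocates the file private and its
 * ttm object file, and records the address space used for bo mappings.
 */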
static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_fpriv *vmw_fp;
        int ret = -ENOMEM;

        vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
        if (unlikely(vmw_fp == NULL))
                return ret;

        vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
        if (unlikely(vmw_fp->tfile == NULL))
                goto out_no_tfile;

        file_priv->driver_priv = vmw_fp;

        if (unlikely(dev_priv->bdev.dev_mapping == NULL))
                dev_priv->bdev.dev_mapping =
                        file_priv->filp->f_path.dentry->d_inode->i_mapping;

        return 0;

out_no_tfile:
        kfree(vmw_fp);
        return ret;
}

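/*
 * vmw_unlocked_ioctl - ioctl entry point.
 *
 * Checks that a driver-private ioctl was issued with the command
 * encoding the driver registered (see VMW_IOCTL_DEF above) before
 * handing it to drm_ioctl().
 */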
static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
                               unsigned long arg)
{
        struct drm_file *file_priv = filp->private_data;
        struct drm_device *dev = file_priv->minor->dev;
        unsigned int nr = DRM_IOCTL_NR(cmd);

        /*
         * Do extra checking on driver private ioctls.
         */

        if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
            && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
                struct drm_ioctl_desc *ioctl =
                    &vmw_ioctls[nr - DRM_COMMAND_BASE];

                if (unlikely(ioctl->cmd_drv != cmd)) {
                        DRM_ERROR("Invalid command format, ioctl %d\n",
                                  nr - DRM_COMMAND_BASE);
                        return -EINVAL;
                }
        }

        return drm_ioctl(filp, cmd, arg);
}

static int vmw_firstopen(struct drm_device *dev)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        dev_priv->is_opened = true;

        return 0;
}

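/*
 * vmw_lastclose - disable all crtcs when the last file handle is
 * closed, unless this is the implicit lastclose from driver unload.
 */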
static void vmw_lastclose(struct drm_device *dev)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct drm_crtc *crtc;
        struct drm_mode_set set;
        int ret;

        /**
         * Do nothing on the lastclose call from drm_unload.
         */

        if (!dev_priv->is_opened)
                return;

        dev_priv->is_opened = false;
        set.x = 0;
        set.y = 0;
        set.fb = NULL;
        set.mode = NULL;
        set.connectors = NULL;
        set.num_connectors = 0;

        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                set.crtc = crtc;
                ret = crtc->funcs->set_config(&set);
                WARN_ON(ret != 0);
        }
}

static void vmw_master_init(struct vmw_master *vmaster)
{
        ttm_lock_init(&vmaster->lock);
        INIT_LIST_HEAD(&vmaster->fb_surf);
        mutex_init(&vmaster->fb_surf_mutex);
}

static int vmw_master_create(struct drm_device *dev,
                             struct drm_master *master)
{
        struct vmw_master *vmaster;

        vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
        if (unlikely(vmaster == NULL))
                return -ENOMEM;

        vmw_master_init(vmaster);
        ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
        master->driver_priv = vmaster;

        return 0;
}

static void vmw_master_destroy(struct drm_device *dev,
                               struct drm_master *master)
{
        struct vmw_master *vmaster = vmw_master(master);

        master->driver_priv = NULL;
        kfree(vmaster);
}

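/*
 * vmw_master_set - a new master is taking over.
 *
 * Without fbdev, takes a 3D resource reference and disables traces.
 * Then suspends the previous (fbdev) master, evicting VRAM, and
 * transfers the ttm lock to the new master.
 */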
static int vmw_master_set(struct drm_device *dev,
                          struct drm_file *file_priv,
                          bool from_open)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
        struct vmw_master *active = dev_priv->active_master;
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        int ret = 0;

        if (!dev_priv->enable_fb) {
                ret = vmw_3d_resource_inc(dev_priv, true);
                if (unlikely(ret != 0))
                        return ret;
                vmw_kms_save_vga(dev_priv);
                mutex_lock(&dev_priv->hw_mutex);
                vmw_write(dev_priv, SVGA_REG_TRACES, 0);
                mutex_unlock(&dev_priv->hw_mutex);
        }

        if (active) {
                BUG_ON(active != &dev_priv->fbdev_master);
                ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
                if (unlikely(ret != 0))
                        goto out_no_active_lock;

                ttm_lock_set_kill(&active->lock, true, SIGTERM);
                ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Unable to clean VRAM on "
                                  "master drop.\n");
                }

                dev_priv->active_master = NULL;
        }

        ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
        if (!from_open) {
                ttm_vt_unlock(&vmaster->lock);
                BUG_ON(vmw_fp->locked_master != file_priv->master);
                drm_master_put(&vmw_fp->locked_master);
        }

        dev_priv->active_master = vmaster;

        return 0;

out_no_active_lock:
        if (!dev_priv->enable_fb) {
                mutex_lock(&dev_priv->hw_mutex);
                vmw_write(dev_priv, SVGA_REG_TRACES, 1);
                mutex_unlock(&dev_priv->hw_mutex);
                vmw_kms_restore_vga(dev_priv);
                vmw_3d_resource_dec(dev_priv, true);
        }
        return ret;
}

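/*
 * vmw_master_drop - the current master is dropped (vt switch or file
 * release).
 *
 * Holds a reference to the dropped master while its ttm lock is taken,
 * releases the pinned query bo, optionally evicts VRAM and re-enables
 * traces, and switches back to the fbdev master.
 */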
static void vmw_master_drop(struct drm_device *dev,
                            struct drm_file *file_priv,
                            bool from_release)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        int ret;

        /**
         * Make sure the master doesn't disappear while we have
         * it locked.
         */

        vmw_fp->locked_master = drm_master_get(file_priv->master);
        ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
        vmw_execbuf_release_pinned_bo(dev_priv, false, 0);

        if (unlikely(ret != 0)) {
                DRM_ERROR("Unable to lock TTM at VT switch.\n");
                drm_master_put(&vmw_fp->locked_master);
        }

        ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);

        if (!dev_priv->enable_fb) {
                ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
                if (unlikely(ret != 0))
                        DRM_ERROR("Unable to clean VRAM on master drop.\n");
                mutex_lock(&dev_priv->hw_mutex);
                vmw_write(dev_priv, SVGA_REG_TRACES, 1);
                mutex_unlock(&dev_priv->hw_mutex);
                vmw_kms_restore_vga(dev_priv);
                vmw_3d_resource_dec(dev_priv, true);
        }

        dev_priv->active_master = &dev_priv->fbdev_master;
        ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
        ttm_vt_unlock(&dev_priv->fbdev_master.lock);

        if (dev_priv->enable_fb)
                vmw_fb_on(dev_priv);
}

static void vmw_remove(struct pci_dev *pdev)
{
        struct drm_device *dev = pci_get_drvdata(pdev);

        drm_put_dev(dev);
}

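/*
 * vmwgfx_pm_notifier - pm notifier callback.
 *
 * On suspend/hibernate prepare, takes the ttm suspend lock, releases
 * the pinned query bo and swaps out all buffer objects; the lock is
 * dropped again once the transition has completed.
 */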
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
                              void *ptr)
{
        struct vmw_private *dev_priv =
                container_of(nb, struct vmw_private, pm_nb);
        struct vmw_master *vmaster = dev_priv->active_master;

        switch (val) {
        case PM_HIBERNATION_PREPARE:
        case PM_SUSPEND_PREPARE:
                ttm_suspend_lock(&vmaster->lock);

                /**
                 * This empties VRAM and unbinds all GMR bindings.
                 * Buffer contents are moved to swappable memory.
                 */
                vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
                ttm_bo_swapout_all(&dev_priv->bdev);

                break;
        case PM_POST_HIBERNATION:
        case PM_POST_SUSPEND:
        case PM_POST_RESTORE:
                ttm_suspend_unlock(&vmaster->lock);

                break;
        case PM_RESTORE_PREPARE:
                break;
        default:
                break;
        }
        return 0;
}

/**
 * These might not be needed with the virtual SVGA device.
 */

static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct drm_device *dev = pci_get_drvdata(pdev);
        struct vmw_private *dev_priv = vmw_priv(dev);

        if (dev_priv->num_3d_resources != 0) {
                DRM_INFO("Can't suspend or hibernate "
                         "while 3D resources are active.\n");
                return -EBUSY;
        }

        pci_save_state(pdev);
        pci_disable_device(pdev);
        pci_set_power_state(pdev, PCI_D3hot);
        return 0;
}

static int vmw_pci_resume(struct pci_dev *pdev)
{
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
        return pci_enable_device(pdev);
}

static int vmw_pm_suspend(struct device *kdev)
{
        struct pci_dev *pdev = to_pci_dev(kdev);
        struct pm_message dummy;

        dummy.event = 0;

        return vmw_pci_suspend(pdev, dummy);
}

static int vmw_pm_resume(struct device *kdev)
{
        struct pci_dev *pdev = to_pci_dev(kdev);

        return vmw_pci_resume(pdev);
}

static int vmw_pm_prepare(struct device *kdev)
{
        struct pci_dev *pdev = to_pci_dev(kdev);
        struct drm_device *dev = pci_get_drvdata(pdev);
        struct vmw_private *dev_priv = vmw_priv(dev);

        /**
         * Release 3d reference held by fbdev and potentially
         * stop fifo.
         */
        dev_priv->suspended = true;
        if (dev_priv->enable_fb)
                vmw_3d_resource_dec(dev_priv, true);

        if (dev_priv->num_3d_resources != 0) {
                DRM_INFO("Can't suspend or hibernate "
                         "while 3D resources are active.\n");

                if (dev_priv->enable_fb)
                        vmw_3d_resource_inc(dev_priv, true);
                dev_priv->suspended = false;
                return -EBUSY;
        }

        return 0;
}

static void vmw_pm_complete(struct device *kdev)
{
        struct pci_dev *pdev = to_pci_dev(kdev);
        struct drm_device *dev = pci_get_drvdata(pdev);
        struct vmw_private *dev_priv = vmw_priv(dev);

        /**
         * Reclaim 3d reference held by fbdev and potentially
         * start fifo.
         */
        if (dev_priv->enable_fb)
                vmw_3d_resource_inc(dev_priv, false);

        dev_priv->suspended = false;
}

static const struct dev_pm_ops vmw_pm_ops = {
        .prepare = vmw_pm_prepare,
        .complete = vmw_pm_complete,
        .suspend = vmw_pm_suspend,
        .resume = vmw_pm_resume,
};

static struct drm_driver driver = {
        .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
        DRIVER_MODESET,
        .load = vmw_driver_load,
        .unload = vmw_driver_unload,
        .firstopen = vmw_firstopen,
        .lastclose = vmw_lastclose,
        .irq_preinstall = vmw_irq_preinstall,
        .irq_postinstall = vmw_irq_postinstall,
        .irq_uninstall = vmw_irq_uninstall,
        .irq_handler = vmw_irq_handler,
        .get_vblank_counter = vmw_get_vblank_counter,
        .enable_vblank = vmw_enable_vblank,
        .disable_vblank = vmw_disable_vblank,
        .reclaim_buffers_locked = NULL,
        .ioctls = vmw_ioctls,
        .num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls),
        .dma_quiescent = NULL,  /*vmw_dma_quiescent, */
        .master_create = vmw_master_create,
        .master_destroy = vmw_master_destroy,
        .master_set = vmw_master_set,
        .master_drop = vmw_master_drop,
        .open = vmw_driver_open,
        .postclose = vmw_postclose,
        .fops = {
                 .owner = THIS_MODULE,
                 .open = drm_open,
                 .release = drm_release,
                 .unlocked_ioctl = vmw_unlocked_ioctl,
                 .mmap = vmw_mmap,
                 .poll = vmw_fops_poll,
                 .read = vmw_fops_read,
                 .fasync = drm_fasync,
#if defined(CONFIG_COMPAT)
                 .compat_ioctl = drm_compat_ioctl,
#endif
                 .llseek = noop_llseek,
        },
        .name = VMWGFX_DRIVER_NAME,
        .desc = VMWGFX_DRIVER_DESC,
        .date = VMWGFX_DRIVER_DATE,
        .major = VMWGFX_DRIVER_MAJOR,
        .minor = VMWGFX_DRIVER_MINOR,
        .patchlevel = VMWGFX_DRIVER_PATCHLEVEL
};

static struct pci_driver vmw_pci_driver = {
        .name = VMWGFX_DRIVER_NAME,
        .id_table = vmw_pci_id_list,
        .probe = vmw_probe,
        .remove = vmw_remove,
        .driver = {
                .pm = &vmw_pm_ops
        }
};

static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        return drm_get_pci_dev(pdev, ent, &driver);
}

static int __init vmwgfx_init(void)
{
        int ret;
        ret = drm_pci_init(&driver, &vmw_pci_driver);
        if (ret)
                DRM_ERROR("Failed initializing DRM.\n");
        return ret;
}

static void __exit vmwgfx_exit(void)
{
        drm_pci_exit(&driver, &vmw_pci_driver);
}

module_init(vmwgfx_init);
module_exit(vmwgfx_exit);

MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
               __stringify(VMWGFX_DRIVER_MINOR) "."
               __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
               "0");