/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/module.h>

#include "drmP.h"
#include "vmwgfx_drv.h"
#include "ttm/ttm_placement.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_object.h"
#include "ttm/ttm_module.h"

#define VMWGFX_DRIVER_NAME "vmwgfx"
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
#define VMWGFX_CHIP_SVGAII 0
#define VMW_FB_RESERVATION 0

/**
 * Fully encoded drm commands. Might move to vmw_drm.h
 */
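/*
 * A note on the encodings below (general Linux ioctl convention, not
 * vmwgfx-specific): DRM_IOW() marks commands whose argument is copied
 * from user space, DRM_IOR() marks commands that only return data, and
 * DRM_IOWR() marks commands that do both. The direction and argument
 * size encoded here drive drm_ioctl()'s copy to and from user space.
 */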

#define DRM_IOCTL_VMW_GET_PARAM                                 \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM,          \
                 struct drm_vmw_getparam_arg)
#define DRM_IOCTL_VMW_ALLOC_DMABUF                              \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF,       \
                 union drm_vmw_alloc_dmabuf_arg)
#define DRM_IOCTL_VMW_UNREF_DMABUF                              \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF,        \
                struct drm_vmw_unref_dmabuf_arg)
#define DRM_IOCTL_VMW_CURSOR_BYPASS                             \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS,       \
                struct drm_vmw_cursor_bypass_arg)

#define DRM_IOCTL_VMW_CONTROL_STREAM                            \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM,      \
                struct drm_vmw_control_stream_arg)
#define DRM_IOCTL_VMW_CLAIM_STREAM                              \
        DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM,        \
                struct drm_vmw_stream_arg)
#define DRM_IOCTL_VMW_UNREF_STREAM                              \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM,        \
                struct drm_vmw_stream_arg)

#define DRM_IOCTL_VMW_CREATE_CONTEXT                            \
        DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT,      \
                struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_UNREF_CONTEXT                             \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT,       \
                struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_CREATE_SURFACE                            \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE,     \
                 union drm_vmw_surface_create_arg)
#define DRM_IOCTL_VMW_UNREF_SURFACE                             \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE,       \
                struct drm_vmw_surface_arg)
#define DRM_IOCTL_VMW_REF_SURFACE                               \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE,        \
                 union drm_vmw_surface_reference_arg)
#define DRM_IOCTL_VMW_EXECBUF                                   \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF,             \
                struct drm_vmw_execbuf_arg)
#define DRM_IOCTL_VMW_GET_3D_CAP                                \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP,          \
                struct drm_vmw_get_3d_cap_arg)
#define DRM_IOCTL_VMW_FENCE_WAIT                                \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT,         \
                 struct drm_vmw_fence_wait_arg)
#define DRM_IOCTL_VMW_FENCE_SIGNALED                            \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED,     \
                 struct drm_vmw_fence_signaled_arg)
#define DRM_IOCTL_VMW_FENCE_UNREF                               \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF,         \
                struct drm_vmw_fence_arg)
#define DRM_IOCTL_VMW_FENCE_EVENT                               \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT,         \
                struct drm_vmw_fence_event_arg)
#define DRM_IOCTL_VMW_PRESENT                                   \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT,             \
                struct drm_vmw_present_arg)
#define DRM_IOCTL_VMW_PRESENT_READBACK                          \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK,    \
                struct drm_vmw_present_readback_arg)
#define DRM_IOCTL_VMW_UPDATE_LAYOUT                             \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT,       \
                struct drm_vmw_update_layout_arg)

/**
 * The core DRM version of this macro doesn't account for
 * DRM_COMMAND_BASE.
 */

#define VMW_IOCTL_DEF(ioctl, func, flags) \
  [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_##ioctl, flags, func, DRM_IOCTL_##ioctl}
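/*
 * For illustration only: with the definition above, an entry like
 *
 *   VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl, DRM_AUTH | DRM_UNLOCKED)
 *
 * becomes a designated initializer at index
 * DRM_IOCTL_NR(DRM_IOCTL_VMW_GET_PARAM) - DRM_COMMAND_BASE, so the table
 * below is indexed by the driver-private command number, and each entry
 * carries the fully encoded ioctl for the check in vmw_unlocked_ioctl().
 */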

/**
 * Ioctl definitions.
 */

static struct drm_ioctl_desc vmw_ioctls[] = {
        VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
                      vmw_kms_cursor_bypass_ioctl,
                      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),

        VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
                      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
                      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
                      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),

        VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
                      vmw_fence_obj_signaled_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_FENCE_EVENT,
                      vmw_fence_event_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),

        /*
         * These ioctls allow direct access to the framebuffers;
         * mark them master only.
         */
        VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
                      DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
                      vmw_present_readback_ioctl,
                      DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
                      vmw_kms_update_layout_ioctl,
                      DRM_MASTER | DRM_UNLOCKED),
};

static struct pci_device_id vmw_pci_id_list[] = {
        {0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
        {0, 0, 0}
};

static int enable_fbdev;

static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static void vmw_master_init(struct vmw_master *);
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
                              void *ptr);

MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
module_param_named(enable_fbdev, enable_fbdev, int, 0600);

static void vmw_print_capabilities(uint32_t capabilities)
{
        DRM_INFO("Capabilities:\n");
        if (capabilities & SVGA_CAP_RECT_COPY)
                DRM_INFO("  Rect copy.\n");
        if (capabilities & SVGA_CAP_CURSOR)
                DRM_INFO("  Cursor.\n");
        if (capabilities & SVGA_CAP_CURSOR_BYPASS)
                DRM_INFO("  Cursor bypass.\n");
        if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
                DRM_INFO("  Cursor bypass 2.\n");
        if (capabilities & SVGA_CAP_8BIT_EMULATION)
                DRM_INFO("  8bit emulation.\n");
        if (capabilities & SVGA_CAP_ALPHA_CURSOR)
                DRM_INFO("  Alpha cursor.\n");
        if (capabilities & SVGA_CAP_3D)
                DRM_INFO("  3D.\n");
        if (capabilities & SVGA_CAP_EXTENDED_FIFO)
                DRM_INFO("  Extended Fifo.\n");
        if (capabilities & SVGA_CAP_MULTIMON)
                DRM_INFO("  Multimon.\n");
        if (capabilities & SVGA_CAP_PITCHLOCK)
                DRM_INFO("  Pitchlock.\n");
        if (capabilities & SVGA_CAP_IRQMASK)
                DRM_INFO("  Irq mask.\n");
        if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
                DRM_INFO("  Display Topology.\n");
        if (capabilities & SVGA_CAP_GMR)
                DRM_INFO("  GMR.\n");
        if (capabilities & SVGA_CAP_TRACES)
                DRM_INFO("  Traces.\n");
        if (capabilities & SVGA_CAP_GMR2)
                DRM_INFO("  GMR2.\n");
        if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
                DRM_INFO("  Screen Object 2.\n");
}


/**
 * vmw_dummy_query_bo_prepare - Initialize a query result structure at
 * the start of a buffer object.
 *
 * @dev_priv: The device private structure.
 *
 * This function will idle the buffer using an uninterruptible wait, then
 * map the first page and initialize a pending occlusion query result
 * structure. Finally, it will unmap the buffer.
 *
 * TODO: Since we're only mapping a single page, we should optimize the map
 * to use kmap_atomic / iomap_atomic.
 */
static void vmw_dummy_query_bo_prepare(struct vmw_private *dev_priv)
{
        struct ttm_bo_kmap_obj map;
        volatile SVGA3dQueryResult *result;
        bool dummy;
        int ret;
        struct ttm_bo_device *bdev = &dev_priv->bdev;
        struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;

        ttm_bo_reserve(bo, false, false, false, 0);
        spin_lock(&bdev->fence_lock);
        ret = ttm_bo_wait(bo, false, false, false);
        spin_unlock(&bdev->fence_lock);
        if (unlikely(ret != 0))
                (void) vmw_fallback_wait(dev_priv, false, true, 0, false,
                                         10*HZ);

        ret = ttm_bo_kmap(bo, 0, 1, &map);
        if (likely(ret == 0)) {
                result = ttm_kmap_obj_virtual(&map, &dummy);
                result->totalSize = sizeof(*result);
                result->state = SVGA3D_QUERYSTATE_PENDING;
                result->result32 = 0xff;
                ttm_bo_kunmap(&map);
        } else
                DRM_ERROR("Dummy query buffer map failed.\n");
        ttm_bo_unreserve(bo);
}


/**
 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
 *
 * @dev_priv: A device private structure.
 *
 * This function creates a small buffer object that holds the query
 * result for dummy queries emitted as query barriers.
 * No interruptible waits are done within this function.
 *
 * Returns an error if bo creation fails.
 */
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
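        /*
         * A single page is ample here; the bo only needs to hold the
         * SVGA3dQueryResult structure that vmw_dummy_query_bo_prepare()
         * writes at offset zero.
         */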
        return ttm_bo_create(&dev_priv->bdev,
                             PAGE_SIZE,
                             ttm_bo_type_device,
                             &vmw_vram_sys_placement,
                             0, 0, false, NULL,
                             &dev_priv->dummy_query_bo);
}


static int vmw_request_device(struct vmw_private *dev_priv)
{
        int ret;

        ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Unable to initialize FIFO.\n");
                return ret;
        }
        vmw_fence_fifo_up(dev_priv->fman);
        ret = vmw_dummy_query_bo_create(dev_priv);
        if (unlikely(ret != 0))
                goto out_no_query_bo;
        vmw_dummy_query_bo_prepare(dev_priv);

        return 0;

out_no_query_bo:
        vmw_fence_fifo_down(dev_priv->fman);
        vmw_fifo_release(dev_priv, &dev_priv->fifo);
        return ret;
}

static void vmw_release_device(struct vmw_private *dev_priv)
{
        /*
         * Previous destructions should've released
         * the pinned bo.
         */

        BUG_ON(dev_priv->pinned_bo != NULL);

        ttm_bo_unref(&dev_priv->dummy_query_bo);
        vmw_fence_fifo_down(dev_priv->fman);
        vmw_fifo_release(dev_priv, &dev_priv->fifo);
}

/**
 * Increase the 3d resource refcount.
 * If the count was previously zero, initialize the fifo, switching to svga
 * mode. Note that the master holds a ref as well, and may request an
 * explicit switch to svga mode if fb is not running, using @unhide_svga.
 */
int vmw_3d_resource_inc(struct vmw_private *dev_priv,
                        bool unhide_svga)
{
        int ret = 0;

        mutex_lock(&dev_priv->release_mutex);
        if (unlikely(dev_priv->num_3d_resources++ == 0)) {
                ret = vmw_request_device(dev_priv);
                if (unlikely(ret != 0))
                        --dev_priv->num_3d_resources;
        } else if (unhide_svga) {
                mutex_lock(&dev_priv->hw_mutex);
                vmw_write(dev_priv, SVGA_REG_ENABLE,
                          vmw_read(dev_priv, SVGA_REG_ENABLE) &
                          ~SVGA_REG_ENABLE_HIDE);
                mutex_unlock(&dev_priv->hw_mutex);
        }

        mutex_unlock(&dev_priv->release_mutex);
        return ret;
}

/**
 * Decrease the 3d resource refcount.
 * If the count reaches zero, disable the fifo, switching to vga mode.
 * Note that the master holds a refcount as well, and may request an
 * explicit switch to vga mode when it releases its refcount to account
 * for the situation of an X server vt switch to VGA with 3d resources
 * active.
 */
void vmw_3d_resource_dec(struct vmw_private *dev_priv,
                         bool hide_svga)
{
        int32_t n3d;

        mutex_lock(&dev_priv->release_mutex);
        if (unlikely(--dev_priv->num_3d_resources == 0))
                vmw_release_device(dev_priv);
        else if (hide_svga) {
                mutex_lock(&dev_priv->hw_mutex);
                vmw_write(dev_priv, SVGA_REG_ENABLE,
                          vmw_read(dev_priv, SVGA_REG_ENABLE) |
                          SVGA_REG_ENABLE_HIDE);
                mutex_unlock(&dev_priv->hw_mutex);
        }

        n3d = (int32_t) dev_priv->num_3d_resources;
        mutex_unlock(&dev_priv->release_mutex);

        BUG_ON(n3d < 0);
}
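/*
 * Illustrative pairing of the two helpers above (a sketch, not taken
 * from a real caller): a path that needs the fifo up temporarily would do
 *
 *      ret = vmw_3d_resource_inc(dev_priv, false);
 *      if (unlikely(ret != 0))
 *              return ret;
 *      ... use the fifo ...
 *      vmw_3d_resource_dec(dev_priv, false);
 *
 * vmw_driver_load() and vmw_master_set() follow this pattern with
 * unhide_svga / hide_svga set as appropriate.
 */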

static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
        struct vmw_private *dev_priv;
        int ret;
        uint32_t svga_id;

        dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
        if (unlikely(dev_priv == NULL)) {
                DRM_ERROR("Failed allocating a device private struct.\n");
                return -ENOMEM;
        }

        dev_priv->dev = dev;
        dev_priv->vmw_chipset = chipset;
        dev_priv->last_read_seqno = (uint32_t) -100;
        mutex_init(&dev_priv->hw_mutex);
        mutex_init(&dev_priv->cmdbuf_mutex);
        mutex_init(&dev_priv->release_mutex);
        rwlock_init(&dev_priv->resource_lock);
        idr_init(&dev_priv->context_idr);
        idr_init(&dev_priv->surface_idr);
        idr_init(&dev_priv->stream_idr);
        mutex_init(&dev_priv->init_mutex);
        init_waitqueue_head(&dev_priv->fence_queue);
        init_waitqueue_head(&dev_priv->fifo_queue);
        dev_priv->fence_queue_waiters = 0;
        atomic_set(&dev_priv->fifo_queue_waiters, 0);
        INIT_LIST_HEAD(&dev_priv->surface_lru);
        dev_priv->used_memory_size = 0;

        dev_priv->io_start = pci_resource_start(dev->pdev, 0);
        dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
        dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);

        dev_priv->enable_fb = enable_fbdev;

        mutex_lock(&dev_priv->hw_mutex);

        vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
        svga_id = vmw_read(dev_priv, SVGA_REG_ID);
        if (svga_id != SVGA_ID_2) {
                ret = -ENOSYS;
                DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
                mutex_unlock(&dev_priv->hw_mutex);
                goto out_err0;
        }

        dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);

        dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
        dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
        dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
        dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);
        if (dev_priv->capabilities & SVGA_CAP_GMR) {
                dev_priv->max_gmr_descriptors =
                        vmw_read(dev_priv,
                                 SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH);
                dev_priv->max_gmr_ids =
                        vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
        }
        if (dev_priv->capabilities & SVGA_CAP_GMR2) {
                dev_priv->max_gmr_pages =
                        vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
                dev_priv->memory_size =
                        vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
                dev_priv->memory_size -= dev_priv->vram_size;
        } else {
                /*
                 * An arbitrary limit of 512MiB on surface
                 * memory. But all HWV8 hardware supports GMR2.
                 */
                dev_priv->memory_size = 512*1024*1024;
        }

        mutex_unlock(&dev_priv->hw_mutex);

        vmw_print_capabilities(dev_priv->capabilities);

        if (dev_priv->capabilities & SVGA_CAP_GMR) {
                DRM_INFO("Max GMR ids is %u\n",
                         (unsigned)dev_priv->max_gmr_ids);
                DRM_INFO("Max GMR descriptors is %u\n",
                         (unsigned)dev_priv->max_gmr_descriptors);
        }
        if (dev_priv->capabilities & SVGA_CAP_GMR2) {
                DRM_INFO("Max number of GMR pages is %u\n",
                         (unsigned)dev_priv->max_gmr_pages);
                DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
                         (unsigned)dev_priv->memory_size / 1024);
        }
        DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
                 dev_priv->vram_start, dev_priv->vram_size / 1024);
        DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
                 dev_priv->mmio_start, dev_priv->mmio_size / 1024);

        ret = vmw_ttm_global_init(dev_priv);
        if (unlikely(ret != 0))
                goto out_err0;


        vmw_master_init(&dev_priv->fbdev_master);
        ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
        dev_priv->active_master = &dev_priv->fbdev_master;


        ret = ttm_bo_device_init(&dev_priv->bdev,
                                 dev_priv->bo_global_ref.ref.object,
                                 &vmw_bo_driver, VMWGFX_FILE_PAGE_OFFSET,
                                 false);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed initializing TTM buffer object driver.\n");
                goto out_err1;
        }

        ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
                             (dev_priv->vram_size >> PAGE_SHIFT));
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed initializing memory manager for VRAM.\n");
                goto out_err2;
        }

        dev_priv->has_gmr = true;
        if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
                           dev_priv->max_gmr_ids) != 0) {
                DRM_INFO("No GMR memory available. "
                         "Graphics memory resources are very limited.\n");
                dev_priv->has_gmr = false;
        }

        dev_priv->mmio_mtrr = drm_mtrr_add(dev_priv->mmio_start,
                                           dev_priv->mmio_size, DRM_MTRR_WC);

        dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start,
                                         dev_priv->mmio_size);

        if (unlikely(dev_priv->mmio_virt == NULL)) {
                ret = -ENOMEM;
                DRM_ERROR("Failed mapping MMIO.\n");
                goto out_err3;
        }

        /* Need mmio memory to check for fifo pitchlock cap. */
        if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
            !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
            !vmw_fifo_have_pitchlock(dev_priv)) {
                ret = -ENOSYS;
                DRM_ERROR("Hardware has no pitchlock\n");
                goto out_err4;
        }

        dev_priv->tdev = ttm_object_device_init
            (dev_priv->mem_global_ref.object, 12);

        if (unlikely(dev_priv->tdev == NULL)) {
                DRM_ERROR("Unable to initialize TTM object management.\n");
                ret = -ENOMEM;
                goto out_err4;
        }

        dev->dev_private = dev_priv;

        ret = pci_request_regions(dev->pdev, "vmwgfx probe");
        dev_priv->stealth = (ret != 0);
        if (dev_priv->stealth) {
                /*
                 * Request at least the mmio PCI resource.
                 */

                DRM_INFO("It appears vesafb is loaded. "
                         "Ignore the above error if any.\n");
                ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
                        goto out_no_device;
                }
        }

        dev_priv->fman = vmw_fence_manager_init(dev_priv);
        if (unlikely(dev_priv->fman == NULL))
                goto out_no_fman;

        /* Need to start the fifo to check if we can do screen objects */
        ret = vmw_3d_resource_inc(dev_priv, true);
        if (unlikely(ret != 0))
                goto out_no_fifo;
        vmw_kms_save_vga(dev_priv);

        /* Start kms and overlay systems, needs fifo. */
        ret = vmw_kms_init(dev_priv);
        if (unlikely(ret != 0))
                goto out_no_kms;
        vmw_overlay_init(dev_priv);

        /* 3D Depends on Screen Objects being used. */
        DRM_INFO("Detected %sdevice 3D availability.\n",
                 vmw_fifo_have_3d(dev_priv) ?
                 "" : "no ");

        /* We might be done with the fifo now */
        if (dev_priv->enable_fb) {
                vmw_fb_init(dev_priv);
        } else {
                vmw_kms_restore_vga(dev_priv);
                vmw_3d_resource_dec(dev_priv, true);
        }

        if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
                ret = drm_irq_install(dev);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Failed installing irq: %d\n", ret);
                        goto out_no_irq;
                }
        }

        dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
        register_pm_notifier(&dev_priv->pm_nb);

        return 0;

out_no_irq:
        if (dev_priv->enable_fb)
                vmw_fb_close(dev_priv);
        vmw_overlay_close(dev_priv);
        vmw_kms_close(dev_priv);
out_no_kms:
        /* We still have a 3D resource reference held */
        if (dev_priv->enable_fb) {
                vmw_kms_restore_vga(dev_priv);
                vmw_3d_resource_dec(dev_priv, false);
        }
out_no_fifo:
        vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
        if (dev_priv->stealth)
                pci_release_region(dev->pdev, 2);
        else
                pci_release_regions(dev->pdev);
out_no_device:
        ttm_object_device_release(&dev_priv->tdev);
out_err4:
        iounmap(dev_priv->mmio_virt);
out_err3:
        drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
                     dev_priv->mmio_size, DRM_MTRR_WC);
        if (dev_priv->has_gmr)
                (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
        (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_err2:
        (void)ttm_bo_device_release(&dev_priv->bdev);
out_err1:
        vmw_ttm_global_release(dev_priv);
out_err0:
        idr_destroy(&dev_priv->surface_idr);
        idr_destroy(&dev_priv->context_idr);
        idr_destroy(&dev_priv->stream_idr);
        kfree(dev_priv);
        return ret;
}

static int vmw_driver_unload(struct drm_device *dev)
{
        struct vmw_private *dev_priv = vmw_priv(dev);

        unregister_pm_notifier(&dev_priv->pm_nb);

        vfree(dev_priv->ctx.cmd_bounce);
        if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
                drm_irq_uninstall(dev_priv->dev);
        if (dev_priv->enable_fb) {
                vmw_fb_close(dev_priv);
                vmw_kms_restore_vga(dev_priv);
                vmw_3d_resource_dec(dev_priv, false);
        }
        vmw_kms_close(dev_priv);
        vmw_overlay_close(dev_priv);
        vmw_fence_manager_takedown(dev_priv->fman);
        if (dev_priv->stealth)
                pci_release_region(dev->pdev, 2);
        else
                pci_release_regions(dev->pdev);

        ttm_object_device_release(&dev_priv->tdev);
        iounmap(dev_priv->mmio_virt);
        drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
                     dev_priv->mmio_size, DRM_MTRR_WC);
        if (dev_priv->has_gmr)
                (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
        (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
        (void)ttm_bo_device_release(&dev_priv->bdev);
        vmw_ttm_global_release(dev_priv);
        idr_destroy(&dev_priv->surface_idr);
        idr_destroy(&dev_priv->context_idr);
        idr_destroy(&dev_priv->stream_idr);

        kfree(dev_priv);

        return 0;
}

static void vmw_postclose(struct drm_device *dev,
                          struct drm_file *file_priv)
{
        struct vmw_fpriv *vmw_fp;

        vmw_fp = vmw_fpriv(file_priv);
        ttm_object_file_release(&vmw_fp->tfile);
        if (vmw_fp->locked_master)
                drm_master_put(&vmw_fp->locked_master);
        kfree(vmw_fp);
}

static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_fpriv *vmw_fp;
        int ret = -ENOMEM;

        vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
        if (unlikely(vmw_fp == NULL))
                return ret;

        vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
        if (unlikely(vmw_fp->tfile == NULL))
                goto out_no_tfile;

        file_priv->driver_priv = vmw_fp;

        if (unlikely(dev_priv->bdev.dev_mapping == NULL))
                dev_priv->bdev.dev_mapping =
                        file_priv->filp->f_path.dentry->d_inode->i_mapping;

        return 0;

out_no_tfile:
        kfree(vmw_fp);
        return ret;
}

static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
                               unsigned long arg)
{
        struct drm_file *file_priv = filp->private_data;
        struct drm_device *dev = file_priv->minor->dev;
        unsigned int nr = DRM_IOCTL_NR(cmd);

        /*
         * Do extra checking on driver private ioctls.
         */

        if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
            && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
                struct drm_ioctl_desc *ioctl =
                    &vmw_ioctls[nr - DRM_COMMAND_BASE];

                if (unlikely(ioctl->cmd_drv != cmd)) {
                        DRM_ERROR("Invalid command format, ioctl %d\n",
                                  nr - DRM_COMMAND_BASE);
                        return -EINVAL;
                }
        }

        return drm_ioctl(filp, cmd, arg);
}

static int vmw_firstopen(struct drm_device *dev)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        dev_priv->is_opened = true;

        return 0;
}

static void vmw_lastclose(struct drm_device *dev)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct drm_crtc *crtc;
        struct drm_mode_set set;
        int ret;

        /*
         * Do nothing on the lastclose call from drm_unload.
         */

        if (!dev_priv->is_opened)
                return;

        dev_priv->is_opened = false;
        set.x = 0;
        set.y = 0;
        set.fb = NULL;
        set.mode = NULL;
        set.connectors = NULL;
        set.num_connectors = 0;

        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                set.crtc = crtc;
                ret = crtc->funcs->set_config(&set);
                WARN_ON(ret != 0);
        }
}

static void vmw_master_init(struct vmw_master *vmaster)
{
        ttm_lock_init(&vmaster->lock);
        INIT_LIST_HEAD(&vmaster->fb_surf);
        mutex_init(&vmaster->fb_surf_mutex);
}

static int vmw_master_create(struct drm_device *dev,
                             struct drm_master *master)
{
        struct vmw_master *vmaster;

        vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
        if (unlikely(vmaster == NULL))
                return -ENOMEM;

        vmw_master_init(vmaster);
        ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
        master->driver_priv = vmaster;

        return 0;
}

static void vmw_master_destroy(struct drm_device *dev,
                               struct drm_master *master)
{
        struct vmw_master *vmaster = vmw_master(master);

        master->driver_priv = NULL;
        kfree(vmaster);
}


static int vmw_master_set(struct drm_device *dev,
                          struct drm_file *file_priv,
                          bool from_open)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
        struct vmw_master *active = dev_priv->active_master;
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        int ret = 0;

        if (!dev_priv->enable_fb) {
                ret = vmw_3d_resource_inc(dev_priv, true);
                if (unlikely(ret != 0))
                        return ret;
                vmw_kms_save_vga(dev_priv);
                mutex_lock(&dev_priv->hw_mutex);
                vmw_write(dev_priv, SVGA_REG_TRACES, 0);
                mutex_unlock(&dev_priv->hw_mutex);
        }

        if (active) {
                BUG_ON(active != &dev_priv->fbdev_master);
                ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
                if (unlikely(ret != 0))
                        goto out_no_active_lock;

                ttm_lock_set_kill(&active->lock, true, SIGTERM);
                ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Unable to clean VRAM on "
                                  "master drop.\n");
                }

                dev_priv->active_master = NULL;
        }

        ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
        if (!from_open) {
                ttm_vt_unlock(&vmaster->lock);
                BUG_ON(vmw_fp->locked_master != file_priv->master);
                drm_master_put(&vmw_fp->locked_master);
        }

        dev_priv->active_master = vmaster;

        return 0;

out_no_active_lock:
        if (!dev_priv->enable_fb) {
                mutex_lock(&dev_priv->hw_mutex);
                vmw_write(dev_priv, SVGA_REG_TRACES, 1);
                mutex_unlock(&dev_priv->hw_mutex);
                vmw_kms_restore_vga(dev_priv);
                vmw_3d_resource_dec(dev_priv, true);
        }
        return ret;
}

static void vmw_master_drop(struct drm_device *dev,
                            struct drm_file *file_priv,
                            bool from_release)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        int ret;

        /*
         * Make sure the master doesn't disappear while we have
         * it locked.
         */

        vmw_fp->locked_master = drm_master_get(file_priv->master);
        ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
        vmw_execbuf_release_pinned_bo(dev_priv, false, 0);

        if (unlikely(ret != 0)) {
                DRM_ERROR("Unable to lock TTM at VT switch.\n");
                drm_master_put(&vmw_fp->locked_master);
        }

        ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);

        if (!dev_priv->enable_fb) {
                ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
                if (unlikely(ret != 0))
                        DRM_ERROR("Unable to clean VRAM on master drop.\n");
                mutex_lock(&dev_priv->hw_mutex);
                vmw_write(dev_priv, SVGA_REG_TRACES, 1);
                mutex_unlock(&dev_priv->hw_mutex);
                vmw_kms_restore_vga(dev_priv);
                vmw_3d_resource_dec(dev_priv, true);
        }

        dev_priv->active_master = &dev_priv->fbdev_master;
        ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
        ttm_vt_unlock(&dev_priv->fbdev_master.lock);

        if (dev_priv->enable_fb)
                vmw_fb_on(dev_priv);
}


static void vmw_remove(struct pci_dev *pdev)
{
        struct drm_device *dev = pci_get_drvdata(pdev);

        drm_put_dev(dev);
}

static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
                              void *ptr)
{
        struct vmw_private *dev_priv =
                container_of(nb, struct vmw_private, pm_nb);
        struct vmw_master *vmaster = dev_priv->active_master;

        switch (val) {
        case PM_HIBERNATION_PREPARE:
        case PM_SUSPEND_PREPARE:
                ttm_suspend_lock(&vmaster->lock);

                /*
                 * This empties VRAM and unbinds all GMR bindings.
                 * Buffer contents are moved to swappable memory.
                 */
                vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
                ttm_bo_swapout_all(&dev_priv->bdev);

                break;
        case PM_POST_HIBERNATION:
        case PM_POST_SUSPEND:
        case PM_POST_RESTORE:
                ttm_suspend_unlock(&vmaster->lock);

                break;
        case PM_RESTORE_PREPARE:
                break;
        default:
                break;
        }
        return 0;
}

/**
 * These might not be needed with the virtual SVGA device.
 */

static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct drm_device *dev = pci_get_drvdata(pdev);
        struct vmw_private *dev_priv = vmw_priv(dev);

        if (dev_priv->num_3d_resources != 0) {
                DRM_INFO("Can't suspend or hibernate "
                         "while 3D resources are active.\n");
                return -EBUSY;
        }

        pci_save_state(pdev);
        pci_disable_device(pdev);
        pci_set_power_state(pdev, PCI_D3hot);
        return 0;
}

static int vmw_pci_resume(struct pci_dev *pdev)
{
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
        return pci_enable_device(pdev);
}

static int vmw_pm_suspend(struct device *kdev)
{
        struct pci_dev *pdev = to_pci_dev(kdev);
        struct pm_message dummy;

        dummy.event = 0;

        return vmw_pci_suspend(pdev, dummy);
}

static int vmw_pm_resume(struct device *kdev)
{
        struct pci_dev *pdev = to_pci_dev(kdev);

        return vmw_pci_resume(pdev);
}

static int vmw_pm_prepare(struct device *kdev)
{
        struct pci_dev *pdev = to_pci_dev(kdev);
        struct drm_device *dev = pci_get_drvdata(pdev);
        struct vmw_private *dev_priv = vmw_priv(dev);

        /*
         * Release 3d reference held by fbdev and potentially
         * stop fifo.
         */
        dev_priv->suspended = true;
        if (dev_priv->enable_fb)
                vmw_3d_resource_dec(dev_priv, true);

        if (dev_priv->num_3d_resources != 0) {

                DRM_INFO("Can't suspend or hibernate "
                         "while 3D resources are active.\n");

                if (dev_priv->enable_fb)
                        vmw_3d_resource_inc(dev_priv, true);
                dev_priv->suspended = false;
                return -EBUSY;
        }

        return 0;
}

static void vmw_pm_complete(struct device *kdev)
{
        struct pci_dev *pdev = to_pci_dev(kdev);
        struct drm_device *dev = pci_get_drvdata(pdev);
        struct vmw_private *dev_priv = vmw_priv(dev);

        /*
         * Reclaim 3d reference held by fbdev and potentially
         * start fifo.
         */
        if (dev_priv->enable_fb)
                vmw_3d_resource_inc(dev_priv, false);

        dev_priv->suspended = false;
}

static const struct dev_pm_ops vmw_pm_ops = {
        .prepare = vmw_pm_prepare,
        .complete = vmw_pm_complete,
        .suspend = vmw_pm_suspend,
        .resume = vmw_pm_resume,
};
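/*
 * Callback ordering, per the core dev_pm_ops contract: .prepare runs
 * first on the way down and .complete runs last on the way back up,
 * bracketing .suspend / .resume. vmw_pm_prepare() and vmw_pm_complete()
 * rely on this to drop and reclaim the fbdev 3d resource reference
 * around the transition.
 */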

static struct drm_driver driver = {
        .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
        DRIVER_MODESET,
        .load = vmw_driver_load,
        .unload = vmw_driver_unload,
        .firstopen = vmw_firstopen,
        .lastclose = vmw_lastclose,
        .irq_preinstall = vmw_irq_preinstall,
        .irq_postinstall = vmw_irq_postinstall,
        .irq_uninstall = vmw_irq_uninstall,
        .irq_handler = vmw_irq_handler,
        .get_vblank_counter = vmw_get_vblank_counter,
        .enable_vblank = vmw_enable_vblank,
        .disable_vblank = vmw_disable_vblank,
        .reclaim_buffers_locked = NULL,
        .ioctls = vmw_ioctls,
        .num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls),
        .dma_quiescent = NULL,  /*vmw_dma_quiescent, */
        .master_create = vmw_master_create,
        .master_destroy = vmw_master_destroy,
        .master_set = vmw_master_set,
        .master_drop = vmw_master_drop,
        .open = vmw_driver_open,
        .postclose = vmw_postclose,
        .fops = {
                 .owner = THIS_MODULE,
                 .open = drm_open,
                 .release = drm_release,
                 .unlocked_ioctl = vmw_unlocked_ioctl,
                 .mmap = vmw_mmap,
                 .poll = vmw_fops_poll,
                 .read = vmw_fops_read,
                 .fasync = drm_fasync,
#if defined(CONFIG_COMPAT)
                 .compat_ioctl = drm_compat_ioctl,
#endif
                 .llseek = noop_llseek,
        },
        .name = VMWGFX_DRIVER_NAME,
        .desc = VMWGFX_DRIVER_DESC,
        .date = VMWGFX_DRIVER_DATE,
        .major = VMWGFX_DRIVER_MAJOR,
        .minor = VMWGFX_DRIVER_MINOR,
        .patchlevel = VMWGFX_DRIVER_PATCHLEVEL
};

static struct pci_driver vmw_pci_driver = {
        .name = VMWGFX_DRIVER_NAME,
        .id_table = vmw_pci_id_list,
        .probe = vmw_probe,
        .remove = vmw_remove,
        .driver = {
                .pm = &vmw_pm_ops
        }
};

static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        return drm_get_pci_dev(pdev, ent, &driver);
}

static int __init vmwgfx_init(void)
{
        int ret;
        ret = drm_pci_init(&driver, &vmw_pci_driver);
        if (ret)
                DRM_ERROR("Failed initializing DRM.\n");
        return ret;
}

static void __exit vmwgfx_exit(void)
{
        drm_pci_exit(&driver, &vmw_pci_driver);
}

module_init(vmwgfx_init);
module_exit(vmwgfx_exit);

MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
               __stringify(VMWGFX_DRIVER_MINOR) "."
               __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
               "0");