/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/module.h>
#include <linux/console.h>

#include "drmP.h"
#include "vmwgfx_drv.h"
#include "ttm/ttm_placement.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_object.h"
#include "ttm/ttm_module.h"

#define VMWGFX_DRIVER_NAME "vmwgfx"
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
#define VMWGFX_CHIP_SVGAII 0
#define VMW_FB_RESERVATION 0

/**
 * Fully encoded drm commands. Might move to vmw_drm.h
 */

#define DRM_IOCTL_VMW_GET_PARAM					\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM,		\
		 struct drm_vmw_getparam_arg)
#define DRM_IOCTL_VMW_ALLOC_DMABUF				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF,	\
		union drm_vmw_alloc_dmabuf_arg)
#define DRM_IOCTL_VMW_UNREF_DMABUF				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF,	\
		struct drm_vmw_unref_dmabuf_arg)
#define DRM_IOCTL_VMW_CURSOR_BYPASS				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS,	\
		 struct drm_vmw_cursor_bypass_arg)

#define DRM_IOCTL_VMW_CONTROL_STREAM				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM,	\
		 struct drm_vmw_control_stream_arg)
#define DRM_IOCTL_VMW_CLAIM_STREAM				\
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM,	\
		 struct drm_vmw_stream_arg)
#define DRM_IOCTL_VMW_UNREF_STREAM				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM,	\
		 struct drm_vmw_stream_arg)

#define DRM_IOCTL_VMW_CREATE_CONTEXT				\
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT,	\
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_UNREF_CONTEXT				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT,	\
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_CREATE_SURFACE				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE,	\
		 union drm_vmw_surface_create_arg)
#define DRM_IOCTL_VMW_UNREF_SURFACE				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE,	\
		 struct drm_vmw_surface_arg)
#define DRM_IOCTL_VMW_REF_SURFACE				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE,	\
		 union drm_vmw_surface_reference_arg)
#define DRM_IOCTL_VMW_EXECBUF					\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF,		\
		struct drm_vmw_execbuf_arg)
#define DRM_IOCTL_VMW_GET_3D_CAP				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP,		\
		 struct drm_vmw_get_3d_cap_arg)
#define DRM_IOCTL_VMW_FENCE_WAIT				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT,		\
		 struct drm_vmw_fence_wait_arg)
#define DRM_IOCTL_VMW_FENCE_SIGNALED				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED,	\
		 struct drm_vmw_fence_signaled_arg)
#define DRM_IOCTL_VMW_FENCE_UNREF				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF,		\
		 struct drm_vmw_fence_arg)
#define DRM_IOCTL_VMW_FENCE_EVENT				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT,		\
		 struct drm_vmw_fence_event_arg)
#define DRM_IOCTL_VMW_PRESENT					\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT,		\
		 struct drm_vmw_present_arg)
#define DRM_IOCTL_VMW_PRESENT_READBACK				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK,	\
		 struct drm_vmw_present_readback_arg)
#define DRM_IOCTL_VMW_UPDATE_LAYOUT				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT,	\
		 struct drm_vmw_update_layout_arg)

/**
 * The core DRM version of this macro doesn't account for
 * DRM_COMMAND_BASE.
 */

#define VMW_IOCTL_DEF(ioctl, func, flags) \
  [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_##ioctl, flags, func, DRM_IOCTL_##ioctl}

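/*
 * For illustration only (this comment is not compiled): applying the
 * macro above to the first entry of the table below,
 *
 *	VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
 *		      DRM_AUTH | DRM_UNLOCKED)
 *
 * yields the designated initializer
 *
 *	[DRM_VMW_GET_PARAM] = {DRM_VMW_GET_PARAM, DRM_AUTH | DRM_UNLOCKED,
 *			       vmw_getparam_ioctl, DRM_IOCTL_VMW_GET_PARAM}
 *
 * because DRM_IOCTL_NR(DRM_IOCTL_VMW_GET_PARAM) equals
 * DRM_COMMAND_BASE + DRM_VMW_GET_PARAM.
 */
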
/**
 * Ioctl definitions.
 */

static struct drm_ioctl_desc vmw_ioctls[] = {
	VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
		      vmw_kms_cursor_bypass_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),

	VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),

	VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
		      vmw_fence_obj_signaled_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_FENCE_EVENT,
		      vmw_fence_event_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),

	/* These allow direct access to the framebuffers; mark as master only. */
	VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
		      DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
		      vmw_present_readback_ioctl,
		      DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
		      vmw_kms_update_layout_ioctl,
		      DRM_MASTER | DRM_UNLOCKED),
};

static struct pci_device_id vmw_pci_id_list[] = {
	{0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
	{0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);

static int enable_fbdev;

static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static void vmw_master_init(struct vmw_master *);
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr);

MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
module_param_named(enable_fbdev, enable_fbdev, int, 0600);

static void vmw_print_capabilities(uint32_t capabilities)
{
	DRM_INFO("Capabilities:\n");
	if (capabilities & SVGA_CAP_RECT_COPY)
		DRM_INFO("  Rect copy.\n");
	if (capabilities & SVGA_CAP_CURSOR)
		DRM_INFO("  Cursor.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS)
		DRM_INFO("  Cursor bypass.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
		DRM_INFO("  Cursor bypass 2.\n");
	if (capabilities & SVGA_CAP_8BIT_EMULATION)
		DRM_INFO("  8bit emulation.\n");
	if (capabilities & SVGA_CAP_ALPHA_CURSOR)
		DRM_INFO("  Alpha cursor.\n");
	if (capabilities & SVGA_CAP_3D)
		DRM_INFO("  3D.\n");
	if (capabilities & SVGA_CAP_EXTENDED_FIFO)
		DRM_INFO("  Extended Fifo.\n");
	if (capabilities & SVGA_CAP_MULTIMON)
		DRM_INFO("  Multimon.\n");
	if (capabilities & SVGA_CAP_PITCHLOCK)
		DRM_INFO("  Pitchlock.\n");
	if (capabilities & SVGA_CAP_IRQMASK)
		DRM_INFO("  Irq mask.\n");
	if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
		DRM_INFO("  Display Topology.\n");
	if (capabilities & SVGA_CAP_GMR)
		DRM_INFO("  GMR.\n");
	if (capabilities & SVGA_CAP_TRACES)
		DRM_INFO("  Traces.\n");
	if (capabilities & SVGA_CAP_GMR2)
		DRM_INFO("  GMR2.\n");
	if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
		DRM_INFO("  Screen Object 2.\n");
}
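
/*
 * Hypothetical example of the resulting dmesg output on a device that
 * advertises a handful of the capabilities above (the exact set depends
 * on the host):
 *
 *	[drm] Capabilities:
 *	[drm]   Cursor.
 *	[drm]   Alpha cursor.
 *	[drm]   3D.
 *	[drm]   Irq mask.
 *	[drm]   GMR2.
 *	[drm]   Screen Object 2.
 */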


/**
 * vmw_dummy_query_bo_prepare - Initialize a query result structure at
 * the start of a buffer object.
 *
 * @dev_priv: The device private structure.
 *
 * This function will idle the buffer using an uninterruptible wait, then
 * map the first page and initialize a pending occlusion query result
 * structure. Finally it will unmap the buffer.
 *
 * TODO: Since we're only mapping a single page, we should optimize the map
 * to use kmap_atomic / iomap_atomic.
 */
static void vmw_dummy_query_bo_prepare(struct vmw_private *dev_priv)
{
	struct ttm_bo_kmap_obj map;
	volatile SVGA3dQueryResult *result;
	bool dummy;
	int ret;
	struct ttm_bo_device *bdev = &dev_priv->bdev;
	struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;

	ttm_bo_reserve(bo, false, false, false, 0);
	spin_lock(&bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, false, false);
	spin_unlock(&bdev->fence_lock);
	if (unlikely(ret != 0))
		(void) vmw_fallback_wait(dev_priv, false, true, 0, false,
					 10*HZ);

	ret = ttm_bo_kmap(bo, 0, 1, &map);
	if (likely(ret == 0)) {
		result = ttm_kmap_obj_virtual(&map, &dummy);
		result->totalSize = sizeof(*result);
		result->state = SVGA3D_QUERYSTATE_PENDING;
		result->result32 = 0xff;
		ttm_bo_kunmap(&map);
	} else
		DRM_ERROR("Dummy query buffer map failed.\n");
	ttm_bo_unreserve(bo);
}
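
/*
 * Untested sketch of the kmap_atomic optimization suggested in the TODO
 * above, assuming the buffer currently resides in system memory; a bo
 * placed in VRAM would need an io-mapping variant instead:
 *
 *	struct page *page = bo->ttm->pages[0];
 *	volatile SVGA3dQueryResult *result = kmap_atomic(page);
 *
 *	result->totalSize = sizeof(*result);
 *	result->state = SVGA3D_QUERYSTATE_PENDING;
 *	result->result32 = 0xff;
 *	kunmap_atomic((void *) result);
 */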


/**
 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
 *
 * @dev_priv: A device private structure.
 *
 * This function creates a small buffer object that holds the query
 * result for dummy queries emitted as query barriers.
 * No interruptible waits are done within this function.
 *
 * Returns an error if bo creation fails.
 */
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
	return ttm_bo_create(&dev_priv->bdev,
			     PAGE_SIZE,
			     ttm_bo_type_device,
			     &vmw_vram_sys_placement,
			     0, 0, false, NULL,
			     &dev_priv->dummy_query_bo);
}


static int vmw_request_device(struct vmw_private *dev_priv)
{
	int ret;

	ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Unable to initialize FIFO.\n");
		return ret;
	}
	vmw_fence_fifo_up(dev_priv->fman);
	ret = vmw_dummy_query_bo_create(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_query_bo;
	vmw_dummy_query_bo_prepare(dev_priv);

	return 0;

out_no_query_bo:
	vmw_fence_fifo_down(dev_priv->fman);
	vmw_fifo_release(dev_priv, &dev_priv->fifo);
	return ret;
}

static void vmw_release_device(struct vmw_private *dev_priv)
{
	/*
	 * Previous destructions should've released
	 * the pinned bo.
	 */

	BUG_ON(dev_priv->pinned_bo != NULL);

	ttm_bo_unref(&dev_priv->dummy_query_bo);
	vmw_fence_fifo_down(dev_priv->fman);
	vmw_fifo_release(dev_priv, &dev_priv->fifo);
}

/**
 * vmw_3d_resource_inc - Increase the 3d resource refcount.
 *
 * If the count was previously zero, initialize the fifo, switching to svga
 * mode. Note that the master holds a ref as well, and may request an
 * explicit switch to svga mode if fb is not running, using @unhide_svga.
 */
int vmw_3d_resource_inc(struct vmw_private *dev_priv,
			bool unhide_svga)
{
	int ret = 0;

	mutex_lock(&dev_priv->release_mutex);
	if (unlikely(dev_priv->num_3d_resources++ == 0)) {
		ret = vmw_request_device(dev_priv);
		if (unlikely(ret != 0))
			--dev_priv->num_3d_resources;
	} else if (unhide_svga) {
		mutex_lock(&dev_priv->hw_mutex);
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  vmw_read(dev_priv, SVGA_REG_ENABLE) &
			  ~SVGA_REG_ENABLE_HIDE);
		mutex_unlock(&dev_priv->hw_mutex);
	}

	mutex_unlock(&dev_priv->release_mutex);
	return ret;
}

/**
 * vmw_3d_resource_dec - Decrease the 3d resource refcount.
 *
 * If the count reaches zero, disable the fifo, switching to vga mode.
 * Note that the master holds a refcount as well, and may request an
 * explicit switch to vga mode when it releases its refcount to account
 * for the situation of an X server vt switch to VGA with 3d resources
 * active.
 */
void vmw_3d_resource_dec(struct vmw_private *dev_priv,
			 bool hide_svga)
{
	int32_t n3d;

	mutex_lock(&dev_priv->release_mutex);
	if (unlikely(--dev_priv->num_3d_resources == 0))
		vmw_release_device(dev_priv);
	else if (hide_svga) {
		mutex_lock(&dev_priv->hw_mutex);
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  vmw_read(dev_priv, SVGA_REG_ENABLE) |
			  SVGA_REG_ENABLE_HIDE);
		mutex_unlock(&dev_priv->hw_mutex);
	}

	n3d = (int32_t) dev_priv->num_3d_resources;
	mutex_unlock(&dev_priv->release_mutex);

	BUG_ON(n3d < 0);
}
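
/*
 * Illustrative usage of the refcount pair above, mirroring what
 * vmw_driver_load() does around its fifo-dependent setup:
 *
 *	ret = vmw_3d_resource_inc(dev_priv, true);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	... work that needs the fifo / svga mode ...
 *	vmw_3d_resource_dec(dev_priv, true);
 */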

static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
	struct vmw_private *dev_priv;
	int ret;
	uint32_t svga_id;

	/* kzalloc() already returns zeroed memory; no memset needed. */
	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (unlikely(dev_priv == NULL)) {
		DRM_ERROR("Failed allocating a device private struct.\n");
		return -ENOMEM;
	}

	dev_priv->dev = dev;
	dev_priv->vmw_chipset = chipset;
	dev_priv->last_read_seqno = (uint32_t) -100;
	mutex_init(&dev_priv->hw_mutex);
	mutex_init(&dev_priv->cmdbuf_mutex);
	mutex_init(&dev_priv->release_mutex);
	rwlock_init(&dev_priv->resource_lock);
	idr_init(&dev_priv->context_idr);
	idr_init(&dev_priv->surface_idr);
	idr_init(&dev_priv->stream_idr);
	mutex_init(&dev_priv->init_mutex);
	init_waitqueue_head(&dev_priv->fence_queue);
	init_waitqueue_head(&dev_priv->fifo_queue);
	dev_priv->fence_queue_waiters = 0;
	atomic_set(&dev_priv->fifo_queue_waiters, 0);
	INIT_LIST_HEAD(&dev_priv->surface_lru);
	dev_priv->used_memory_size = 0;

	dev_priv->io_start = pci_resource_start(dev->pdev, 0);
	dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
	dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);

	dev_priv->enable_fb = enable_fbdev;

	mutex_lock(&dev_priv->hw_mutex);

	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	svga_id = vmw_read(dev_priv, SVGA_REG_ID);
	if (svga_id != SVGA_ID_2) {
		ret = -ENOSYS;
		DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
		mutex_unlock(&dev_priv->hw_mutex);
		goto out_err0;
	}

	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);

	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
	dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);
	if (dev_priv->capabilities & SVGA_CAP_GMR) {
		dev_priv->max_gmr_descriptors =
			vmw_read(dev_priv,
				 SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH);
		dev_priv->max_gmr_ids =
			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
	}
	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		dev_priv->max_gmr_pages =
			vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
		dev_priv->memory_size =
			vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
		dev_priv->memory_size -= dev_priv->vram_size;
	} else {
		/*
		 * An arbitrary limit of 512MiB on surface
		 * memory. But all HWV8 hardware supports GMR2.
		 */
		dev_priv->memory_size = 512*1024*1024;
	}

	mutex_unlock(&dev_priv->hw_mutex);

	vmw_print_capabilities(dev_priv->capabilities);

	if (dev_priv->capabilities & SVGA_CAP_GMR) {
		DRM_INFO("Max GMR ids is %u\n",
			 (unsigned)dev_priv->max_gmr_ids);
		DRM_INFO("Max GMR descriptors is %u\n",
			 (unsigned)dev_priv->max_gmr_descriptors);
	}
	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		DRM_INFO("Max number of GMR pages is %u\n",
			 (unsigned)dev_priv->max_gmr_pages);
		DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
			 (unsigned)dev_priv->memory_size / 1024);
	}
	DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
		 dev_priv->vram_start, dev_priv->vram_size / 1024);
	DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
		 dev_priv->mmio_start, dev_priv->mmio_size / 1024);

	ret = vmw_ttm_global_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;

	vmw_master_init(&dev_priv->fbdev_master);
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	dev_priv->active_master = &dev_priv->fbdev_master;

	ret = ttm_bo_device_init(&dev_priv->bdev,
				 dev_priv->bo_global_ref.ref.object,
				 &vmw_bo_driver, VMWGFX_FILE_PAGE_OFFSET,
				 false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
		goto out_err1;
	}

	dev_priv->mmio_mtrr = drm_mtrr_add(dev_priv->mmio_start,
					   dev_priv->mmio_size, DRM_MTRR_WC);

	dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start,
					 dev_priv->mmio_size);

	if (unlikely(dev_priv->mmio_virt == NULL)) {
		ret = -ENOMEM;
		DRM_ERROR("Failed mapping MMIO.\n");
		goto out_err3;
	}

	/* Need mmio memory to check for fifo pitchlock cap. */
	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
	    !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
	    !vmw_fifo_have_pitchlock(dev_priv)) {
		ret = -ENOSYS;
		DRM_ERROR("Hardware has no pitchlock\n");
		goto out_err4;
	}

	dev_priv->tdev = ttm_object_device_init
	    (dev_priv->mem_global_ref.object, 12);

	if (unlikely(dev_priv->tdev == NULL)) {
		DRM_ERROR("Unable to initialize TTM object management.\n");
		ret = -ENOMEM;
		goto out_err4;
	}

	dev->dev_private = dev_priv;

	ret = pci_request_regions(dev->pdev, "vmwgfx probe");
	dev_priv->stealth = (ret != 0);
	if (dev_priv->stealth) {
		/**
		 * Request at least the mmio PCI resource.
		 */

		DRM_INFO("It appears that vesafb is loaded. "
			 "Ignore above error if any.\n");
		ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
			goto out_no_device;
		}
	}

	dev_priv->fman = vmw_fence_manager_init(dev_priv);
	if (unlikely(dev_priv->fman == NULL))
		goto out_no_fman;

	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
			     (dev_priv->vram_size >> PAGE_SHIFT));
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
		goto out_no_vram;
	}

	dev_priv->has_gmr = true;
	if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
			   dev_priv->max_gmr_ids) != 0) {
		DRM_INFO("No GMR memory available. "
			 "Graphics memory resources are very limited.\n");
		dev_priv->has_gmr = false;
	}

	/* Need to start the fifo to check if we can do screen objects */
	ret = vmw_3d_resource_inc(dev_priv, true);
	if (unlikely(ret != 0))
		goto out_no_fifo;
	vmw_kms_save_vga(dev_priv);

	/* Start kms and overlay systems, needs fifo. */
	ret = vmw_kms_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_kms;
	vmw_overlay_init(dev_priv);

	/* 3D Depends on Screen Objects being used. */
	DRM_INFO("Detected %sdevice 3D availability.\n",
		 vmw_fifo_have_3d(dev_priv) ?
		 "" : "no ");

	/* We might be done with the fifo now */
	if (dev_priv->enable_fb) {
		vmw_fb_init(dev_priv);
	} else {
		vmw_kms_restore_vga(dev_priv);
		vmw_3d_resource_dec(dev_priv, true);
	}

	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
		ret = drm_irq_install(dev);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed installing irq: %d\n", ret);
			goto out_no_irq;
		}
	}

	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
	register_pm_notifier(&dev_priv->pm_nb);

	return 0;

out_no_irq:
	if (dev_priv->enable_fb)
		vmw_fb_close(dev_priv);
	vmw_overlay_close(dev_priv);
	vmw_kms_close(dev_priv);
out_no_kms:
	/* We still have a 3D resource reference held */
	if (dev_priv->enable_fb) {
		vmw_kms_restore_vga(dev_priv);
		vmw_3d_resource_dec(dev_priv, false);
	}
out_no_fifo:
	if (dev_priv->has_gmr)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_no_vram:
	vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);
out_no_device:
	ttm_object_device_release(&dev_priv->tdev);
out_err4:
	iounmap(dev_priv->mmio_virt);
out_err3:
	drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
		     dev_priv->mmio_size, DRM_MTRR_WC);
	(void)ttm_bo_device_release(&dev_priv->bdev);
out_err1:
	vmw_ttm_global_release(dev_priv);
out_err0:
	idr_destroy(&dev_priv->surface_idr);
	idr_destroy(&dev_priv->context_idr);
	idr_destroy(&dev_priv->stream_idr);
	kfree(dev_priv);
	return ret;
}

static int vmw_driver_unload(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);

	unregister_pm_notifier(&dev_priv->pm_nb);

	if (dev_priv->ctx.cmd_bounce)
		vfree(dev_priv->ctx.cmd_bounce);
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		drm_irq_uninstall(dev_priv->dev);
	if (dev_priv->enable_fb) {
		vmw_fb_close(dev_priv);
		vmw_kms_restore_vga(dev_priv);
		vmw_3d_resource_dec(dev_priv, false);
	}
	vmw_kms_close(dev_priv);
	vmw_overlay_close(dev_priv);

	if (dev_priv->has_gmr)
		(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);

	vmw_fence_manager_takedown(dev_priv->fman);
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);

	ttm_object_device_release(&dev_priv->tdev);
	iounmap(dev_priv->mmio_virt);
	drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
		     dev_priv->mmio_size, DRM_MTRR_WC);
	(void)ttm_bo_device_release(&dev_priv->bdev);
	vmw_ttm_global_release(dev_priv);
	idr_destroy(&dev_priv->surface_idr);
	idr_destroy(&dev_priv->context_idr);
	idr_destroy(&dev_priv->stream_idr);

	kfree(dev_priv);

	return 0;
}

static void vmw_postclose(struct drm_device *dev,
			 struct drm_file *file_priv)
{
	struct vmw_fpriv *vmw_fp;

	vmw_fp = vmw_fpriv(file_priv);
	ttm_object_file_release(&vmw_fp->tfile);
	if (vmw_fp->locked_master)
		drm_master_put(&vmw_fp->locked_master);
	kfree(vmw_fp);
}

static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp;
	int ret = -ENOMEM;

	vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
	if (unlikely(vmw_fp == NULL))
		return ret;

	vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
	if (unlikely(vmw_fp->tfile == NULL))
		goto out_no_tfile;

	file_priv->driver_priv = vmw_fp;

	if (unlikely(dev_priv->bdev.dev_mapping == NULL))
		dev_priv->bdev.dev_mapping =
			file_priv->filp->f_path.dentry->d_inode->i_mapping;

	return 0;

out_no_tfile:
	kfree(vmw_fp);
	return ret;
}

static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
			       unsigned long arg)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	unsigned int nr = DRM_IOCTL_NR(cmd);

	/*
	 * Do extra checking on driver private ioctls.
	 */

	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
	    && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
		struct drm_ioctl_desc *ioctl =
		    &vmw_ioctls[nr - DRM_COMMAND_BASE];

		if (unlikely(ioctl->cmd_drv != cmd)) {
			DRM_ERROR("Invalid command format, ioctl %d\n",
				  nr - DRM_COMMAND_BASE);
			return -EINVAL;
		}
	}

	return drm_ioctl(filp, cmd, arg);
}
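
/*
 * Example of what the cmd_drv check above catches: DRM_IOWR() encodes
 * the transfer direction and argument size into the ioctl number, e.g.
 *
 *	DRM_IOCTL_VMW_GET_PARAM ==
 *		DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM,
 *			 struct drm_vmw_getparam_arg)
 *
 * so userspace built against headers with a differently sized argument
 * struct presents a cmd that no longer matches ioctl->cmd_drv and is
 * rejected with -EINVAL instead of having its argument mis-copied.
 */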

static int vmw_firstopen(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	dev_priv->is_opened = true;

	return 0;
}

static void vmw_lastclose(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_crtc *crtc;
	struct drm_mode_set set;
	int ret;

	/**
	 * Do nothing on the lastclose call from drm_unload.
	 */

	if (!dev_priv->is_opened)
		return;

	dev_priv->is_opened = false;
	set.x = 0;
	set.y = 0;
	set.fb = NULL;
	set.mode = NULL;
	set.connectors = NULL;
	set.num_connectors = 0;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		set.crtc = crtc;
		ret = crtc->funcs->set_config(&set);
		WARN_ON(ret != 0);
	}
}

static void vmw_master_init(struct vmw_master *vmaster)
{
	ttm_lock_init(&vmaster->lock);
	INIT_LIST_HEAD(&vmaster->fb_surf);
	mutex_init(&vmaster->fb_surf_mutex);
}

static int vmw_master_create(struct drm_device *dev,
			     struct drm_master *master)
{
	struct vmw_master *vmaster;

	vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
	if (unlikely(vmaster == NULL))
		return -ENOMEM;

	vmw_master_init(vmaster);
	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
	master->driver_priv = vmaster;

	return 0;
}

static void vmw_master_destroy(struct drm_device *dev,
			       struct drm_master *master)
{
	struct vmw_master *vmaster = vmw_master(master);

	master->driver_priv = NULL;
	kfree(vmaster);
}

static int vmw_master_set(struct drm_device *dev,
			  struct drm_file *file_priv,
			  bool from_open)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *active = dev_priv->active_master;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret = 0;

	if (!dev_priv->enable_fb) {
		ret = vmw_3d_resource_inc(dev_priv, true);
		if (unlikely(ret != 0))
			return ret;
		vmw_kms_save_vga(dev_priv);
		mutex_lock(&dev_priv->hw_mutex);
		vmw_write(dev_priv, SVGA_REG_TRACES, 0);
		mutex_unlock(&dev_priv->hw_mutex);
	}

	if (active) {
		BUG_ON(active != &dev_priv->fbdev_master);
		ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
		if (unlikely(ret != 0))
			goto out_no_active_lock;

		ttm_lock_set_kill(&active->lock, true, SIGTERM);
		ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Unable to clean VRAM on "
				  "master drop.\n");
		}

		dev_priv->active_master = NULL;
	}

	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
	if (!from_open) {
		ttm_vt_unlock(&vmaster->lock);
		BUG_ON(vmw_fp->locked_master != file_priv->master);
		drm_master_put(&vmw_fp->locked_master);
	}

	dev_priv->active_master = vmaster;

	return 0;

out_no_active_lock:
	if (!dev_priv->enable_fb) {
		mutex_lock(&dev_priv->hw_mutex);
		vmw_write(dev_priv, SVGA_REG_TRACES, 1);
		mutex_unlock(&dev_priv->hw_mutex);
		vmw_kms_restore_vga(dev_priv);
		vmw_3d_resource_dec(dev_priv, true);
	}
	return ret;
}

static void vmw_master_drop(struct drm_device *dev,
			    struct drm_file *file_priv,
			    bool from_release)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	/**
	 * Make sure the master doesn't disappear while we have
	 * it locked.
	 */

	vmw_fp->locked_master = drm_master_get(file_priv->master);
	ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
	vmw_execbuf_release_pinned_bo(dev_priv, false, 0);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Unable to lock TTM at VT switch.\n");
		drm_master_put(&vmw_fp->locked_master);
	}

	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);

	if (!dev_priv->enable_fb) {
		ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
		if (unlikely(ret != 0))
			DRM_ERROR("Unable to clean VRAM on master drop.\n");
		mutex_lock(&dev_priv->hw_mutex);
		vmw_write(dev_priv, SVGA_REG_TRACES, 1);
		mutex_unlock(&dev_priv->hw_mutex);
		vmw_kms_restore_vga(dev_priv);
		vmw_3d_resource_dec(dev_priv, true);
	}

	dev_priv->active_master = &dev_priv->fbdev_master;
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	ttm_vt_unlock(&dev_priv->fbdev_master.lock);

	if (dev_priv->enable_fb)
		vmw_fb_on(dev_priv);
}

static void vmw_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	drm_put_dev(dev);
}

static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr)
{
	struct vmw_private *dev_priv =
		container_of(nb, struct vmw_private, pm_nb);
	struct vmw_master *vmaster = dev_priv->active_master;

	switch (val) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		ttm_suspend_lock(&vmaster->lock);

		/**
		 * This empties VRAM and unbinds all GMR bindings.
		 * Buffer contents are moved to swappable memory.
		 */
		vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
		ttm_bo_swapout_all(&dev_priv->bdev);

		break;
	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
	case PM_POST_RESTORE:
		ttm_suspend_unlock(&vmaster->lock);

		break;
	case PM_RESTORE_PREPARE:
		break;
	default:
		break;
	}
	return 0;
}

/**
 * These might not be needed with the virtual SVGA device.
 */

static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	if (dev_priv->num_3d_resources != 0) {
		DRM_INFO("Can't suspend or hibernate "
			 "while 3D resources are active.\n");
		return -EBUSY;
	}

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}

static int vmw_pci_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	return pci_enable_device(pdev);
}

static int vmw_pm_suspend(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct pm_message dummy;

	dummy.event = 0;

	return vmw_pci_suspend(pdev, dummy);
}

static int vmw_pm_resume(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);

	return vmw_pci_resume(pdev);
}

static int vmw_pm_prepare(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	/**
	 * Release 3d reference held by fbdev and potentially
	 * stop fifo.
	 */
	dev_priv->suspended = true;
	if (dev_priv->enable_fb)
		vmw_3d_resource_dec(dev_priv, true);

	if (dev_priv->num_3d_resources != 0) {

		DRM_INFO("Can't suspend or hibernate "
			 "while 3D resources are active.\n");

		if (dev_priv->enable_fb)
			vmw_3d_resource_inc(dev_priv, true);
		dev_priv->suspended = false;
		return -EBUSY;
	}

	return 0;
}

static void vmw_pm_complete(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	mutex_lock(&dev_priv->hw_mutex);
	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	(void) vmw_read(dev_priv, SVGA_REG_ID);
	mutex_unlock(&dev_priv->hw_mutex);

	/**
	 * Reclaim 3d reference held by fbdev and potentially
	 * start fifo.
	 */
	if (dev_priv->enable_fb)
		vmw_3d_resource_inc(dev_priv, false);

	dev_priv->suspended = false;
}

static const struct dev_pm_ops vmw_pm_ops = {
	.prepare = vmw_pm_prepare,
	.complete = vmw_pm_complete,
	.suspend = vmw_pm_suspend,
	.resume = vmw_pm_resume,
};

static struct drm_driver driver = {
	.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
	DRIVER_MODESET,
	.load = vmw_driver_load,
	.unload = vmw_driver_unload,
	.firstopen = vmw_firstopen,
	.lastclose = vmw_lastclose,
	.irq_preinstall = vmw_irq_preinstall,
	.irq_postinstall = vmw_irq_postinstall,
	.irq_uninstall = vmw_irq_uninstall,
	.irq_handler = vmw_irq_handler,
	.get_vblank_counter = vmw_get_vblank_counter,
	.enable_vblank = vmw_enable_vblank,
	.disable_vblank = vmw_disable_vblank,
	.reclaim_buffers_locked = NULL,
	.ioctls = vmw_ioctls,
	.num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls),
	.dma_quiescent = NULL,	/*vmw_dma_quiescent, */
	.master_create = vmw_master_create,
	.master_destroy = vmw_master_destroy,
	.master_set = vmw_master_set,
	.master_drop = vmw_master_drop,
	.open = vmw_driver_open,
	.postclose = vmw_postclose,

	.dumb_create = vmw_dumb_create,
	.dumb_map_offset = vmw_dumb_map_offset,
	.dumb_destroy = vmw_dumb_destroy,

	.fops = {
		 .owner = THIS_MODULE,
		 .open = drm_open,
		 .release = drm_release,
		 .unlocked_ioctl = vmw_unlocked_ioctl,
		 .mmap = vmw_mmap,
		 .poll = vmw_fops_poll,
		 .read = vmw_fops_read,
		 .fasync = drm_fasync,
#if defined(CONFIG_COMPAT)
		 .compat_ioctl = drm_compat_ioctl,
#endif
		 .llseek = noop_llseek,
	},
	.name = VMWGFX_DRIVER_NAME,
	.desc = VMWGFX_DRIVER_DESC,
	.date = VMWGFX_DRIVER_DATE,
	.major = VMWGFX_DRIVER_MAJOR,
	.minor = VMWGFX_DRIVER_MINOR,
	.patchlevel = VMWGFX_DRIVER_PATCHLEVEL
};

static struct pci_driver vmw_pci_driver = {
	.name = VMWGFX_DRIVER_NAME,
	.id_table = vmw_pci_id_list,
	.probe = vmw_probe,
	.remove = vmw_remove,
	.driver = {
		.pm = &vmw_pm_ops
	}
};

static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	return drm_get_pci_dev(pdev, ent, &driver);
}

static int __init vmwgfx_init(void)
{
	int ret;

#ifdef CONFIG_VGA_CONSOLE
	if (vgacon_text_force())
		return -EINVAL;
#endif

	ret = drm_pci_init(&driver, &vmw_pci_driver);
	if (ret)
		DRM_ERROR("Failed initializing DRM.\n");
	return ret;
}

static void __exit vmwgfx_exit(void)
{
	drm_pci_exit(&driver, &vmw_pci_driver);
}

module_init(vmwgfx_init);
module_exit(vmwgfx_exit);

MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
	       __stringify(VMWGFX_DRIVER_MINOR) "."
	       __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
	       "0");