drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
1 /**************************************************************************
2  *
3  * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4  * All Rights Reserved.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  **************************************************************************/
27
28 #include "vmwgfx_drv.h"
29 #include "vmwgfx_drm.h"
30 #include "ttm/ttm_object.h"
31 #include "ttm/ttm_placement.h"
32 #include "drmP.h"
33
34 struct vmw_user_context {
35         struct ttm_base_object base;
36         struct vmw_resource res;
37 };
38
39 struct vmw_user_surface {
40         struct ttm_base_object base;
41         struct vmw_surface srf;
42         uint32_t size;
43 };
44
45 struct vmw_user_dma_buffer {
46         struct ttm_base_object base;
47         struct vmw_dma_buffer dma;
48 };
49
50 struct vmw_bo_user_rep {
51         uint32_t handle;
52         uint64_t map_handle;
53 };
54
55 struct vmw_stream {
56         struct vmw_resource res;
57         uint32_t stream_id;
58 };
59
60 struct vmw_user_stream {
61         struct ttm_base_object base;
62         struct vmw_stream stream;
63 };
64
65 struct vmw_surface_offset {
66         uint32_t face;
67         uint32_t mip;
68         uint32_t bo_offset;
69 };
70
71
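/*
 * Per-object accounting sizes, charged against the TTM memory global
 * when the corresponding user-space objects are created and refunded
 * when they are freed. They are computed lazily on first use as
 * ttm_round_pot() of the object size plus a rough 128-byte allowance
 * for idr bookkeeping (see e.g. vmw_context_define_ioctl()).
 */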
72 static uint64_t vmw_user_context_size;
73 static uint64_t vmw_user_surface_size;
74 static uint64_t vmw_user_stream_size;
75
76 static inline struct vmw_dma_buffer *
77 vmw_dma_buffer(struct ttm_buffer_object *bo)
78 {
79         return container_of(bo, struct vmw_dma_buffer, base);
80 }
81
82 static inline struct vmw_user_dma_buffer *
83 vmw_user_dma_buffer(struct ttm_buffer_object *bo)
84 {
85         struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
86         return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
87 }
88
89 struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
90 {
91         kref_get(&res->kref);
92         return res;
93 }
94
95
96 /**
97  * vmw_resource_release_id - release a resource id to the id manager.
98  *
99  * @res: Pointer to the resource.
100  *
101  * Release the resource id to the id manager and set @res->id to -1.
102  */
103 static void vmw_resource_release_id(struct vmw_resource *res)
104 {
105         struct vmw_private *dev_priv = res->dev_priv;
106
107         write_lock(&dev_priv->resource_lock);
108         if (res->id != -1)
109                 idr_remove(res->idr, res->id);
110         res->id = -1;
111         write_unlock(&dev_priv->resource_lock);
112 }
113
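/*
 * Note on locking: vmw_resource_release() is called from kref_put()
 * with dev_priv->resource_lock held for writing (see
 * vmw_resource_unreference()). It drops the lock around the hw_destroy
 * and res_free callbacks and retakes it before removing the id, so the
 * caller only needs to unlock after kref_put() returns.
 */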
114 static void vmw_resource_release(struct kref *kref)
115 {
116         struct vmw_resource *res =
117             container_of(kref, struct vmw_resource, kref);
118         struct vmw_private *dev_priv = res->dev_priv;
119         int id = res->id;
120         struct idr *idr = res->idr;
121
122         res->avail = false;
123         if (res->remove_from_lists != NULL)
124                 res->remove_from_lists(res);
125         write_unlock(&dev_priv->resource_lock);
126
127         if (likely(res->hw_destroy != NULL))
128                 res->hw_destroy(res);
129
130         if (res->res_free != NULL)
131                 res->res_free(res);
132         else
133                 kfree(res);
134
135         write_lock(&dev_priv->resource_lock);
136
137         if (id != -1)
138                 idr_remove(idr, id);
139 }
140
141 void vmw_resource_unreference(struct vmw_resource **p_res)
142 {
143         struct vmw_resource *res = *p_res;
144         struct vmw_private *dev_priv = res->dev_priv;
145
146         *p_res = NULL;
147         write_lock(&dev_priv->resource_lock);
148         kref_put(&res->kref, vmw_resource_release);
149         write_unlock(&dev_priv->resource_lock);
150 }
151
152
153 /**
154  * vmw_resource_alloc_id - allocate a resource id from the id manager.
155  *
156  * @dev_priv: Pointer to the device private structure.
157  * @res: Pointer to the resource.
158  *
159  * Allocate the lowest free resource id from the id manager, and set
160  * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
161  */
162 static int vmw_resource_alloc_id(struct vmw_private *dev_priv,
163                                  struct vmw_resource *res)
164 {
165         int ret;
166
167         BUG_ON(res->id != -1);
168
169         do {
170                 if (unlikely(idr_pre_get(res->idr, GFP_KERNEL) == 0))
171                         return -ENOMEM;
172
173                 write_lock(&dev_priv->resource_lock);
174                 ret = idr_get_new_above(res->idr, res, 1, &res->id);
175                 write_unlock(&dev_priv->resource_lock);
176
177         } while (ret == -EAGAIN);
178
179         return ret;
180 }
181
182
183 static int vmw_resource_init(struct vmw_private *dev_priv,
184                              struct vmw_resource *res,
185                              struct idr *idr,
186                              enum ttm_object_type obj_type,
187                              bool delay_id,
188                              void (*res_free) (struct vmw_resource *res),
189                              void (*remove_from_lists)
190                              (struct vmw_resource *res))
191 {
192         kref_init(&res->kref);
193         res->hw_destroy = NULL;
194         res->res_free = res_free;
195         res->remove_from_lists = remove_from_lists;
196         res->res_type = obj_type;
197         res->idr = idr;
198         res->avail = false;
199         res->dev_priv = dev_priv;
200         INIT_LIST_HEAD(&res->query_head);
201         INIT_LIST_HEAD(&res->validate_head);
202         res->id = -1;
203         if (delay_id)
204                 return 0;
205         else
206                 return vmw_resource_alloc_id(dev_priv, res);
207 }
208
209 /**
210  * vmw_resource_activate
211  *
212  * @res:        Pointer to the newly created resource
213  * @hw_destroy: Destroy function. NULL if none.
214  *
215  * Activate a resource after the hardware has been made aware of it.
216  * Set the destroy function to @hw_destroy. Typically this frees the
217  * resource and destroys the hardware resources associated with it.
218  * Activating a resource means that vmw_resource_lookup will be able
219  * to find it.
220  */
221
222 static void vmw_resource_activate(struct vmw_resource *res,
223                                   void (*hw_destroy) (struct vmw_resource *))
224 {
225         struct vmw_private *dev_priv = res->dev_priv;
226
227         write_lock(&dev_priv->resource_lock);
228         res->avail = true;
229         res->hw_destroy = hw_destroy;
230         write_unlock(&dev_priv->resource_lock);
231 }
232
233 struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
234                                          struct idr *idr, int id)
235 {
236         struct vmw_resource *res;
237
238         read_lock(&dev_priv->resource_lock);
239         res = idr_find(idr, id);
240         if (res && res->avail)
241                 kref_get(&res->kref);
242         else
243                 res = NULL;
244         read_unlock(&dev_priv->resource_lock);
245
246         if (unlikely(res == NULL))
247                 return NULL;
248
249         return res;
250 }
251
252 /**
253  * Context management:
254  */
255
256 static void vmw_hw_context_destroy(struct vmw_resource *res)
257 {
258
259         struct vmw_private *dev_priv = res->dev_priv;
260         struct {
261                 SVGA3dCmdHeader header;
262                 SVGA3dCmdDestroyContext body;
263         } *cmd;
264
265
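        /*
         * Release the query result buffer pinned by command submission,
         * if any, that still refers to this context, before the
         * hardware context itself is destroyed.
         */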
266         vmw_execbuf_release_pinned_bo(dev_priv, true, res->id);
267
268         cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
269         if (unlikely(cmd == NULL)) {
270                 DRM_ERROR("Failed reserving FIFO space for context "
271                           "destruction.\n");
272                 return;
273         }
274
275         cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
276         cmd->header.size = cpu_to_le32(sizeof(cmd->body));
277         cmd->body.cid = cpu_to_le32(res->id);
278
279         vmw_fifo_commit(dev_priv, sizeof(*cmd));
280         vmw_3d_resource_dec(dev_priv, false);
281 }
282
283 static int vmw_context_init(struct vmw_private *dev_priv,
284                             struct vmw_resource *res,
285                             void (*res_free) (struct vmw_resource *res))
286 {
287         int ret;
288
289         struct {
290                 SVGA3dCmdHeader header;
291                 SVGA3dCmdDefineContext body;
292         } *cmd;
293
294         ret = vmw_resource_init(dev_priv, res, &dev_priv->context_idr,
295                                 VMW_RES_CONTEXT, false, res_free, NULL);
296
297         if (unlikely(ret != 0)) {
298                 DRM_ERROR("Failed to allocate a resource id.\n");
299                 goto out_early;
300         }
301
302         if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
303                 DRM_ERROR("Out of hw context ids.\n");
304                 vmw_resource_unreference(&res);
305                 return -ENOMEM;
306         }
307
308         cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
309         if (unlikely(cmd == NULL)) {
310                 DRM_ERROR("Fifo reserve failed.\n");
311                 vmw_resource_unreference(&res);
312                 return -ENOMEM;
313         }
314
315         cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
316         cmd->header.size = cpu_to_le32(sizeof(cmd->body));
317         cmd->body.cid = cpu_to_le32(res->id);
318
319         vmw_fifo_commit(dev_priv, sizeof(*cmd));
320         (void) vmw_3d_resource_inc(dev_priv, false);
321         vmw_resource_activate(res, vmw_hw_context_destroy);
322         return 0;
323
324 out_early:
325         if (res_free == NULL)
326                 kfree(res);
327         else
328                 res_free(res);
329         return ret;
330 }
331
332 struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
333 {
334         struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
335         int ret;
336
337         if (unlikely(res == NULL))
338                 return NULL;
339
340         ret = vmw_context_init(dev_priv, res, NULL);
341         return (ret == 0) ? res : NULL;
342 }
343
344 /**
345  * User-space context management:
346  */
347
348 static void vmw_user_context_free(struct vmw_resource *res)
349 {
350         struct vmw_user_context *ctx =
351             container_of(res, struct vmw_user_context, res);
352         struct vmw_private *dev_priv = res->dev_priv;
353
354         kfree(ctx);
355         ttm_mem_global_free(vmw_mem_glob(dev_priv),
356                             vmw_user_context_size);
357 }
358
359 /**
360  * This function is called when user space has no more references on the
361  * base object. It releases the base-object's reference on the resource object.
362  */
363
364 static void vmw_user_context_base_release(struct ttm_base_object **p_base)
365 {
366         struct ttm_base_object *base = *p_base;
367         struct vmw_user_context *ctx =
368             container_of(base, struct vmw_user_context, base);
369         struct vmw_resource *res = &ctx->res;
370
371         *p_base = NULL;
372         vmw_resource_unreference(&res);
373 }
374
375 int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
376                               struct drm_file *file_priv)
377 {
378         struct vmw_private *dev_priv = vmw_priv(dev);
379         struct vmw_resource *res;
380         struct vmw_user_context *ctx;
381         struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
382         struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
383         int ret = 0;
384
385         res = vmw_resource_lookup(dev_priv, &dev_priv->context_idr, arg->cid);
386         if (unlikely(res == NULL))
387                 return -EINVAL;
388
389         if (res->res_free != &vmw_user_context_free) {
390                 ret = -EINVAL;
391                 goto out;
392         }
393
394         ctx = container_of(res, struct vmw_user_context, res);
395         if (ctx->base.tfile != tfile && !ctx->base.shareable) {
396                 ret = -EPERM;
397                 goto out;
398         }
399
400         ttm_ref_object_base_unref(tfile, ctx->base.hash.key, TTM_REF_USAGE);
401 out:
402         vmw_resource_unreference(&res);
403         return ret;
404 }
405
406 int vmw_context_define_ioctl(struct drm_device *dev, void *data,
407                              struct drm_file *file_priv)
408 {
409         struct vmw_private *dev_priv = vmw_priv(dev);
410         struct vmw_user_context *ctx;
411         struct vmw_resource *res;
412         struct vmw_resource *tmp;
413         struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
414         struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
415         struct vmw_master *vmaster = vmw_master(file_priv->master);
416         int ret;
417
418
419         /*
420          * Approximate idr memory usage with 128 bytes. It will be limited
421          * by the maximum number of contexts anyway.
422          */
423
424         if (unlikely(vmw_user_context_size == 0))
425                 vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128;
426
427         ret = ttm_read_lock(&vmaster->lock, true);
428         if (unlikely(ret != 0))
429                 return ret;
430
431         ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
432                                    vmw_user_context_size,
433                                    false, true);
434         if (unlikely(ret != 0)) {
435                 if (ret != -ERESTARTSYS)
436                         DRM_ERROR("Out of graphics memory for context"
437                                   " creation.\n");
438                 goto out_unlock;
439         }
440
441         ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
442         if (unlikely(ctx == NULL)) {
443                 ttm_mem_global_free(vmw_mem_glob(dev_priv),
444                                     vmw_user_context_size);
445                 ret = -ENOMEM;
446                 goto out_unlock;
447         }
448
449         res = &ctx->res;
450         ctx->base.shareable = false;
451         ctx->base.tfile = NULL;
452
453         /*
454          * From here on, the destructor takes over resource freeing.
455          */
456
457         ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
458         if (unlikely(ret != 0))
459                 goto out_unlock;
460
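        /*
         * ctx->res holds one reference from vmw_context_init(). Take a
         * second one for the ttm base object (dropped again in
         * vmw_user_context_base_release() when user-space closes the
         * handle); the local reference is dropped at out_err below on
         * both the success and failure paths.
         */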
461         tmp = vmw_resource_reference(&ctx->res);
462         ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
463                                    &vmw_user_context_base_release, NULL);
464
465         if (unlikely(ret != 0)) {
466                 vmw_resource_unreference(&tmp);
467                 goto out_err;
468         }
469
470         arg->cid = res->id;
471 out_err:
472         vmw_resource_unreference(&res);
473 out_unlock:
474         ttm_read_unlock(&vmaster->lock);
475         return ret;
476
477 }
478
479 int vmw_context_check(struct vmw_private *dev_priv,
480                       struct ttm_object_file *tfile,
481                       int id,
482                       struct vmw_resource **p_res)
483 {
484         struct vmw_resource *res;
485         int ret = 0;
486
487         read_lock(&dev_priv->resource_lock);
488         res = idr_find(&dev_priv->context_idr, id);
489         if (res && res->avail) {
490                 struct vmw_user_context *ctx =
491                         container_of(res, struct vmw_user_context, res);
492                 if (ctx->base.tfile != tfile && !ctx->base.shareable)
493                         ret = -EPERM;
494                 if (p_res)
495                         *p_res = vmw_resource_reference(res);
496         } else
497                 ret = -EINVAL;
498         read_unlock(&dev_priv->resource_lock);
499
500         return ret;
501 }
502
503 struct vmw_bpp {
504         uint8_t bpp;
505         uint8_t s_bpp;
506 };
507
508 /*
509  * Size table for the supported SVGA3D surface formats. Each entry
510  * consists of two values: the bpp value and the s_bpp value, which
511  * is short for "stride bits per pixel". The values are chosen so
512  * that the minimum stride for the image, in bits, is
513  *
514  * min_stride = w*s_bpp
515  *
516  * and the total memory requirement for the image, in bits, is
517  *
518  * h*min_stride*bpp/s_bpp
519  *
520  */
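/*
 * Worked example: the table below gives SVGA3D_DXT1 bpp = 4 and
 * s_bpp = 16, so a 64x64 DXT1 mip level gets a byte pitch of
 * (64 * 16 + 7) >> 3 = 128 and a total size of
 * 128 * 64 * 4 / 16 = 2048 bytes, i.e. 8 bytes per 4x4 block.
 */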
521 static const struct vmw_bpp vmw_sf_bpp[] = {
522         [SVGA3D_FORMAT_INVALID] = {0, 0},
523         [SVGA3D_X8R8G8B8] = {32, 32},
524         [SVGA3D_A8R8G8B8] = {32, 32},
525         [SVGA3D_R5G6B5] = {16, 16},
526         [SVGA3D_X1R5G5B5] = {16, 16},
527         [SVGA3D_A1R5G5B5] = {16, 16},
528         [SVGA3D_A4R4G4B4] = {16, 16},
529         [SVGA3D_Z_D32] = {32, 32},
530         [SVGA3D_Z_D16] = {16, 16},
531         [SVGA3D_Z_D24S8] = {32, 32},
532         [SVGA3D_Z_D15S1] = {16, 16},
533         [SVGA3D_LUMINANCE8] = {8, 8},
534         [SVGA3D_LUMINANCE4_ALPHA4] = {8, 8},
535         [SVGA3D_LUMINANCE16] = {16, 16},
536         [SVGA3D_LUMINANCE8_ALPHA8] = {16, 16},
537         [SVGA3D_DXT1] = {4, 16},
538         [SVGA3D_DXT2] = {8, 32},
539         [SVGA3D_DXT3] = {8, 32},
540         [SVGA3D_DXT4] = {8, 32},
541         [SVGA3D_DXT5] = {8, 32},
542         [SVGA3D_BUMPU8V8] = {16, 16},
543         [SVGA3D_BUMPL6V5U5] = {16, 16},
544         [SVGA3D_BUMPX8L8V8U8] = {32, 32},
545         [SVGA3D_ARGB_S10E5] = {16, 16},
546         [SVGA3D_ARGB_S23E8] = {32, 32},
547         [SVGA3D_A2R10G10B10] = {32, 32},
548         [SVGA3D_V8U8] = {16, 16},
549         [SVGA3D_Q8W8V8U8] = {32, 32},
550         [SVGA3D_CxV8U8] = {16, 16},
551         [SVGA3D_X8L8V8U8] = {32, 32},
552         [SVGA3D_A2W10V10U10] = {32, 32},
553         [SVGA3D_ALPHA8] = {8, 8},
554         [SVGA3D_R_S10E5] = {16, 16},
555         [SVGA3D_R_S23E8] = {32, 32},
556         [SVGA3D_RG_S10E5] = {16, 16},
557         [SVGA3D_RG_S23E8] = {32, 32},
558         [SVGA3D_BUFFER] = {8, 8},
559         [SVGA3D_Z_D24X8] = {32, 32},
560         [SVGA3D_V16U16] = {32, 32},
561         [SVGA3D_G16R16] = {32, 32},
562         [SVGA3D_A16B16G16R16] = {64,  64},
563         [SVGA3D_UYVY] = {12, 12},
564         [SVGA3D_YUY2] = {12, 12},
565         [SVGA3D_NV12] = {12, 8},
566         [SVGA3D_AYUV] = {32, 32},
567         [SVGA3D_BC4_UNORM] = {4,  16},
568         [SVGA3D_BC5_UNORM] = {8,  32},
569         [SVGA3D_Z_DF16] = {16,  16},
570         [SVGA3D_Z_DF24] = {24,  24},
571         [SVGA3D_Z_D24S8_INT] = {32,  32}
572 };
573
574
575 /**
576  * Surface management.
577  */
578
579 struct vmw_surface_dma {
580         SVGA3dCmdHeader header;
581         SVGA3dCmdSurfaceDMA body;
582         SVGA3dCopyBox cb;
583         SVGA3dCmdSurfaceDMASuffix suffix;
584 };
585
586 struct vmw_surface_define {
587         SVGA3dCmdHeader header;
588         SVGA3dCmdDefineSurface body;
589 };
590
591 struct vmw_surface_destroy {
592         SVGA3dCmdHeader header;
593         SVGA3dCmdDestroySurface body;
594 };
595
596
597 /**
598  * vmw_surface_dma_size - Compute fifo size for a dma command.
599  *
600  * @srf: Pointer to a struct vmw_surface
601  *
602  * Computes the required size for a surface dma command for backup or
603  * restoration of the surface represented by @srf.
604  */
605 static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
606 {
607         return srf->num_sizes * sizeof(struct vmw_surface_dma);
608 }
609
610
611 /**
612  * vmw_surface_define_size - Compute fifo size for a surface define command.
613  *
614  * @srf: Pointer to a struct vmw_surface
615  *
616  * Computes the required size for a surface define command for the definition
617  * of the surface represented by @srf.
618  */
619 static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf)
620 {
621         return sizeof(struct vmw_surface_define) + srf->num_sizes *
622                 sizeof(SVGA3dSize);
623 }
624
625
626 /**
627  * vmw_surface_destroy_size - Compute fifo size for a surface destroy command.
628  *
629  * Computes the required size for a surface destroy command for the destruction
630  * of a hw surface.
631  */
632 static inline uint32_t vmw_surface_destroy_size(void)
633 {
634         return sizeof(struct vmw_surface_destroy);
635 }
636
637 /**
638  * vmw_surface_destroy_encode - Encode a surface_destroy command.
639  *
640  * @id: The surface id
641  * @cmd_space: Pointer to memory area in which the commands should be encoded.
642  */
643 static void vmw_surface_destroy_encode(uint32_t id,
644                                        void *cmd_space)
645 {
646         struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *)
647                 cmd_space;
648
649         cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
650         cmd->header.size = sizeof(cmd->body);
651         cmd->body.sid = id;
652 }
653
654 /**
655  * vmw_surface_define_encode - Encode a surface_define command.
656  *
657  * @srf: Pointer to a struct vmw_surface object.
658  * @cmd_space: Pointer to memory area in which the commands should be encoded.
659  */
660 static void vmw_surface_define_encode(const struct vmw_surface *srf,
661                                       void *cmd_space)
662 {
663         struct vmw_surface_define *cmd = (struct vmw_surface_define *)
664                 cmd_space;
665         struct drm_vmw_size *src_size;
666         SVGA3dSize *cmd_size;
667         uint32_t cmd_len;
668         int i;
669
670         cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);
671
672         cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
673         cmd->header.size = cmd_len;
674         cmd->body.sid = srf->res.id;
675         cmd->body.surfaceFlags = srf->flags;
676         cmd->body.format = cpu_to_le32(srf->format);
677         for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
678                 cmd->body.face[i].numMipLevels = srf->mip_levels[i];
679
680         cmd += 1;
681         cmd_size = (SVGA3dSize *) cmd;
682         src_size = srf->sizes;
683
684         for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
685                 cmd_size->width = src_size->width;
686                 cmd_size->height = src_size->height;
687                 cmd_size->depth = src_size->depth;
688         }
689 }
690
691
692 /**
693  * vmw_surface_dma_encode - Encode a surface_dma command.
694  *
695  * @srf: Pointer to a struct vmw_surface object.
696  * @cmd_space: Pointer to memory area in which the commands should be encoded.
697  * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents
698  * should be placed or read from.
699  * @to_surface: Boolean whether to DMA to the surface or from the surface.
700  */
701 static void vmw_surface_dma_encode(struct vmw_surface *srf,
702                                    void *cmd_space,
703                                    const SVGAGuestPtr *ptr,
704                                    bool to_surface)
705 {
706         uint32_t i;
707         uint32_t bpp = vmw_sf_bpp[srf->format].bpp;
708         uint32_t stride_bpp = vmw_sf_bpp[srf->format].s_bpp;
709         struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space;
710
711         for (i = 0; i < srf->num_sizes; ++i) {
712                 SVGA3dCmdHeader *header = &cmd->header;
713                 SVGA3dCmdSurfaceDMA *body = &cmd->body;
714                 SVGA3dCopyBox *cb = &cmd->cb;
715                 SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix;
716                 const struct vmw_surface_offset *cur_offset = &srf->offsets[i];
717                 const struct drm_vmw_size *cur_size = &srf->sizes[i];
718
719                 header->id = SVGA_3D_CMD_SURFACE_DMA;
720                 header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);
721
722                 body->guest.ptr = *ptr;
723                 body->guest.ptr.offset += cur_offset->bo_offset;
724                 body->guest.pitch = (cur_size->width * stride_bpp + 7) >> 3;
725                 body->host.sid = srf->res.id;
726                 body->host.face = cur_offset->face;
727                 body->host.mipmap = cur_offset->mip;
728                 body->transfer = ((to_surface) ?  SVGA3D_WRITE_HOST_VRAM :
729                                   SVGA3D_READ_HOST_VRAM);
730                 cb->x = 0;
731                 cb->y = 0;
732                 cb->z = 0;
733                 cb->srcx = 0;
734                 cb->srcy = 0;
735                 cb->srcz = 0;
736                 cb->w = cur_size->width;
737                 cb->h = cur_size->height;
738                 cb->d = cur_size->depth;
739
740                 suffix->suffixSize = sizeof(*suffix);
741                 suffix->maximumOffset = body->guest.pitch*cur_size->height*
742                         cur_size->depth*bpp / stride_bpp;
743                 suffix->flags.discard = 0;
744                 suffix->flags.unsynchronized = 0;
745                 suffix->flags.reserved = 0;
746                 ++cmd;
747         }
748 }
749
750
751 static void vmw_hw_surface_destroy(struct vmw_resource *res)
752 {
753
754         struct vmw_private *dev_priv = res->dev_priv;
755         struct vmw_surface *srf;
756         void *cmd;
757
758         if (res->id != -1) {
759
760                 cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
761                 if (unlikely(cmd == NULL)) {
762                         DRM_ERROR("Failed reserving FIFO space for surface "
763                                   "destruction.\n");
764                         return;
765                 }
766
767                 vmw_surface_destroy_encode(res->id, cmd);
768                 vmw_fifo_commit(dev_priv, vmw_surface_destroy_size());
769
770                 /*
771                  * TODO: make used_memory_size an atomic, or protect
772                  * it with a separate lock, to avoid taking
773                  * dev_priv::cmdbuf_mutex in the destroy path.
774                  */
775
776                 mutex_lock(&dev_priv->cmdbuf_mutex);
777                 srf = container_of(res, struct vmw_surface, res);
778                 dev_priv->used_memory_size -= srf->backup_size;
779                 mutex_unlock(&dev_priv->cmdbuf_mutex);
780
781         }
782         vmw_3d_resource_dec(dev_priv, false);
783 }
784
785 void vmw_surface_res_free(struct vmw_resource *res)
786 {
787         struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
788
789         if (srf->backup)
790                 ttm_bo_unref(&srf->backup);
791         kfree(srf->offsets);
792         kfree(srf->sizes);
793         kfree(srf->snooper.image);
794         kfree(srf);
795 }
796
797
798 /**
799  * vmw_surface_do_validate - make a surface available to the device.
800  *
801  * @dev_priv: Pointer to a device private struct.
802  * @srf: Pointer to a struct vmw_surface.
803  *
804  * If the surface doesn't have a hw id, allocate one, and optionally
805  * DMA the backed up surface contents to the device.
806  *
807  * Returns -EBUSY if there wasn't sufficient device resources to
808  * complete the validation. Retry after freeing up resources.
809  *
810  * May return other errors if the kernel is out of guest resources.
811  */
812 int vmw_surface_do_validate(struct vmw_private *dev_priv,
813                             struct vmw_surface *srf)
814 {
815         struct vmw_resource *res = &srf->res;
816         struct list_head val_list;
817         struct ttm_validate_buffer val_buf;
818         uint32_t submit_size;
819         uint8_t *cmd;
820         int ret;
821
822         if (likely(res->id != -1))
823                 return 0;
824
825         if (unlikely(dev_priv->used_memory_size + srf->backup_size >=
826                      dev_priv->memory_size))
827                 return -EBUSY;
828
829         /*
830          * Reserve and validate the backup DMA bo.
831          */
832
833         if (srf->backup) {
834                 INIT_LIST_HEAD(&val_list);
835                 val_buf.bo = ttm_bo_reference(srf->backup);
836                 val_buf.new_sync_obj_arg = (void *)((unsigned long)
837                                                     DRM_VMW_FENCE_FLAG_EXEC);
838                 list_add_tail(&val_buf.head, &val_list);
839                 ret = ttm_eu_reserve_buffers(&val_list);
840                 if (unlikely(ret != 0))
841                         goto out_no_reserve;
842
843                 ret = ttm_bo_validate(srf->backup, &vmw_srf_placement,
844                                       true, false, false);
845                 if (unlikely(ret != 0))
846                         goto out_no_validate;
847         }
848
849         /*
850          * Alloc id for the resource.
851          */
852
853         ret = vmw_resource_alloc_id(dev_priv, res);
854         if (unlikely(ret != 0)) {
855                 DRM_ERROR("Failed to allocate a surface id.\n");
856                 goto out_no_id;
857         }
858         if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) {
859                 ret = -EBUSY;
860                 goto out_no_fifo;
861         }
862
863
864         /*
865          * Encode the surface define and DMA commands.
866          */
867
868         submit_size = vmw_surface_define_size(srf);
869         if (srf->backup)
870                 submit_size += vmw_surface_dma_size(srf);
871
872         cmd = vmw_fifo_reserve(dev_priv, submit_size);
873         if (unlikely(cmd == NULL)) {
874                 DRM_ERROR("Failed reserving FIFO space for surface "
875                           "validation.\n");
876                 ret = -ENOMEM;
877                 goto out_no_fifo;
878         }
879
880         vmw_surface_define_encode(srf, cmd);
881         if (srf->backup) {
882                 SVGAGuestPtr ptr;
883
884                 cmd += vmw_surface_define_size(srf);
885                 vmw_bo_get_guest_ptr(srf->backup, &ptr);
886                 vmw_surface_dma_encode(srf, cmd, &ptr, true);
887         }
888
889         vmw_fifo_commit(dev_priv, submit_size);
890
891         /*
892          * Create a fence object and fence the backup buffer.
893          */
894
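        /*
         * The backup buffer is released below once the DMA has been
         * fenced; the attached fence keeps its memory from being reused
         * before the transfer completes, and vmw_surface_evict() will
         * create a fresh backup bo if the surface is evicted again.
         */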
895         if (srf->backup) {
896                 struct vmw_fence_obj *fence;
897
898                 (void) vmw_execbuf_fence_commands(NULL, dev_priv,
899                                                   &fence, NULL);
900                 ttm_eu_fence_buffer_objects(&val_list, fence);
901                 if (likely(fence != NULL))
902                         vmw_fence_obj_unreference(&fence);
903                 ttm_bo_unref(&val_buf.bo);
904                 ttm_bo_unref(&srf->backup);
905         }
906
907         /*
908          * Surface memory usage accounting.
909          */
910
911         dev_priv->used_memory_size += srf->backup_size;
912
913         return 0;
914
915 out_no_fifo:
916         vmw_resource_release_id(res);
917 out_no_id:
918 out_no_validate:
919         if (srf->backup)
920                 ttm_eu_backoff_reservation(&val_list);
921 out_no_reserve:
922         if (srf->backup)
923                 ttm_bo_unref(&val_buf.bo);
924         return ret;
925 }
926
927 /**
928  * vmw_surface_evict - Evict a hw surface.
929  *
930  * @dev_priv: Pointer to a device private struct.
931  * @srf: Pointer to a struct vmw_surface
932  *
933  * DMA the contents of a hw surface to a backup guest buffer object,
934  * and destroy the hw surface, releasing its id.
935  */
936 int vmw_surface_evict(struct vmw_private *dev_priv,
937                       struct vmw_surface *srf)
938 {
939         struct vmw_resource *res = &srf->res;
940         struct list_head val_list;
941         struct ttm_validate_buffer val_buf;
942         uint32_t submit_size;
943         uint8_t *cmd;
944         int ret;
945         struct vmw_fence_obj *fence;
946         SVGAGuestPtr ptr;
947
948         BUG_ON(res->id == -1);
949
950         /*
951          * Create a surface backup buffer object.
952          */
953
954         if (!srf->backup) {
955                 ret = ttm_bo_create(&dev_priv->bdev, srf->backup_size,
956                                     ttm_bo_type_device,
957                                     &vmw_srf_placement, 0, 0, true,
958                                     NULL, &srf->backup);
959                 if (unlikely(ret != 0))
960                         return ret;
961         }
962
963         /*
964          * Reserve and validate the backup DMA bo.
965          */
966
967         INIT_LIST_HEAD(&val_list);
968         val_buf.bo = ttm_bo_reference(srf->backup);
969         val_buf.new_sync_obj_arg = (void *)(unsigned long)
970                 DRM_VMW_FENCE_FLAG_EXEC;
971         list_add_tail(&val_buf.head, &val_list);
972         ret = ttm_eu_reserve_buffers(&val_list);
973         if (unlikely(ret != 0))
974                 goto out_no_reserve;
975
976         ret = ttm_bo_validate(srf->backup, &vmw_srf_placement,
977                               true, false, false);
978         if (unlikely(ret != 0))
979                 goto out_no_validate;
980
981
982         /*
983          * Encode the DMA and surface destroy commands.
984          */
985
986         submit_size = vmw_surface_dma_size(srf) + vmw_surface_destroy_size();
987         cmd = vmw_fifo_reserve(dev_priv, submit_size);
988         if (unlikely(cmd == NULL)) {
989                 DRM_ERROR("Failed reserving FIFO space for surface "
990                           "eviction.\n");
991                 ret = -ENOMEM;
992                 goto out_no_fifo;
993         }
994
995         vmw_bo_get_guest_ptr(srf->backup, &ptr);
996         vmw_surface_dma_encode(srf, cmd, &ptr, false);
997         cmd += vmw_surface_dma_size(srf);
998         vmw_surface_destroy_encode(res->id, cmd);
999         vmw_fifo_commit(dev_priv, submit_size);
1000
1001         /*
1002          * Surface memory usage accounting.
1003          */
1004
1005         dev_priv->used_memory_size -= srf->backup_size;
1006
1007         /*
1008          * Create a fence object and fence the DMA buffer.
1009          */
1010
1011         (void) vmw_execbuf_fence_commands(NULL, dev_priv,
1012                                           &fence, NULL);
1013         ttm_eu_fence_buffer_objects(&val_list, fence);
1014         if (likely(fence != NULL))
1015                 vmw_fence_obj_unreference(&fence);
1016         ttm_bo_unref(&val_buf.bo);
1017
1018         /*
1019          * Release the surface ID.
1020          */
1021
1022         vmw_resource_release_id(res);
1023
1024         return 0;
1025
1026 out_no_fifo:
1027 out_no_validate:
1028         if (srf->backup)
1029                 ttm_eu_backoff_reservation(&val_list);
1030 out_no_reserve:
1031         ttm_bo_unref(&val_buf.bo);
1032         ttm_bo_unref(&srf->backup);
1033         return ret;
1034 }
1035
1036
1037 /**
1038  * vmw_surface_validate - make a surface available to the device, evicting
1039  * other surfaces if needed.
1040  *
1041  * @dev_priv: Pointer to a device private struct.
1042  * @srf: Pointer to a struct vmw_surface.
1043  *
1044  * Try to validate a surface and if it fails due to limited device resources,
1045  * repeatedly try to evict other surfaces until the request can be
1046  * accommodated.
1047  *
1048  * May return errors if out of resources.
1049  */
1050 int vmw_surface_validate(struct vmw_private *dev_priv,
1051                          struct vmw_surface *srf)
1052 {
1053         int ret;
1054         struct vmw_surface *evict_srf;
1055
1056         do {
1057                 write_lock(&dev_priv->resource_lock);
1058                 list_del_init(&srf->lru_head);
1059                 write_unlock(&dev_priv->resource_lock);
1060
1061                 ret = vmw_surface_do_validate(dev_priv, srf);
1062                 if (likely(ret != -EBUSY))
1063                         break;
1064
1065                 write_lock(&dev_priv->resource_lock);
1066                 if (list_empty(&dev_priv->surface_lru)) {
1067                         DRM_ERROR("Out of device memory for surfaces.\n");
1068                         ret = -EBUSY;
1069                         write_unlock(&dev_priv->resource_lock);
1070                         break;
1071                 }
1072
1073                 evict_srf = vmw_surface_reference
1074                         (list_first_entry(&dev_priv->surface_lru,
1075                                           struct vmw_surface,
1076                                           lru_head));
1077                 list_del_init(&evict_srf->lru_head);
1078
1079                 write_unlock(&dev_priv->resource_lock);
1080                 (void) vmw_surface_evict(dev_priv, evict_srf);
1081
1082                 vmw_surface_unreference(&evict_srf);
1083
1084         } while (1);
1085
1086         if (unlikely(ret != 0 && srf->res.id != -1)) {
1087                 write_lock(&dev_priv->resource_lock);
1088                 list_add_tail(&srf->lru_head, &dev_priv->surface_lru);
1089                 write_unlock(&dev_priv->resource_lock);
1090         }
1091
1092         return ret;
1093 }
1094
1095
1096 /**
1097  * vmw_surface_remove_from_lists - Remove surface resources from lookup lists
1098  *
1099  * @res: Pointer to a struct vmw_resource embedded in a struct vmw_surface
1100  *
1101  * As part of the resource destruction, remove the surface from any
1102  * lookup lists.
1103  */
1104 static void vmw_surface_remove_from_lists(struct vmw_resource *res)
1105 {
1106         struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
1107
1108         list_del_init(&srf->lru_head);
1109 }
1110
1111 int vmw_surface_init(struct vmw_private *dev_priv,
1112                      struct vmw_surface *srf,
1113                      void (*res_free) (struct vmw_resource *res))
1114 {
1115         int ret;
1116         struct vmw_resource *res = &srf->res;
1117
1118         BUG_ON(res_free == NULL);
1119         INIT_LIST_HEAD(&srf->lru_head);
1120         ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr,
1121                                 VMW_RES_SURFACE, true, res_free,
1122                                 vmw_surface_remove_from_lists);
1123
1124         if (unlikely(ret != 0))
1125                 res_free(res);
1126
1127         /*
1128          * The surface won't be visible to hardware until a
1129          * surface validate.
1130          */
1131
1132         (void) vmw_3d_resource_inc(dev_priv, false);
1133         vmw_resource_activate(res, vmw_hw_surface_destroy);
1134         return ret;
1135 }
1136
1137 static void vmw_user_surface_free(struct vmw_resource *res)
1138 {
1139         struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
1140         struct vmw_user_surface *user_srf =
1141             container_of(srf, struct vmw_user_surface, srf);
1142         struct vmw_private *dev_priv = srf->res.dev_priv;
1143         uint32_t size = user_srf->size;
1144
1145         if (srf->backup)
1146                 ttm_bo_unref(&srf->backup);
1147         kfree(srf->offsets);
1148         kfree(srf->sizes);
1149         kfree(srf->snooper.image);
1150         kfree(user_srf);
1151         ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
1152 }
1153
1154 /**
1155  * vmw_resource_unreserve - unreserve resources previously reserved for
1156  * command submission.
1157  *
1158  * @list: list of resources to unreserve.
1159  *
1160  * This function traverses the resource list and checks whether each
1161  * resource is a surface. Surfaces are put back on the device's
1162  * surface LRU list, so that they can be evicted if necessary;
1163  * unreserving is what makes a previously reserved surface evictable
1164  * again. Currently only surfaces are considered; other resource
1165  * types are skipped.
1166  */
1167 void vmw_resource_unreserve(struct list_head *list)
1168 {
1169         struct vmw_resource *res;
1170         struct vmw_surface *srf;
1171         rwlock_t *lock = NULL;
1172
1173         list_for_each_entry(res, list, validate_head) {
1174
1175                 if (res->res_free != &vmw_surface_res_free &&
1176                     res->res_free != &vmw_user_surface_free)
1177                         continue;
1178
1179                 if (unlikely(lock == NULL)) {
1180                         lock = &res->dev_priv->resource_lock;
1181                         write_lock(lock);
1182                 }
1183
1184                 srf = container_of(res, struct vmw_surface, res);
1185                 list_del_init(&srf->lru_head);
1186                 list_add_tail(&srf->lru_head, &res->dev_priv->surface_lru);
1187         }
1188
1189         if (lock != NULL)
1190                 write_unlock(lock);
1191 }
1192
1193 /**
1194  * Helper function that looks up either a surface or a dmabuf.
1195  *
1196  * The pointers pointed to by @out_surf and @out_buf must be NULL.
1197  */
1198 int vmw_user_lookup_handle(struct vmw_private *dev_priv,
1199                            struct ttm_object_file *tfile,
1200                            uint32_t handle,
1201                            struct vmw_surface **out_surf,
1202                            struct vmw_dma_buffer **out_buf)
1203 {
1204         int ret;
1205
1206         BUG_ON(*out_surf || *out_buf);
1207
1208         ret = vmw_user_surface_lookup_handle(dev_priv, tfile, handle, out_surf);
1209         if (!ret)
1210                 return 0;
1211
1212         ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
1213         return ret;
1214 }
1215
1216
1217 int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
1218                                    struct ttm_object_file *tfile,
1219                                    uint32_t handle, struct vmw_surface **out)
1220 {
1221         struct vmw_resource *res;
1222         struct vmw_surface *srf;
1223         struct vmw_user_surface *user_srf;
1224         struct ttm_base_object *base;
1225         int ret = -EINVAL;
1226
1227         base = ttm_base_object_lookup(tfile, handle);
1228         if (unlikely(base == NULL))
1229                 return -EINVAL;
1230
1231         if (unlikely(base->object_type != VMW_RES_SURFACE))
1232                 goto out_bad_resource;
1233
1234         user_srf = container_of(base, struct vmw_user_surface, base);
1235         srf = &user_srf->srf;
1236         res = &srf->res;
1237
1238         read_lock(&dev_priv->resource_lock);
1239
1240         if (!res->avail || res->res_free != &vmw_user_surface_free) {
1241                 read_unlock(&dev_priv->resource_lock);
1242                 goto out_bad_resource;
1243         }
1244
1245         kref_get(&res->kref);
1246         read_unlock(&dev_priv->resource_lock);
1247
1248         *out = srf;
1249         ret = 0;
1250
1251 out_bad_resource:
1252         ttm_base_object_unref(&base);
1253
1254         return ret;
1255 }
1256
1257 static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
1258 {
1259         struct ttm_base_object *base = *p_base;
1260         struct vmw_user_surface *user_srf =
1261             container_of(base, struct vmw_user_surface, base);
1262         struct vmw_resource *res = &user_srf->srf.res;
1263
1264         *p_base = NULL;
1265         vmw_resource_unreference(&res);
1266 }
1267
1268 int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
1269                               struct drm_file *file_priv)
1270 {
1271         struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
1272         struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1273
1274         return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
1275 }
1276
1277 int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
1278                              struct drm_file *file_priv)
1279 {
1280         struct vmw_private *dev_priv = vmw_priv(dev);
1281         struct vmw_user_surface *user_srf;
1282         struct vmw_surface *srf;
1283         struct vmw_resource *res;
1284         struct vmw_resource *tmp;
1285         union drm_vmw_surface_create_arg *arg =
1286             (union drm_vmw_surface_create_arg *)data;
1287         struct drm_vmw_surface_create_req *req = &arg->req;
1288         struct drm_vmw_surface_arg *rep = &arg->rep;
1289         struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1290         struct drm_vmw_size __user *user_sizes;
1291         int ret;
1292         int i, j;
1293         uint32_t cur_bo_offset;
1294         struct drm_vmw_size *cur_size;
1295         struct vmw_surface_offset *cur_offset;
1296         uint32_t stride_bpp;
1297         uint32_t bpp;
1298         uint32_t num_sizes;
1299         uint32_t size;
1300         struct vmw_master *vmaster = vmw_master(file_priv->master);
1301
1302         if (unlikely(vmw_user_surface_size == 0))
1303                 vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
1304                         128;
1305
1306         num_sizes = 0;
1307         for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
1308                 if (req->mip_levels[i] > DRM_VMW_MAX_MIP_LEVELS)
1309                         return -EINVAL;
1310                 num_sizes += req->mip_levels[i];
1311         }
1312
1313         if (num_sizes > DRM_VMW_MAX_SURFACE_FACES * DRM_VMW_MAX_MIP_LEVELS ||
1314             num_sizes == 0)
1315                 return -EINVAL;
1316
1317         size = vmw_user_surface_size + 128 +
1318                 ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
1319                 ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));
1320
1321
1322         ret = ttm_read_lock(&vmaster->lock, true);
1323         if (unlikely(ret != 0))
1324                 return ret;
1325
1326         ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
1327                                    size, false, true);
1328         if (unlikely(ret != 0)) {
1329                 if (ret != -ERESTARTSYS)
1330                         DRM_ERROR("Out of graphics memory for surface"
1331                                   " creation.\n");
1332                 goto out_unlock;
1333         }
1334
1335         user_srf = kmalloc(sizeof(*user_srf), GFP_KERNEL);
1336         if (unlikely(user_srf == NULL)) {
1337                 ret = -ENOMEM;
1338                 goto out_no_user_srf;
1339         }
1340
1341         srf = &user_srf->srf;
1342         res = &srf->res;
1343
1344         srf->flags = req->flags;
1345         srf->format = req->format;
1346         srf->scanout = req->scanout;
1347         srf->backup = NULL;
1348
1349         memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
1350         srf->num_sizes = num_sizes;
1351         user_srf->size = size;
1352
1353         srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
1354         if (unlikely(srf->sizes == NULL)) {
1355                 ret = -ENOMEM;
1356                 goto out_no_sizes;
1357         }
1358         srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets),
1359                                GFP_KERNEL);
1360         if (unlikely(srf->offsets == NULL)) {
1361                 ret = -ENOMEM;
1362                 goto out_no_offsets;
1363         }
1364
1365         user_sizes = (struct drm_vmw_size __user *)(unsigned long)
1366             req->size_addr;
1367
1368         ret = copy_from_user(srf->sizes, user_sizes,
1369                              srf->num_sizes * sizeof(*srf->sizes));
1370         if (unlikely(ret != 0)) {
1371                 ret = -EFAULT;
1372                 goto out_no_copy;
1373         }
1374
1375         cur_bo_offset = 0;
1376         cur_offset = srf->offsets;
1377         cur_size = srf->sizes;
1378
1379         bpp = vmw_sf_bpp[srf->format].bpp;
1380         stride_bpp = vmw_sf_bpp[srf->format].s_bpp;
1381
1382         for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
1383                 for (j = 0; j < srf->mip_levels[i]; ++j) {
1384                         uint32_t stride =
1385                                 (cur_size->width * stride_bpp + 7) >> 3;
1386
1387                         cur_offset->face = i;
1388                         cur_offset->mip = j;
1389                         cur_offset->bo_offset = cur_bo_offset;
1390                         cur_bo_offset += stride * cur_size->height *
1391                                 cur_size->depth * bpp / stride_bpp;
1392                         ++cur_offset;
1393                         ++cur_size;
1394                 }
1395         }
1396         srf->backup_size = cur_bo_offset;
1397
1398         if (srf->scanout &&
1399             srf->num_sizes == 1 &&
1400             srf->sizes[0].width == 64 &&
1401             srf->sizes[0].height == 64 &&
1402             srf->format == SVGA3D_A8R8G8B8) {
1403
1404                 /* allocate image area and clear it */
1405                 srf->snooper.image = kzalloc(64 * 64 * 4, GFP_KERNEL);
1406                 if (!srf->snooper.image) {
1407                         DRM_ERROR("Failed to allocate cursor_image\n");
1408                         ret = -ENOMEM;
1409                         goto out_no_copy;
1410                 }
1411         } else {
1412                 srf->snooper.image = NULL;
1413         }
1414         srf->snooper.crtc = NULL;
1415
1416         user_srf->base.shareable = false;
1417         user_srf->base.tfile = NULL;
1418
1419         /**
1420          * From this point, the generic resource management functions
1421          * destroy the object on failure.
1422          */
1423
1424         ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
1425         if (unlikely(ret != 0))
1426                 goto out_unlock;
1427
1428         tmp = vmw_resource_reference(&srf->res);
1429         ret = ttm_base_object_init(tfile, &user_srf->base,
1430                                    req->shareable, VMW_RES_SURFACE,
1431                                    &vmw_user_surface_base_release, NULL);
1432
1433         if (unlikely(ret != 0)) {
1434                 vmw_resource_unreference(&tmp);
1435                 vmw_resource_unreference(&res);
1436                 goto out_unlock;
1437         }
1438
1439         rep->sid = user_srf->base.hash.key;
1440         if (rep->sid == SVGA3D_INVALID_ID)
1441                 DRM_ERROR("Created bad Surface ID.\n");
1442
1443         vmw_resource_unreference(&res);
1444
1445         ttm_read_unlock(&vmaster->lock);
1446         return 0;
1447 out_no_copy:
1448         kfree(srf->offsets);
1449 out_no_offsets:
1450         kfree(srf->sizes);
1451 out_no_sizes:
1452         kfree(user_srf);
1453 out_no_user_srf:
1454         ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
1455 out_unlock:
1456         ttm_read_unlock(&vmaster->lock);
1457         return ret;
1458 }
1459
1460 int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
1461                                 struct drm_file *file_priv)
1462 {
1463         union drm_vmw_surface_reference_arg *arg =
1464             (union drm_vmw_surface_reference_arg *)data;
1465         struct drm_vmw_surface_arg *req = &arg->req;
1466         struct drm_vmw_surface_create_req *rep = &arg->rep;
1467         struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1468         struct vmw_surface *srf;
1469         struct vmw_user_surface *user_srf;
1470         struct drm_vmw_size __user *user_sizes;
1471         struct ttm_base_object *base;
1472         int ret = -EINVAL;
1473
1474         base = ttm_base_object_lookup(tfile, req->sid);
1475         if (unlikely(base == NULL)) {
1476                 DRM_ERROR("Could not find surface to reference.\n");
1477                 return -EINVAL;
1478         }
1479
1480         if (unlikely(base->object_type != VMW_RES_SURFACE))
1481                 goto out_bad_resource;
1482
1483         user_srf = container_of(base, struct vmw_user_surface, base);
1484         srf = &user_srf->srf;
1485
1486         ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
1487         if (unlikely(ret != 0)) {
1488                 DRM_ERROR("Could not add a reference to a surface.\n");
1489                 goto out_no_reference;
1490         }
1491
1492         rep->flags = srf->flags;
1493         rep->format = srf->format;
1494         memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
1495         user_sizes = (struct drm_vmw_size __user *)(unsigned long)
1496             rep->size_addr;
1497
1498         if (user_sizes)
1499                 ret = copy_to_user(user_sizes, srf->sizes,
1500                                    srf->num_sizes * sizeof(*srf->sizes));
1501         if (unlikely(ret != 0)) {
1502                 DRM_ERROR("copy_to_user failed %p %u\n",
1503                           user_sizes, srf->num_sizes);
1504                 ret = -EFAULT;
1505         }
1506 out_bad_resource:
1507 out_no_reference:
1508         ttm_base_object_unref(&base);
1509
1510         return ret;
1511 }
1512
1513 int vmw_surface_check(struct vmw_private *dev_priv,
1514                       struct ttm_object_file *tfile,
1515                       uint32_t handle, int *id)
1516 {
1517         struct ttm_base_object *base;
1518         struct vmw_user_surface *user_srf;
1519
1520         int ret = -EPERM;
1521
1522         base = ttm_base_object_lookup(tfile, handle);
1523         if (unlikely(base == NULL))
1524                 return -EINVAL;
1525
1526         if (unlikely(base->object_type != VMW_RES_SURFACE))
1527                 goto out_bad_surface;
1528
1529         user_srf = container_of(base, struct vmw_user_surface, base);
1530         *id = user_srf->srf.res.id;
1531         ret = 0;
1532
1533 out_bad_surface:
1534         /**
1535          * FIXME: May deadlock here when called from the
1536          * command parsing code.
1537          */
1538
1539         ttm_base_object_unref(&base);
1540         return ret;
1541 }
1542
1543 /**
1544  * Buffer management.
1545  */
1546
1547 static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob,
1548                                   unsigned long num_pages)
1549 {
1550         static size_t bo_user_size = ~0;
1551
1552         size_t page_array_size =
1553             (num_pages * sizeof(void *) + PAGE_SIZE - 1) & PAGE_MASK;
1554
1555         if (unlikely(bo_user_size == ~0)) {
1556                 bo_user_size = glob->ttm_bo_extra_size +
1557                     ttm_round_pot(sizeof(struct vmw_dma_buffer));
1558         }
1559
1560         return bo_user_size + page_array_size;
1561 }
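/*
 * Example, assuming 4 KiB pages and 64-bit pointers: a 1 MiB buffer
 * spans 256 pages, so page_array_size = 256 * 8 = 2048 bytes rounded
 * up to one page (4096), added on top of the fixed per-bo overhead.
 */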
1562
1563 void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
1564 {
1565         struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
1566         struct ttm_bo_global *glob = bo->glob;
1567
1568         ttm_mem_global_free(glob->mem_glob, bo->acc_size);
1569         kfree(vmw_bo);
1570 }
1571
1572 int vmw_dmabuf_init(struct vmw_private *dev_priv,
1573                     struct vmw_dma_buffer *vmw_bo,
1574                     size_t size, struct ttm_placement *placement,
1575                     bool interruptible,
1576                     void (*bo_free) (struct ttm_buffer_object *bo))
1577 {
1578         struct ttm_bo_device *bdev = &dev_priv->bdev;
1579         struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
1580         size_t acc_size;
1581         int ret;
1582
1583         BUG_ON(!bo_free);
1584
1585         acc_size =
1586             vmw_dmabuf_acc_size(bdev->glob,
1587                                 (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
1588
1589         ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
1590         if (unlikely(ret != 0)) {
1591                 /* We must free the bo here, because on failure
1592                  * ttm_bo_init() would free it as well. */
1593                 bo_free(&vmw_bo->base);
1594                 return ret;
1595         }
1596
1597         memset(vmw_bo, 0, sizeof(*vmw_bo));
1598
1599         INIT_LIST_HEAD(&vmw_bo->validate_list);
1600
1601         ret = ttm_bo_init(bdev, &vmw_bo->base, size,
1602                           ttm_bo_type_device, placement,
1603                           0, 0, interruptible,
1604                           NULL, acc_size, bo_free);
1605         return ret;
1606 }
1607
1608 static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
1609 {
1610         struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
1611         struct ttm_bo_global *glob = bo->glob;
1612
1613         ttm_mem_global_free(glob->mem_glob, bo->acc_size);
1614         kfree(vmw_user_bo);
1615 }
1616
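/**
 * vmw_user_dmabuf_release - base object release function for user DMA buffers.
 *
 * @p_base: Pointer to the base object pointer, cleared by this function.
 *
 * Drops the TTM buffer object reference held by the base object.
 */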
1617 static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
1618 {
1619         struct vmw_user_dma_buffer *vmw_user_bo;
1620         struct ttm_base_object *base = *p_base;
1621         struct ttm_buffer_object *bo;
1622
1623         *p_base = NULL;
1624
1625         if (unlikely(base == NULL))
1626                 return;
1627
1628         vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
1629         bo = &vmw_user_bo->dma.base;
1630         ttm_bo_unref(&bo);
1631 }
1632
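/**
 * vmw_dmabuf_alloc_ioctl - ioctl handler for allocating a DMA buffer.
 *
 * @dev: Pointer to the DRM device.
 * @data: Pointer to a union drm_vmw_alloc_dmabuf_arg.
 * @file_priv: Pointer to the calling file's private structure.
 *
 * Allocates a user DMA buffer of the requested size, registers it as a
 * base object with the caller's object file, and fills in @data with the
 * resulting handle, map offset and GMR id.
 */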
1633 int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
1634                            struct drm_file *file_priv)
1635 {
1636         struct vmw_private *dev_priv = vmw_priv(dev);
1637         union drm_vmw_alloc_dmabuf_arg *arg =
1638             (union drm_vmw_alloc_dmabuf_arg *)data;
1639         struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
1640         struct drm_vmw_dmabuf_rep *rep = &arg->rep;
1641         struct vmw_user_dma_buffer *vmw_user_bo;
1642         struct ttm_buffer_object *tmp;
1643         struct vmw_master *vmaster = vmw_master(file_priv->master);
1644         int ret;
1645
1646         vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
1647         if (unlikely(vmw_user_bo == NULL))
1648                 return -ENOMEM;
1649
1650         ret = ttm_read_lock(&vmaster->lock, true);
1651         if (unlikely(ret != 0)) {
1652                 kfree(vmw_user_bo);
1653                 return ret;
1654         }
1655
1656         ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size,
1657                               &vmw_vram_sys_placement, true,
1658                               &vmw_user_dmabuf_destroy);
1659         if (unlikely(ret != 0))
1660                 goto out_no_dmabuf;
1661
1662         tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
1663         ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
1664                                    &vmw_user_bo->base,
1665                                    false,
1666                                    ttm_buffer_type,
1667                                    &vmw_user_dmabuf_release, NULL);
1668         if (unlikely(ret != 0))
1669                 goto out_no_base_object;
1670         else {
1671                 rep->handle = vmw_user_bo->base.hash.key;
1672                 rep->map_handle = vmw_user_bo->dma.base.addr_space_offset;
1673                 rep->cur_gmr_id = vmw_user_bo->base.hash.key;
1674                 rep->cur_gmr_offset = 0;
1675         }
1676
1677 out_no_base_object:
1678         ttm_bo_unref(&tmp);
1679 out_no_dmabuf:
1680         ttm_read_unlock(&vmaster->lock);
1681
1682         return ret;
1683 }
1684
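/**
 * vmw_dmabuf_unref_ioctl - ioctl handler for releasing a DMA buffer handle.
 *
 * @dev: Pointer to the DRM device.
 * @data: Pointer to a struct drm_vmw_unref_dmabuf_arg.
 * @file_priv: Pointer to the calling file's private structure.
 *
 * Drops the usage reference that the caller's object file holds on the
 * buffer's base object.
 */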
1685 int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
1686                            struct drm_file *file_priv)
1687 {
1688         struct drm_vmw_unref_dmabuf_arg *arg =
1689             (struct drm_vmw_unref_dmabuf_arg *)data;
1690
1691         return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
1692                                          arg->handle,
1693                                          TTM_REF_USAGE);
1694 }
1695
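/**
 * vmw_dmabuf_validate_node - assign a validate list node to a buffer object.
 *
 * @bo: Pointer to the TTM buffer object.
 * @cur_validate_node: Node index to use if the buffer is not yet on the list.
 *
 * Returns the buffer's existing validate node if it is already on the
 * validate list, otherwise records and returns @cur_validate_node.
 */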
1696 uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
1697                                   uint32_t cur_validate_node)
1698 {
1699         struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
1700
1701         if (likely(vmw_bo->on_validate_list))
1702                 return vmw_bo->cur_validate_node;
1703
1704         vmw_bo->cur_validate_node = cur_validate_node;
1705         vmw_bo->on_validate_list = true;
1706
1707         return cur_validate_node;
1708 }
1709
1710 void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo)
1711 {
1712         struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
1713
1714         vmw_bo->on_validate_list = false;
1715 }
1716
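/**
 * vmw_user_dmabuf_lookup - look up a DMA buffer from a user-space handle.
 *
 * @tfile: TTM object file in which the handle is registered.
 * @handle: User-space handle of the buffer's base object.
 * @out: On success, set to a referenced pointer to the DMA buffer.
 *
 * Returns 0 on success, -ESRCH if no base object with that handle exists,
 * and -EINVAL if the base object is not a buffer object. On success the
 * caller is responsible for releasing the acquired buffer reference.
 */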
1717 int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
1718                            uint32_t handle, struct vmw_dma_buffer **out)
1719 {
1720         struct vmw_user_dma_buffer *vmw_user_bo;
1721         struct ttm_base_object *base;
1722
1723         base = ttm_base_object_lookup(tfile, handle);
1724         if (unlikely(base == NULL)) {
1725                 printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
1726                        (unsigned long)handle);
1727                 return -ESRCH;
1728         }
1729
1730         if (unlikely(base->object_type != ttm_buffer_type)) {
1731                 ttm_base_object_unref(&base);
1732                 printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
1733                        (unsigned long)handle);
1734                 return -EINVAL;
1735         }
1736
1737         vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
1738         (void)ttm_bo_reference(&vmw_user_bo->dma.base);
1739         ttm_base_object_unref(&base);
1740         *out = &vmw_user_bo->dma;
1741
1742         return 0;
1743 }
1744
1745 /*
1746  * Stream management
1747  */
1748
1749 static void vmw_stream_destroy(struct vmw_resource *res)
1750 {
1751         struct vmw_private *dev_priv = res->dev_priv;
1752         struct vmw_stream *stream;
1753         int ret;
1754
1755         DRM_INFO("%s: unref\n", __func__);
1756         stream = container_of(res, struct vmw_stream, res);
1757
1758         ret = vmw_overlay_unref(dev_priv, stream->stream_id);
1759         WARN_ON(ret != 0);
1760 }
1761
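/**
 * vmw_stream_init - initialize a stream resource and claim an overlay stream.
 *
 * @dev_priv: Pointer to the device private structure.
 * @stream: Pointer to the stream to initialize.
 * @res_free: Resource destructor, or NULL to free @stream with kfree().
 *
 * Initializes the embedded resource, claims an overlay stream id and
 * activates the resource with vmw_stream_destroy() as its destroy
 * callback. On error the stream is freed through @res_free, or with
 * kfree() if @res_free is NULL.
 */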
1762 static int vmw_stream_init(struct vmw_private *dev_priv,
1763                            struct vmw_stream *stream,
1764                            void (*res_free) (struct vmw_resource *res))
1765 {
1766         struct vmw_resource *res = &stream->res;
1767         int ret;
1768
1769         ret = vmw_resource_init(dev_priv, res, &dev_priv->stream_idr,
1770                                 VMW_RES_STREAM, false, res_free, NULL);
1771
1772         if (unlikely(ret != 0)) {
1773                 if (res_free == NULL)
1774                         kfree(stream);
1775                 else
1776                         res_free(&stream->res);
1777                 return ret;
1778         }
1779
1780         ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
1781         if (ret) {
1782                 vmw_resource_unreference(&res);
1783                 return ret;
1784         }
1785
1786         DRM_INFO("%s: claimed\n", __func__);
1787
1788         vmw_resource_activate(&stream->res, vmw_stream_destroy);
1789         return 0;
1790 }
1791
1792 /**
1793  * User-space stream management:
1794  */
1795
1796 static void vmw_user_stream_free(struct vmw_resource *res)
1797 {
1798         struct vmw_user_stream *stream =
1799             container_of(res, struct vmw_user_stream, stream.res);
1800         struct vmw_private *dev_priv = res->dev_priv;
1801
1802         kfree(stream);
1803         ttm_mem_global_free(vmw_mem_glob(dev_priv),
1804                             vmw_user_stream_size);
1805 }
1806
1807 /**
1808  * Called when user space has no more references on the base object.
1809  * It then releases the base object's reference on the resource object.
1810  */
1811
1812 static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
1813 {
1814         struct ttm_base_object *base = *p_base;
1815         struct vmw_user_stream *stream =
1816             container_of(base, struct vmw_user_stream, base);
1817         struct vmw_resource *res = &stream->stream.res;
1818
1819         *p_base = NULL;
1820         vmw_resource_unreference(&res);
1821 }
1822
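/**
 * vmw_stream_unref_ioctl - ioctl handler for releasing a user-space stream.
 *
 * @dev: Pointer to the DRM device.
 * @data: Pointer to a struct drm_vmw_stream_arg naming the stream.
 * @file_priv: Pointer to the calling file's private structure.
 *
 * Looks up the stream resource, verifies that it is a user-space stream
 * owned by the caller, and drops the caller's usage reference on its base
 * object. Returns -EINVAL if the stream does not exist or is not owned by
 * the caller.
 */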
1823 int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
1824                            struct drm_file *file_priv)
1825 {
1826         struct vmw_private *dev_priv = vmw_priv(dev);
1827         struct vmw_resource *res;
1828         struct vmw_user_stream *stream;
1829         struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
1830         struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1831         int ret = 0;
1832
1833         res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, arg->stream_id);
1834         if (unlikely(res == NULL))
1835                 return -EINVAL;
1836
1837         if (res->res_free != &vmw_user_stream_free) {
1838                 ret = -EINVAL;
1839                 goto out;
1840         }
1841
1842         stream = container_of(res, struct vmw_user_stream, stream.res);
1843         if (stream->base.tfile != tfile) {
1844                 ret = -EINVAL;
1845                 goto out;
1846         }
1847
1848         ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
1849 out:
1850         vmw_resource_unreference(&res);
1851         return ret;
1852 }
1853
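/**
 * vmw_stream_claim_ioctl - ioctl handler for claiming a new user-space stream.
 *
 * @dev: Pointer to the DRM device.
 * @data: Pointer to a struct drm_vmw_stream_arg.
 * @file_priv: Pointer to the calling file's private structure.
 *
 * Charges the graphics memory accounting, allocates and initializes a
 * user-space stream, registers it as a base object and returns the new
 * stream id in @data.
 */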
1854 int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
1855                            struct drm_file *file_priv)
1856 {
1857         struct vmw_private *dev_priv = vmw_priv(dev);
1858         struct vmw_user_stream *stream;
1859         struct vmw_resource *res;
1860         struct vmw_resource *tmp;
1861         struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
1862         struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1863         struct vmw_master *vmaster = vmw_master(file_priv->master);
1864         int ret;
1865
1866         /*
1867          * Approximate idr memory usage with 128 bytes. It is limited
1868          * by the maximum number of streams anyway.
1869          */
1870
1871         if (unlikely(vmw_user_stream_size == 0))
1872                 vmw_user_stream_size = ttm_round_pot(sizeof(*stream)) + 128;
1873
1874         ret = ttm_read_lock(&vmaster->lock, true);
1875         if (unlikely(ret != 0))
1876                 return ret;
1877
1878         ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
1879                                    vmw_user_stream_size,
1880                                    false, true);
1881         if (unlikely(ret != 0)) {
1882                 if (ret != -ERESTARTSYS)
1883                         DRM_ERROR("Out of graphics memory for stream"
1884                                   " creation.\n");
1885                 goto out_unlock;
1886         }
1887
1888
1889         stream = kmalloc(sizeof(*stream), GFP_KERNEL);
1890         if (unlikely(stream == NULL)) {
1891                 ttm_mem_global_free(vmw_mem_glob(dev_priv),
1892                                     vmw_user_stream_size);
1893                 ret = -ENOMEM;
1894                 goto out_unlock;
1895         }
1896
1897         res = &stream->stream.res;
1898         stream->base.shareable = false;
1899         stream->base.tfile = NULL;
1900
1901         /*
1902          * From here on, the destructor takes over resource freeing.
1903          */
1904
1905         ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
1906         if (unlikely(ret != 0))
1907                 goto out_unlock;
1908
1909         tmp = vmw_resource_reference(res);
1910         ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
1911                                    &vmw_user_stream_base_release, NULL);
1912
1913         if (unlikely(ret != 0)) {
1914                 vmw_resource_unreference(&tmp);
1915                 goto out_err;
1916         }
1917
1918         arg->stream_id = res->id;
1919 out_err:
1920         vmw_resource_unreference(&res);
1921 out_unlock:
1922         ttm_read_unlock(&vmaster->lock);
1923         return ret;
1924 }
1925
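/**
 * vmw_user_stream_lookup - look up a user-space stream by resource id.
 *
 * @dev_priv: Pointer to the device private structure.
 * @tfile: TTM object file the stream must belong to.
 * @inout_id: On input the stream's resource id; on success replaced with
 * the overlay stream id.
 * @out: On success, set to a referenced pointer to the stream resource.
 *
 * Returns 0 on success, -EINVAL if the resource is missing or not a
 * user-space stream, and -EPERM if the stream is owned by another file.
 */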
1926 int vmw_user_stream_lookup(struct vmw_private *dev_priv,
1927                            struct ttm_object_file *tfile,
1928                            uint32_t *inout_id, struct vmw_resource **out)
1929 {
1930         struct vmw_user_stream *stream;
1931         struct vmw_resource *res;
1932         int ret;
1933
1934         res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, *inout_id);
1935         if (unlikely(res == NULL))
1936                 return -EINVAL;
1937
1938         if (res->res_free != &vmw_user_stream_free) {
1939                 ret = -EINVAL;
1940                 goto err_ref;
1941         }
1942
1943         stream = container_of(res, struct vmw_user_stream, stream.res);
1944         if (stream->base.tfile != tfile) {
1945                 ret = -EPERM;
1946                 goto err_ref;
1947         }
1948
1949         *inout_id = stream->stream.stream_id;
1950         *out = res;
1951         return 0;
1952 err_ref:
1953         vmw_resource_unreference(&res);
1954         return ret;
1955 }
1956
1957
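/**
 * vmw_dumb_create - create a dumb buffer for scanout.
 *
 * @file_priv: Pointer to the calling file's private structure.
 * @dev: Pointer to the DRM device.
 * @args: Dumb buffer creation arguments; pitch, size and handle are
 * filled in on success.
 *
 * Implements the DRM dumb buffer interface on top of a vmw DMA buffer:
 * computes a byte pitch from width and bpp, allocates a buffer of
 * pitch * height bytes and returns a handle to it.
 */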
1958 int vmw_dumb_create(struct drm_file *file_priv,
1959                     struct drm_device *dev,
1960                     struct drm_mode_create_dumb *args)
1961 {
1962         struct vmw_private *dev_priv = vmw_priv(dev);
1963         struct vmw_master *vmaster = vmw_master(file_priv->master);
1964         struct vmw_user_dma_buffer *vmw_user_bo;
1965         struct ttm_buffer_object *tmp;
1966         int ret;
1967
1968         args->pitch = args->width * ((args->bpp + 7) / 8);
1969         args->size = args->pitch * args->height;
1970
1971         vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
1972         if (vmw_user_bo == NULL)
1973                 return -ENOMEM;
1974
1975         ret = ttm_read_lock(&vmaster->lock, true);
1976         if (ret != 0) {
1977                 kfree(vmw_user_bo);
1978                 return ret;
1979         }
1980
1981         ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, args->size,
1982                               &vmw_vram_sys_placement, true,
1983                               &vmw_user_dmabuf_destroy);
1984         if (ret != 0)
1985                 goto out_no_dmabuf;
1986
1987         tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
1988         ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
1989                                    &vmw_user_bo->base,
1990                                    false,
1991                                    ttm_buffer_type,
1992                                    &vmw_user_dmabuf_release, NULL);
1993         if (unlikely(ret != 0))
1994                 goto out_no_base_object;
1995
1996         args->handle = vmw_user_bo->base.hash.key;
1997
1998 out_no_base_object:
1999         ttm_bo_unref(&tmp);
2000 out_no_dmabuf:
2001         ttm_read_unlock(&vmaster->lock);
2002         return ret;
2003 }
2004
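/**
 * vmw_dumb_map_offset - return the mmap offset of a dumb buffer.
 *
 * @file_priv: Pointer to the calling file's private structure.
 * @dev: Pointer to the DRM device.
 * @handle: Handle of the dumb buffer.
 * @offset: On success, set to the buffer's address space offset for mmap.
 *
 * Returns 0 on success or -EINVAL if the handle does not name a buffer
 * object.
 */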
2005 int vmw_dumb_map_offset(struct drm_file *file_priv,
2006                         struct drm_device *dev, uint32_t handle,
2007                         uint64_t *offset)
2008 {
2009         struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
2010         struct vmw_dma_buffer *out_buf;
2011         int ret;
2012
2013         ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf);
2014         if (ret != 0)
2015                 return -EINVAL;
2016
2017         *offset = out_buf->base.addr_space_offset;
2018         vmw_dmabuf_unreference(&out_buf);
2019         return 0;
2020 }
2021
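/**
 * vmw_dumb_destroy - drop the handle reference on a dumb buffer.
 *
 * @file_priv: Pointer to the calling file's private structure.
 * @dev: Pointer to the DRM device.
 * @handle: Handle of the dumb buffer to release.
 *
 * Drops the usage reference that the caller's object file holds on the
 * buffer's base object.
 */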
2022 int vmw_dumb_destroy(struct drm_file *file_priv,
2023                      struct drm_device *dev,
2024                      uint32_t handle)
2025 {
2026         return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
2027                                          handle, TTM_REF_USAGE);
2028 }