[pandora-kernel.git] drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include "ttm/ttm_bo_api.h"
#include "ttm/ttm_placement.h"

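/*
 * Per-command verifier callbacks. Each callback checks a single command
 * in the user-supplied command stream before it reaches the device; the
 * vmw_cmd_funcs table below maps SVGA3D command ids to these callbacks.
 */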
static int vmw_cmd_invalid(struct vmw_private *dev_priv,
                           struct vmw_sw_context *sw_context,
                           SVGA3dCmdHeader *header)
{
        return capable(CAP_SYS_ADMIN) ? : -EINVAL;
}

static int vmw_cmd_ok(struct vmw_private *dev_priv,
                      struct vmw_sw_context *sw_context,
                      SVGA3dCmdHeader *header)
{
        return 0;
}

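/*
 * Check that the context id referenced by a command may be used by this
 * client. The most recently validated cid is cached in the software
 * context so that runs of commands using the same context are only
 * looked up once.
 */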
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
                             struct vmw_sw_context *sw_context,
                             SVGA3dCmdHeader *header)
{
        struct vmw_cid_cmd {
                SVGA3dCmdHeader header;
                __le32 cid;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_cid_cmd, header);
        if (likely(sw_context->cid_valid && cmd->cid == sw_context->last_cid))
                return 0;

        ret = vmw_context_check(dev_priv, sw_context->tfile, cmd->cid);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Could not find or use context %u\n",
                          (unsigned) cmd->cid);
                return ret;
        }

        sw_context->last_cid = cmd->cid;
        sw_context->cid_valid = true;

        return 0;
}

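/*
 * Check that a surface id is either SVGA3D_INVALID_ID or refers to a
 * surface this client may use. As with the cid check, the last
 * validated sid is cached in the software context.
 */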
static int vmw_cmd_sid_check(struct vmw_private *dev_priv,
                             struct vmw_sw_context *sw_context,
                             uint32_t sid)
{
        if (unlikely((!sw_context->sid_valid || sid != sw_context->last_sid) &&
                     sid != SVGA3D_INVALID_ID)) {
                int ret = vmw_surface_check(dev_priv, sw_context->tfile, sid);

                if (unlikely(ret != 0)) {
                        DRM_ERROR("Could not find or use surface %u\n",
                                  (unsigned) sid);
                        return ret;
                }

                sw_context->last_sid = sid;
                sw_context->sid_valid = true;
        }
        return 0;
}

static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
                                           struct vmw_sw_context *sw_context,
                                           SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSetRenderTarget body;
        } *cmd;
        int ret;

        ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
        if (unlikely(ret != 0))
                return ret;

        cmd = container_of(header, struct vmw_sid_cmd, header);
        return vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.target.sid);
}

static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
                                      struct vmw_sw_context *sw_context,
                                      SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSurfaceCopy body;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_sid_cmd, header);
        ret = vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.src.sid);
        if (unlikely(ret != 0))
                return ret;
        return vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.dest.sid);
}

static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
                                     struct vmw_sw_context *sw_context,
                                     SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSurfaceStretchBlt body;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_sid_cmd, header);
        ret = vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.src.sid);
        if (unlikely(ret != 0))
                return ret;
        return vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.dest.sid);
}

static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
                                         struct vmw_sw_context *sw_context,
                                         SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdBlitSurfaceToScreen body;
        } *cmd;

        cmd = container_of(header, struct vmw_sid_cmd, header);
        return vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.srcImage.sid);
}

static int vmw_cmd_present_check(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdPresent body;
        } *cmd;

        cmd = container_of(header, struct vmw_sid_cmd, header);
        return vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.sid);
}

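/*
 * Verify a SURFACE_DMA command: check the surface id, look up the buffer
 * object backing the guest side of the transfer, record a relocation for
 * its guest pointer and add the buffer to the list of objects to be
 * reserved and validated before the fifo is committed. The surface is
 * also looked up so that cursor contents can be snooped.
 */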
static int vmw_cmd_dma(struct vmw_private *dev_priv,
                       struct vmw_sw_context *sw_context,
                       SVGA3dCmdHeader *header)
{
        uint32_t handle;
        struct vmw_dma_buffer *vmw_bo = NULL;
        struct ttm_buffer_object *bo;
        struct vmw_surface *srf = NULL;
        struct vmw_dma_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSurfaceDMA dma;
        } *cmd;
        struct vmw_relocation *reloc;
        int ret;
        uint32_t cur_validate_node;
        struct ttm_validate_buffer *val_buf;

        cmd = container_of(header, struct vmw_dma_cmd, header);
        ret = vmw_cmd_sid_check(dev_priv, sw_context, cmd->dma.host.sid);
        if (unlikely(ret != 0))
                return ret;

        handle = cmd->dma.guest.ptr.gmrId;
        ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Could not find or use GMR region.\n");
                return -EINVAL;
        }
        bo = &vmw_bo->base;

        if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
                DRM_ERROR("Max number of DMA commands per submission"
                          " exceeded\n");
                ret = -EINVAL;
                goto out_no_reloc;
        }

        reloc = &sw_context->relocs[sw_context->cur_reloc++];
        reloc->location = &cmd->dma.guest.ptr;

        cur_validate_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf);
        if (unlikely(cur_validate_node >= VMWGFX_MAX_GMRS)) {
                DRM_ERROR("Max number of DMA buffers per submission"
                          " exceeded.\n");
                ret = -EINVAL;
                goto out_no_reloc;
        }

        reloc->index = cur_validate_node;
        if (unlikely(cur_validate_node == sw_context->cur_val_buf)) {
                val_buf = &sw_context->val_bufs[cur_validate_node];
                val_buf->bo = ttm_bo_reference(bo);
                val_buf->new_sync_obj_arg = (void *) dev_priv;
                list_add_tail(&val_buf->head, &sw_context->validate_nodes);
                ++sw_context->cur_val_buf;
        }

        ret = vmw_user_surface_lookup(dev_priv, sw_context->tfile,
                                      cmd->dma.host.sid, &srf);
        if (ret) {
                DRM_ERROR("could not find surface\n");
                goto out_no_reloc;
        }

        vmw_kms_cursor_snoop(srf, sw_context->tfile, bo, header);
        vmw_surface_unreference(&srf);

out_no_reloc:
        vmw_dmabuf_unreference(&vmw_bo);
        return ret;
}

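/*
 * Dispatch table mapping SVGA3D command ids (offset by SVGA_3D_CMD_BASE)
 * to verifier callbacks. Commands that user space may not submit
 * directly are routed to vmw_cmd_invalid.
 */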
typedef int (*vmw_cmd_func) (struct vmw_private *,
                             struct vmw_sw_context *,
                             SVGA3dCmdHeader *);

#define VMW_CMD_DEF(cmd, func) \
        [cmd - SVGA_3D_CMD_BASE] = func

static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
        VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid),
        VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid),
        VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma),
        VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid),
        VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid),
        VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
                    &vmw_cmd_set_render_target_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok),
        VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
                    &vmw_cmd_blt_surf_screen_check)
};

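/*
 * Verify a single command and report its size so that the caller can
 * advance through the command stream. SVGA_CMD_UPDATE is special-cased
 * as a fixed-size (five 32-bit words) 2D command; everything else is
 * dispatched through vmw_cmd_funcs.
 */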
static int vmw_cmd_check(struct vmw_private *dev_priv,
                         struct vmw_sw_context *sw_context,
                         void *buf, uint32_t *size)
{
        uint32_t cmd_id;
        SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
        int ret;

        cmd_id = ((uint32_t *)buf)[0];
        if (cmd_id == SVGA_CMD_UPDATE) {
                *size = 5 << 2;
                return 0;
        }

        cmd_id = le32_to_cpu(header->id);
        *size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);

        cmd_id -= SVGA_3D_CMD_BASE;
        if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
                goto out_err;

        ret = vmw_cmd_funcs[cmd_id](dev_priv, sw_context, header);
        if (unlikely(ret != 0))
                goto out_err;

        return 0;
out_err:
        DRM_ERROR("Illegal / Invalid SVGA3D command: %d\n",
                  cmd_id + SVGA_3D_CMD_BASE);
        return -EINVAL;
}

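/*
 * Walk the whole command buffer, verifying each command in turn. The
 * per-command sizes returned by vmw_cmd_check() must add up exactly to
 * the submitted size, otherwise the stream is rejected.
 */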
static int vmw_cmd_check_all(struct vmw_private *dev_priv,
                             struct vmw_sw_context *sw_context,
                             void *buf, uint32_t size)
{
        int32_t cur_size = size;
        int ret;

        while (cur_size > 0) {
                ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
                if (unlikely(ret != 0))
                        return ret;
                buf = (void *)((unsigned long) buf + size);
                cur_size -= size;
        }

        if (unlikely(cur_size != 0)) {
                DRM_ERROR("Command verifier out of sync.\n");
                return -EINVAL;
        }

        return 0;
}

static void vmw_free_relocations(struct vmw_sw_context *sw_context)
{
        sw_context->cur_reloc = 0;
}

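/*
 * Patch the guest pointers recorded during command checking with the
 * final offset and GMR id of each validated buffer object.
 */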
static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
{
        uint32_t i;
        struct vmw_relocation *reloc;
        struct ttm_validate_buffer *validate;
        struct ttm_buffer_object *bo;

        for (i = 0; i < sw_context->cur_reloc; ++i) {
                reloc = &sw_context->relocs[i];
                validate = &sw_context->val_bufs[reloc->index];
                bo = validate->bo;
                reloc->location->offset += bo->offset;
                reloc->location->gmrId = vmw_dmabuf_gmr(bo);
        }
        vmw_free_relocations(sw_context);
}

static void vmw_clear_validations(struct vmw_sw_context *sw_context)
{
        struct ttm_validate_buffer *entry, *next;

        list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
                                 head) {
                list_del(&entry->head);
                vmw_dmabuf_validate_clear(entry->bo);
                ttm_bo_unref(&entry->bo);
                sw_context->cur_val_buf--;
        }
        BUG_ON(sw_context->cur_val_buf != 0);
}

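/*
 * Make a single buffer object visible to the device: try to bind it to
 * a GMR first and fall back to placing it in VRAM if that fails.
 */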
static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
                                      struct ttm_buffer_object *bo)
{
        int ret;

        if (vmw_dmabuf_gmr(bo) != SVGA_GMR_NULL)
                return 0;

        ret = vmw_gmr_bind(dev_priv, bo);
        if (likely(ret == 0 || ret == -ERESTART))
                return ret;

        ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
        return ret;
}

static int vmw_validate_buffers(struct vmw_private *dev_priv,
                                struct vmw_sw_context *sw_context)
{
        struct ttm_validate_buffer *entry;
        int ret;

        list_for_each_entry(entry, &sw_context->validate_nodes, head) {
                ret = vmw_validate_single_buffer(dev_priv, entry->bo);
                if (unlikely(ret != 0))
                        return ret;
        }
        return 0;
}

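/*
 * DRM_VMW_EXECBUF ioctl: reserve fifo space, copy the user command
 * stream into it, verify the commands, reserve and validate all
 * referenced buffer objects, apply relocations, commit the fifo and
 * fence the submission. A fence representation is copied back to user
 * space when done.
 */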
int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
        struct drm_vmw_fence_rep fence_rep;
        struct drm_vmw_fence_rep __user *user_fence_rep;
        int ret;
        void *user_cmd;
        void *cmd;
        uint32_t sequence;
        struct vmw_sw_context *sw_context = &dev_priv->ctx;
        struct vmw_master *vmaster = vmw_master(file_priv->master);

        ret = ttm_read_lock(&vmaster->lock, true);
        if (unlikely(ret != 0))
                return ret;

        ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
        if (unlikely(ret != 0)) {
                ret = -ERESTART;
                goto out_no_cmd_mutex;
        }

        cmd = vmw_fifo_reserve(dev_priv, arg->command_size);
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving fifo space for commands.\n");
                ret = -ENOMEM;
                goto out_unlock;
        }

        user_cmd = (void __user *)(unsigned long)arg->commands;
        ret = copy_from_user(cmd, user_cmd, arg->command_size);

        if (unlikely(ret != 0)) {
                /*
                 * copy_from_user() returns the number of bytes that could
                 * not be copied rather than an errno, so convert it to
                 * -EFAULT before bailing out.
                 */
                ret = -EFAULT;
                DRM_ERROR("Failed copying commands.\n");
                goto out_commit;
        }

        sw_context->tfile = vmw_fpriv(file_priv)->tfile;
        sw_context->cid_valid = false;
        sw_context->sid_valid = false;
        sw_context->cur_reloc = 0;
        sw_context->cur_val_buf = 0;

        INIT_LIST_HEAD(&sw_context->validate_nodes);

        ret = vmw_cmd_check_all(dev_priv, sw_context, cmd, arg->command_size);
        if (unlikely(ret != 0))
                goto out_err;
        ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes,
                                     dev_priv->val_seq++);
        if (unlikely(ret != 0))
                goto out_err;

        ret = vmw_validate_buffers(dev_priv, sw_context);
        if (unlikely(ret != 0))
                goto out_err;

        vmw_apply_relocations(sw_context);
        vmw_fifo_commit(dev_priv, arg->command_size);

        ret = vmw_fifo_send_fence(dev_priv, &sequence);

        ttm_eu_fence_buffer_objects(&sw_context->validate_nodes,
                                    (void *)(unsigned long) sequence);
        vmw_clear_validations(sw_context);
        mutex_unlock(&dev_priv->cmdbuf_mutex);

        /*
         * This error is harmless, because if fence submission fails,
         * vmw_fifo_send_fence will sync.
         */

        if (ret != 0)
                DRM_ERROR("Fence submission error. Syncing.\n");

        fence_rep.error = ret;
        fence_rep.fence_seq = (uint64_t) sequence;

        user_fence_rep = (struct drm_vmw_fence_rep __user *)
            (unsigned long)arg->fence_rep;

        /*
         * copy_to_user errors will be detected by user space not
         * seeing fence_rep::error filled in.
         */

        ret = copy_to_user(user_fence_rep, &fence_rep, sizeof(fence_rep));

        vmw_kms_cursor_post_execbuf(dev_priv);
        ttm_read_unlock(&vmaster->lock);
        return 0;
out_err:
        vmw_free_relocations(sw_context);
        ttm_eu_backoff_reservation(&sw_context->validate_nodes);
        vmw_clear_validations(sw_context);
out_commit:
        vmw_fifo_commit(dev_priv, 0);
out_unlock:
        mutex_unlock(&dev_priv->cmdbuf_mutex);
out_no_cmd_mutex:
        ttm_read_unlock(&vmaster->lock);
        return ret;
}