drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include "ttm/ttm_bo_api.h"
#include "ttm/ttm_placement.h"

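/*
 * Validator stubs: vmw_cmd_invalid is used for commands that user-space
 * may not submit directly (its non-zero return makes the verifier reject
 * the command), while vmw_cmd_ok accepts commands that need no checking.
 */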
static int vmw_cmd_invalid(struct vmw_private *dev_priv,
                           struct vmw_sw_context *sw_context,
                           SVGA3dCmdHeader *header)
{
        return capable(CAP_SYS_ADMIN) ? : -EINVAL;
}

static int vmw_cmd_ok(struct vmw_private *dev_priv,
                      struct vmw_sw_context *sw_context,
                      SVGA3dCmdHeader *header)
{
        return 0;
}

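/*
 * Verify that the context id embedded in a command is valid for this
 * client, caching the last successfully looked-up id to avoid repeated
 * lookups.
 */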
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
                             struct vmw_sw_context *sw_context,
                             SVGA3dCmdHeader *header)
{
        struct vmw_cid_cmd {
                SVGA3dCmdHeader header;
                __le32 cid;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_cid_cmd, header);
        if (likely(sw_context->cid_valid && cmd->cid == sw_context->last_cid))
                return 0;

        ret = vmw_context_check(dev_priv, sw_context->tfile, cmd->cid);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Could not find or use context %u\n",
                          (unsigned) cmd->cid);
                return ret;
        }

        sw_context->last_cid = cmd->cid;
        sw_context->cid_valid = true;

        return 0;
}

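/*
 * Verify a surface id and patch it in place with the device id of the
 * surface. SVGA3D_INVALID_ID is passed through untouched. The result of
 * the last successful lookup is cached in the software context.
 */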
static int vmw_cmd_sid_check(struct vmw_private *dev_priv,
                             struct vmw_sw_context *sw_context,
                             uint32_t *sid)
{
        if (*sid == SVGA3D_INVALID_ID)
                return 0;

        if (unlikely((!sw_context->sid_valid ||
                      *sid != sw_context->last_sid))) {
                int real_id;
                int ret = vmw_surface_check(dev_priv, sw_context->tfile,
                                            *sid, &real_id);

                if (unlikely(ret != 0)) {
                        DRM_ERROR("Could not find or use surface 0x%08x "
                                  "address 0x%08lx\n",
                                  (unsigned int) *sid,
                                  (unsigned long) sid);
                        return ret;
                }

                sw_context->last_sid = *sid;
                sw_context->sid_valid = true;
                *sid = real_id;
                sw_context->sid_translation = real_id;
        } else
                *sid = sw_context->sid_translation;

        return 0;
}


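/*
 * The per-command checkers below simply run the cid / sid checks on the
 * ids embedded in each command body.
 */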
static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
                                           struct vmw_sw_context *sw_context,
                                           SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSetRenderTarget body;
        } *cmd;
        int ret;

        ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
        if (unlikely(ret != 0))
                return ret;

        cmd = container_of(header, struct vmw_sid_cmd, header);
        ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.target.sid);
        return ret;
}

static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
                                      struct vmw_sw_context *sw_context,
                                      SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSurfaceCopy body;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_sid_cmd, header);
        ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
        if (unlikely(ret != 0))
                return ret;
        return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
}

static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
                                     struct vmw_sw_context *sw_context,
                                     SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSurfaceStretchBlt body;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_sid_cmd, header);
        ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
        if (unlikely(ret != 0))
                return ret;
        return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
}

static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
                                         struct vmw_sw_context *sw_context,
                                         SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdBlitSurfaceToScreen body;
        } *cmd;

        cmd = container_of(header, struct vmw_sid_cmd, header);
        return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.srcImage.sid);
}

static int vmw_cmd_present_check(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdPresent body;
        } *cmd;

        cmd = container_of(header, struct vmw_sid_cmd, header);
        return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid);
}

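/*
 * Look up the DMA buffer backing a guest pointer, record a relocation
 * for the pointer and add the buffer to the list of buffers to validate.
 * On success a reference to the buffer is returned in @vmw_bo_p.
 */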
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
                                   struct vmw_sw_context *sw_context,
                                   SVGAGuestPtr *ptr,
                                   struct vmw_dma_buffer **vmw_bo_p)
{
        struct vmw_dma_buffer *vmw_bo = NULL;
        struct ttm_buffer_object *bo;
        uint32_t handle = ptr->gmrId;
        struct vmw_relocation *reloc;
        uint32_t cur_validate_node;
        struct ttm_validate_buffer *val_buf;
        int ret;

        ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Could not find or use GMR region.\n");
                return -EINVAL;
        }
        bo = &vmw_bo->base;

        if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
                DRM_ERROR("Max number of relocations per submission"
                          " exceeded\n");
                ret = -EINVAL;
                goto out_no_reloc;
        }

        reloc = &sw_context->relocs[sw_context->cur_reloc++];
        reloc->location = ptr;

        cur_validate_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf);
        if (unlikely(cur_validate_node >= VMWGFX_MAX_GMRS)) {
                DRM_ERROR("Max number of DMA buffers per submission"
                          " exceeded.\n");
                ret = -EINVAL;
                goto out_no_reloc;
        }

        reloc->index = cur_validate_node;
        if (unlikely(cur_validate_node == sw_context->cur_val_buf)) {
                val_buf = &sw_context->val_bufs[cur_validate_node];
                val_buf->bo = ttm_bo_reference(bo);
                val_buf->new_sync_obj_arg = (void *) dev_priv;
                list_add_tail(&val_buf->head, &sw_context->validate_nodes);
                ++sw_context->cur_val_buf;
        }
        *vmw_bo_p = vmw_bo;
        return 0;

out_no_reloc:
        vmw_dmabuf_unreference(&vmw_bo);
        *vmw_bo_p = NULL;
        return ret;
}

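/*
 * Query commands: check the context id and translate the guest pointer
 * that receives the query result.
 */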
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
                             struct vmw_sw_context *sw_context,
                             SVGA3dCmdHeader *header)
{
        struct vmw_dma_buffer *vmw_bo;
        struct vmw_query_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdEndQuery q;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_query_cmd, header);
        ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_translate_guest_ptr(dev_priv, sw_context,
                                      &cmd->q.guestResult,
                                      &vmw_bo);
        if (unlikely(ret != 0))
                return ret;

        vmw_dmabuf_unreference(&vmw_bo);
        return 0;
}

static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
                              struct vmw_sw_context *sw_context,
                              SVGA3dCmdHeader *header)
{
        struct vmw_dma_buffer *vmw_bo;
        struct vmw_query_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdWaitForQuery q;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_query_cmd, header);
        ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_translate_guest_ptr(dev_priv, sw_context,
                                      &cmd->q.guestResult,
                                      &vmw_bo);
        if (unlikely(ret != 0))
                return ret;

        vmw_dmabuf_unreference(&vmw_bo);
        return 0;
}


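/*
 * Check a surface DMA command: translate the guest pointer, look up the
 * host surface and patch the command with its device surface id.
 */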
static int vmw_cmd_dma(struct vmw_private *dev_priv,
                       struct vmw_sw_context *sw_context,
                       SVGA3dCmdHeader *header)
{
        struct vmw_dma_buffer *vmw_bo = NULL;
        struct ttm_buffer_object *bo;
        struct vmw_surface *srf = NULL;
        struct vmw_dma_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSurfaceDMA dma;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_dma_cmd, header);
        ret = vmw_translate_guest_ptr(dev_priv, sw_context,
                                      &cmd->dma.guest.ptr,
                                      &vmw_bo);
        if (unlikely(ret != 0))
                return ret;

        bo = &vmw_bo->base;
        ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile,
                                             cmd->dma.host.sid, &srf);
        if (ret) {
                DRM_ERROR("could not find surface\n");
                goto out_no_reloc;
        }

        /*
         * Patch command stream with device SID.
         */

        cmd->dma.host.sid = srf->res.id;
        vmw_kms_cursor_snoop(srf, sw_context->tfile, bo, header);
        /*
         * FIXME: May deadlock here when called from the
         * command parsing code.
         */
        vmw_surface_unreference(&srf);

out_no_reloc:
        vmw_dmabuf_unreference(&vmw_bo);
        return ret;
}

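/*
 * Check a draw primitives command: verify that the declared counts fit
 * inside the command body and run the surface id check on every vertex
 * declaration and index range.
 */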
static int vmw_cmd_draw(struct vmw_private *dev_priv,
                        struct vmw_sw_context *sw_context,
                        SVGA3dCmdHeader *header)
{
        struct vmw_draw_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdDrawPrimitives body;
        } *cmd;
        SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
                (unsigned long)header + sizeof(*cmd));
        SVGA3dPrimitiveRange *range;
        uint32_t i;
        uint32_t maxnum;
        int ret;

        ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
        if (unlikely(ret != 0))
                return ret;

        cmd = container_of(header, struct vmw_draw_cmd, header);
        maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);

        if (unlikely(cmd->body.numVertexDecls > maxnum)) {
                DRM_ERROR("Illegal number of vertex declarations.\n");
                return -EINVAL;
        }

        for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
                ret = vmw_cmd_sid_check(dev_priv, sw_context,
                                        &decl->array.surfaceId);
                if (unlikely(ret != 0))
                        return ret;
        }

        maxnum = (header->size - sizeof(cmd->body) -
                  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
        if (unlikely(cmd->body.numRanges > maxnum)) {
                DRM_ERROR("Illegal number of index ranges.\n");
                return -EINVAL;
        }

        range = (SVGA3dPrimitiveRange *) decl;
        for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
                ret = vmw_cmd_sid_check(dev_priv, sw_context,
                                        &range->indexArray.surfaceId);
                if (unlikely(ret != 0))
                        return ret;
        }
        return 0;
}


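/*
 * Check a set texture state command: verify the surface id of every
 * SVGA3D_TS_BIND_TEXTURE state carried by the command.
 */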
static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
                             struct vmw_sw_context *sw_context,
                             SVGA3dCmdHeader *header)
{
        struct vmw_tex_state_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSetTextureState state;
        };

        SVGA3dTextureState *last_state = (SVGA3dTextureState *)
          ((unsigned long) header + header->size + sizeof(header));
        SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
                ((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
        int ret;

        ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
        if (unlikely(ret != 0))
                return ret;

        for (; cur_state < last_state; ++cur_state) {
                if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
                        continue;

                ret = vmw_cmd_sid_check(dev_priv, sw_context,
                                        &cur_state->value);
                if (unlikely(ret != 0))
                        return ret;
        }

        return 0;
}


typedef int (*vmw_cmd_func) (struct vmw_private *,
                             struct vmw_sw_context *,
                             SVGA3dCmdHeader *);

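/*
 * Dispatch table mapping SVGA 3D command ids to their checker functions.
 */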
#define VMW_CMD_DEF(cmd, func) \
        [cmd - SVGA_3D_CMD_BASE] = func

static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
        VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid),
        VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid),
        VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma),
        VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid),
        VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid),
        VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
                    &vmw_cmd_set_render_target_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state),
        VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw),
        VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query),
        VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query),
        VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok),
        VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
                    &vmw_cmd_blt_surf_screen_check)
};

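/*
 * Verify a single command and return its size in @size. SVGA_CMD_UPDATE
 * is passed through unchecked; all other commands are dispatched through
 * vmw_cmd_funcs.
 */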
static int vmw_cmd_check(struct vmw_private *dev_priv,
                         struct vmw_sw_context *sw_context,
                         void *buf, uint32_t *size)
{
        uint32_t cmd_id;
        uint32_t size_remaining = *size;
        SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
        int ret;

        cmd_id = ((uint32_t *)buf)[0];
        if (cmd_id == SVGA_CMD_UPDATE) {
                *size = 5 << 2;
                return 0;
        }

        cmd_id = le32_to_cpu(header->id);
        *size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);

        cmd_id -= SVGA_3D_CMD_BASE;
        if (unlikely(*size > size_remaining))
                goto out_err;

        if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
                goto out_err;

        ret = vmw_cmd_funcs[cmd_id](dev_priv, sw_context, header);
        if (unlikely(ret != 0))
                goto out_err;

        return 0;
out_err:
        DRM_ERROR("Illegal / Invalid SVGA3D command: %d\n",
                  cmd_id + SVGA_3D_CMD_BASE);
        return -EINVAL;
}

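/*
 * Walk the whole command buffer, verifying each command in turn.
 */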
static int vmw_cmd_check_all(struct vmw_private *dev_priv,
                             struct vmw_sw_context *sw_context,
                             void *buf, uint32_t size)
{
        int32_t cur_size = size;
        int ret;

        while (cur_size > 0) {
                size = cur_size;
                ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
                if (unlikely(ret != 0))
                        return ret;
                buf = (void *)((unsigned long) buf + size);
                cur_size -= size;
        }

        if (unlikely(cur_size != 0)) {
                DRM_ERROR("Command verifier out of sync.\n");
                return -EINVAL;
        }

        return 0;
}

static void vmw_free_relocations(struct vmw_sw_context *sw_context)
{
        sw_context->cur_reloc = 0;
}

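/*
 * Patch the recorded guest pointers with the final placement of their
 * buffers: an offset within the framebuffer GMR for VRAM placements, or
 * the buffer's own GMR id otherwise.
 */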
static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
{
        uint32_t i;
        struct vmw_relocation *reloc;
        struct ttm_validate_buffer *validate;
        struct ttm_buffer_object *bo;

        for (i = 0; i < sw_context->cur_reloc; ++i) {
                reloc = &sw_context->relocs[i];
                validate = &sw_context->val_bufs[reloc->index];
                bo = validate->bo;
                if (bo->mem.mem_type == TTM_PL_VRAM) {
                        reloc->location->offset += bo->offset;
                        reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
                } else
                        reloc->location->gmrId = bo->mem.start;
        }
        vmw_free_relocations(sw_context);
}

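/*
 * Empty the validation list, dropping the references taken on its buffers.
 */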
static void vmw_clear_validations(struct vmw_sw_context *sw_context)
{
        struct ttm_validate_buffer *entry, *next;

        list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
                                 head) {
                list_del(&entry->head);
                vmw_dmabuf_validate_clear(entry->bo);
                ttm_bo_unref(&entry->bo);
                sw_context->cur_val_buf--;
        }
        BUG_ON(sw_context->cur_val_buf != 0);
}

static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
                                      struct ttm_buffer_object *bo)
{
        int ret;

        /*
         * Put BO in VRAM if there is space, otherwise as a GMR.
         * If there is no space in VRAM and GMR ids are all used up,
         * start evicting GMRs to make room. If the DMA buffer can't be
         * used as a GMR, this will return -ENOMEM.
         */

        ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false, false);
        if (likely(ret == 0 || ret == -ERESTARTSYS))
                return ret;

        /*
         * If that failed, try VRAM again, this time evicting
         * previous contents.
         */

        DRM_INFO("Falling through to VRAM.\n");
        ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false, false);
        return ret;
}


static int vmw_validate_buffers(struct vmw_private *dev_priv,
                                struct vmw_sw_context *sw_context)
{
        struct ttm_validate_buffer *entry;
        int ret;

        list_for_each_entry(entry, &sw_context->validate_nodes, head) {
                ret = vmw_validate_single_buffer(dev_priv, entry->bo);
                if (unlikely(ret != 0))
                        return ret;
        }
        return 0;
}

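/*
 * Execbuf ioctl: copy the user command stream into the fifo, verify and
 * patch it, reserve, validate and fence the referenced buffers, then
 * commit the commands and return the fence sequence to user space.
 */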
int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
        struct drm_vmw_fence_rep fence_rep;
        struct drm_vmw_fence_rep __user *user_fence_rep;
        int ret;
        void *user_cmd;
        void *cmd;
        uint32_t sequence;
        struct vmw_sw_context *sw_context = &dev_priv->ctx;
        struct vmw_master *vmaster = vmw_master(file_priv->master);

        ret = ttm_read_lock(&vmaster->lock, true);
        if (unlikely(ret != 0))
                return ret;

        ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
        if (unlikely(ret != 0)) {
                ret = -ERESTARTSYS;
                goto out_no_cmd_mutex;
        }

        cmd = vmw_fifo_reserve(dev_priv, arg->command_size);
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving fifo space for commands.\n");
                ret = -ENOMEM;
                goto out_unlock;
        }

        user_cmd = (void __user *)(unsigned long)arg->commands;
        ret = copy_from_user(cmd, user_cmd, arg->command_size);

        if (unlikely(ret != 0)) {
                ret = -EFAULT;
                DRM_ERROR("Failed copying commands.\n");
                goto out_commit;
        }

        sw_context->tfile = vmw_fpriv(file_priv)->tfile;
        sw_context->cid_valid = false;
        sw_context->sid_valid = false;
        sw_context->cur_reloc = 0;
        sw_context->cur_val_buf = 0;

        INIT_LIST_HEAD(&sw_context->validate_nodes);

        ret = vmw_cmd_check_all(dev_priv, sw_context, cmd, arg->command_size);
        if (unlikely(ret != 0))
                goto out_err;
        ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes,
                                     dev_priv->val_seq++);
        if (unlikely(ret != 0))
                goto out_err;

        ret = vmw_validate_buffers(dev_priv, sw_context);
        if (unlikely(ret != 0))
                goto out_err;

        vmw_apply_relocations(sw_context);

        if (arg->throttle_us) {
                ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.fence_queue,
                                   arg->throttle_us);

                if (unlikely(ret != 0))
                        goto out_err;
        }

        vmw_fifo_commit(dev_priv, arg->command_size);

        ret = vmw_fifo_send_fence(dev_priv, &sequence);

        ttm_eu_fence_buffer_objects(&sw_context->validate_nodes,
                                    (void *)(unsigned long) sequence);
        vmw_clear_validations(sw_context);
        mutex_unlock(&dev_priv->cmdbuf_mutex);

        /*
         * This error is harmless, because if fence submission fails,
         * vmw_fifo_send_fence will sync.
         */

        if (ret != 0)
                DRM_ERROR("Fence submission error. Syncing.\n");

        fence_rep.error = ret;
        fence_rep.fence_seq = (uint64_t) sequence;

        user_fence_rep = (struct drm_vmw_fence_rep __user *)
            (unsigned long)arg->fence_rep;

        /*
         * copy_to_user errors will be detected by user space not
         * seeing fence_rep::error filled in.
         */

        ret = copy_to_user(user_fence_rep, &fence_rep, sizeof(fence_rep));

        vmw_kms_cursor_post_execbuf(dev_priv);
        ttm_read_unlock(&vmaster->lock);
        return 0;
out_err:
        vmw_free_relocations(sw_context);
        ttm_eu_backoff_reservation(&sw_context->validate_nodes);
        vmw_clear_validations(sw_context);
out_commit:
        vmw_fifo_commit(dev_priv, 0);
out_unlock:
        mutex_unlock(&dev_priv->cmdbuf_mutex);
out_no_cmd_mutex:
        ttm_read_unlock(&vmaster->lock);
        return ret;
}