/* drivers/gpu/drm/nouveau/nouveau_gem.c */
/*
 * Copyright (C) 2008 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "drmP.h"
#include "drm.h"

#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_dma.h"

#define nouveau_gem_pushbuf_sync(chan) 0

int
nouveau_gem_object_new(struct drm_gem_object *gem)
{
	return 0;
}

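/*
 * GEM object destructor: drops the final TTM reference on the backing
 * buffer object.  Any outstanding synccpu grab or pin references are
 * released first so the BO can actually be freed.
 */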
void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
	struct nouveau_bo *nvbo = gem->driver_private;
	struct ttm_buffer_object *bo;

	if (!nvbo)
		return;
	bo = &nvbo->bo;
	nvbo->gem = NULL;

	if (unlikely(nvbo->cpu_filp))
		ttm_bo_synccpu_write_release(bo);

	if (unlikely(nvbo->pin_refcnt)) {
		nvbo->pin_refcnt = 1;
		nouveau_bo_unpin(nvbo);
	}

	ttm_bo_unref(&bo);

	drm_gem_object_release(gem);
	kfree(gem);
}

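/*
 * Allocate a nouveau_bo via TTM and wrap it in a GEM object.  On
 * failure the BO reference is dropped before returning.  Note that
 * "persistant_swap_storage" (sic) is the actual field name in this
 * version of TTM.
 */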
int
nouveau_gem_new(struct drm_device *dev, struct nouveau_channel *chan,
		int size, int align, uint32_t flags, uint32_t tile_mode,
		uint32_t tile_flags, bool no_vm, bool mappable,
		struct nouveau_bo **pnvbo)
{
	struct nouveau_bo *nvbo;
	int ret;

	ret = nouveau_bo_new(dev, chan, size, align, flags, tile_mode,
			     tile_flags, no_vm, mappable, pnvbo);
	if (ret)
		return ret;
	nvbo = *pnvbo;

	nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
	if (!nvbo->gem) {
		nouveau_bo_ref(NULL, pnvbo);
		return -ENOMEM;
	}

	nvbo->bo.persistant_swap_storage = nvbo->gem->filp;
	nvbo->gem->driver_private = nvbo;
	return 0;
}

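/* Fill in a drm_nouveau_gem_info reply from the BO's current state. */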
static int
nouveau_gem_info(struct drm_gem_object *gem, struct drm_nouveau_gem_info *rep)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);

	if (nvbo->bo.mem.mem_type == TTM_PL_TT)
		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
	else
		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;

	rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	rep->offset = nvbo->bo.offset;
	rep->map_handle = nvbo->mappable ? nvbo->bo.addr_space_offset : 0;
	rep->tile_mode = nvbo->tile_mode;
	rep->tile_flags = nvbo->tile_flags;
	return 0;
}

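/* Whitelist of tile_flags values userspace is allowed to request. */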
static bool
nouveau_gem_tile_flags_valid(struct drm_device *dev, uint32_t tile_flags)
{
	switch (tile_flags) {
	case 0x0000:
	case 0x1800:
	case 0x2800:
	case 0x4800:
	case 0x7000:
	case 0x7400:
	case 0x7a00:
	case 0xe000:
		break;
	default:
		NV_ERROR(dev, "bad page flags: 0x%08x\n", tile_flags);
		return false;
	}

	return true;
}

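/*
 * DRM_NOUVEAU_GEM_NEW: translate the requested domains into TTM
 * placement flags, allocate the BO and return a handle plus its
 * initial placement info to userspace.
 */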
int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_gem_new *req = data;
	struct nouveau_bo *nvbo = NULL;
	struct nouveau_channel *chan = NULL;
	uint32_t flags = 0;
	int ret = 0;

	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;

	if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL))
		dev_priv->ttm.bdev.dev_mapping = dev_priv->dev->dev_mapping;

	if (req->channel_hint) {
		NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel_hint,
						     file_priv, chan);
	}

	if (req->info.domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (req->info.domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;
	if (!flags || req->info.domain & NOUVEAU_GEM_DOMAIN_CPU)
		flags |= TTM_PL_FLAG_SYSTEM;

	if (!nouveau_gem_tile_flags_valid(dev, req->info.tile_flags))
		return -EINVAL;

	ret = nouveau_gem_new(dev, chan, req->info.size, req->align, flags,
			      req->info.tile_mode, req->info.tile_flags, false,
			      (req->info.domain & NOUVEAU_GEM_DOMAIN_MAPPABLE),
			      &nvbo);
	if (ret)
		return ret;

	ret = nouveau_gem_info(nvbo->gem, &req->info);
	if (ret)
		goto out;

	ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
out:
	drm_gem_object_handle_unreference_unlocked(nvbo->gem);

	if (ret)
		drm_gem_object_unreference_unlocked(nvbo->gem);
	return ret;
}

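/*
 * Compute TTM placement from the userspace domain masks.  Write
 * domains take priority over read domains, and the BO's current
 * placement is preferred when it already satisfies the request, to
 * avoid needless migration.
 */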
static int
nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
		       uint32_t write_domains, uint32_t valid_domains)
{
	struct nouveau_bo *nvbo = gem->driver_private;
	struct ttm_buffer_object *bo = &nvbo->bo;
	uint32_t domains = valid_domains &
		(write_domains ? write_domains : read_domains);
	uint32_t pref_flags = 0, valid_flags = 0;

	if (!domains)
		return -EINVAL;

	if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
		valid_flags |= TTM_PL_FLAG_VRAM;

	if (valid_domains & NOUVEAU_GEM_DOMAIN_GART)
		valid_flags |= TTM_PL_FLAG_TT;

	if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
	    bo->mem.mem_type == TTM_PL_VRAM)
		pref_flags |= TTM_PL_FLAG_VRAM;

	else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
		 bo->mem.mem_type == TTM_PL_TT)
		pref_flags |= TTM_PL_FLAG_TT;

	else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
		pref_flags |= TTM_PL_FLAG_VRAM;

	else
		pref_flags |= TTM_PL_FLAG_TT;

	nouveau_bo_placement_set(nvbo, pref_flags, valid_flags);

	return 0;
}

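/*
 * Buffers on a validation list, bucketed by which domains they are
 * allowed to end up in (VRAM only, GART only, or either).
 */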
struct validate_op {
	struct list_head vram_list;
	struct list_head gart_list;
	struct list_head both_list;
};

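/*
 * Backend of validate_fini(): fence (or simply release) every BO on a
 * validation list, unmap any reloc kmaps, and drop the reservation and
 * GEM reference taken by validate_init().
 */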
static void
validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
{
	struct list_head *entry, *tmp;
	struct nouveau_bo *nvbo;

	list_for_each_safe(entry, tmp, list) {
		nvbo = list_entry(entry, struct nouveau_bo, entry);
		if (likely(fence)) {
			struct nouveau_fence *prev_fence;

			spin_lock(&nvbo->bo.lock);
			prev_fence = nvbo->bo.sync_obj;
			nvbo->bo.sync_obj = nouveau_fence_ref(fence);
			spin_unlock(&nvbo->bo.lock);
			nouveau_fence_unref((void *)&prev_fence);
		}

		if (unlikely(nvbo->validate_mapped)) {
			ttm_bo_kunmap(&nvbo->kmap);
			nvbo->validate_mapped = false;
		}

		list_del(&nvbo->entry);
		nvbo->reserved_by = NULL;
		ttm_bo_unreserve(&nvbo->bo);
		drm_gem_object_unreference(nvbo->gem);
	}
}

static void
validate_fini(struct validate_op *op, struct nouveau_fence *fence)
{
	validate_fini_list(&op->vram_list, fence);
	validate_fini_list(&op->gart_list, fence);
	validate_fini_list(&op->both_list, fence);
}

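/*
 * Look up and reserve every BO referenced by the pushbuf, sorting each
 * one onto the appropriate validate_op list.  Reservation conflicts
 * (-EAGAIN) and active CPU writers cause the whole list to be unwound
 * and the reservation pass restarted from scratch.
 */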
static int
validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo,
	      int nr_buffers, struct validate_op *op)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t sequence;
	int trycnt = 0;
	int ret, i;

	sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
retry:
	if (++trycnt > 100000) {
		NV_ERROR(dev, "%s failed and gave up.\n", __func__);
		return -EINVAL;
	}

	for (i = 0; i < nr_buffers; i++) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
		struct drm_gem_object *gem;
		struct nouveau_bo *nvbo;

		gem = drm_gem_object_lookup(dev, file_priv, b->handle);
		if (!gem) {
			NV_ERROR(dev, "Unknown handle 0x%08x\n", b->handle);
			validate_fini(op, NULL);
			return -EINVAL;
		}
		nvbo = gem->driver_private;

		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
			NV_ERROR(dev, "multiple instances of buffer %d on "
				      "validation list\n", b->handle);
			validate_fini(op, NULL);
			return -EINVAL;
		}

		ret = ttm_bo_reserve(&nvbo->bo, false, false, true, sequence);
		if (ret) {
			validate_fini(op, NULL);
			if (ret == -EAGAIN)
				ret = ttm_bo_wait_unreserved(&nvbo->bo, false);
			drm_gem_object_unreference(gem);
			if (ret) {
				NV_ERROR(dev, "fail reserve\n");
				return ret;
			}
			goto retry;
		}

		b->user_priv = (uint64_t)(unsigned long)nvbo;
		nvbo->reserved_by = file_priv;
		nvbo->pbbo_index = i;
		if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
		    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
			list_add_tail(&nvbo->entry, &op->both_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
			list_add_tail(&nvbo->entry, &op->vram_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
			list_add_tail(&nvbo->entry, &op->gart_list);
		else {
			NV_ERROR(dev, "invalid valid domains: 0x%08x\n",
				 b->valid_domains);
			list_add_tail(&nvbo->entry, &op->both_list);
			validate_fini(op, NULL);
			return -EINVAL;
		}

		if (unlikely(atomic_read(&nvbo->bo.cpu_writers) > 0)) {
			validate_fini(op, NULL);

			if (nvbo->cpu_filp == file_priv) {
				NV_ERROR(dev, "bo %p mapped by process trying "
					      "to validate it!\n", nvbo);
				return -EINVAL;
			}

			ret = ttm_bo_wait_cpu(&nvbo->bo, false);
			if (ret) {
				NV_ERROR(dev, "fail wait_cpu\n");
				return ret;
			}
			goto retry;
		}
	}

	return 0;
}

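/*
 * Validate the placement of every BO on one list, syncing first
 * against fences from other channels.  Any BO whose offset or domain
 * no longer matches userspace's presumed value has its presumed data
 * invalidated and copied back; returns the number of such buffers
 * that will need relocations applied.
 */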
static int
validate_list(struct nouveau_channel *chan, struct list_head *list,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo, uint64_t user_pbbo_ptr)
{
	struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
				(void __force __user *)(uintptr_t)user_pbbo_ptr;
	struct drm_device *dev = chan->dev;
	struct nouveau_bo *nvbo;
	int ret, relocs = 0;

	list_for_each_entry(nvbo, list, entry) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
		struct nouveau_fence *prev_fence = nvbo->bo.sync_obj;

		if (prev_fence && nouveau_fence_channel(prev_fence) != chan) {
			spin_lock(&nvbo->bo.lock);
			ret = ttm_bo_wait(&nvbo->bo, false, false, false);
			spin_unlock(&nvbo->bo.lock);
			if (unlikely(ret)) {
				NV_ERROR(dev, "fail wait other chan\n");
				return ret;
			}
		}

		ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
					     b->write_domains,
					     b->valid_domains);
		if (unlikely(ret)) {
			NV_ERROR(dev, "fail set_domain\n");
			return ret;
		}

		nvbo->channel = chan;
		ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
				      false, false, false);
		nvbo->channel = NULL;
		if (unlikely(ret)) {
			NV_ERROR(dev, "fail ttm_validate\n");
			return ret;
		}

		if (nvbo->bo.offset == b->presumed.offset &&
		    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
		      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
		     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
		      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
			continue;

		if (nvbo->bo.mem.mem_type == TTM_PL_TT)
			b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
		else
			b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
		b->presumed.offset = nvbo->bo.offset;
		b->presumed.valid = 0;
		relocs++;

		if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed,
				     &b->presumed, sizeof(b->presumed)))
			return -EFAULT;
	}

	return relocs;
}

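/*
 * Reserve and validate all pushbuf buffers, accumulating in
 * *apply_relocs the number of buffers whose presumed offsets were
 * stale and therefore need relocations applied.
 */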
static int
nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
			     struct drm_file *file_priv,
			     struct drm_nouveau_gem_pushbuf_bo *pbbo,
			     uint64_t user_buffers, int nr_buffers,
			     struct validate_op *op, int *apply_relocs)
{
	struct drm_device *dev = chan->dev;
	int ret, relocs = 0;

	INIT_LIST_HEAD(&op->vram_list);
	INIT_LIST_HEAD(&op->gart_list);
	INIT_LIST_HEAD(&op->both_list);

	if (nr_buffers == 0)
		return 0;

	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
	if (unlikely(ret)) {
		NV_ERROR(dev, "validate_init\n");
		return ret;
	}

	ret = validate_list(chan, &op->vram_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		NV_ERROR(dev, "validate vram_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	ret = validate_list(chan, &op->gart_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		NV_ERROR(dev, "validate gart_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	ret = validate_list(chan, &op->both_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		NV_ERROR(dev, "validate both_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	*apply_relocs = relocs;
	return 0;
}

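/*
 * Copy a userspace array into a freshly allocated kernel buffer.  The
 * callers bound nmemb against the NOUVEAU_GEM_MAX_* limits beforehand,
 * which keeps the nmemb * size multiplication from overflowing.
 */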
static inline void *
u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
{
	void *mem;
	void __user *userptr = (void __force __user *)(uintptr_t)user;

	mem = kmalloc(nmemb * size, GFP_KERNEL);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	if (DRM_COPY_FROM_USER(mem, userptr, nmemb * size)) {
		kfree(mem);
		return ERR_PTR(-EFAULT);
	}

	return mem;
}

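/*
 * Patch relocations into the pushbuf: for each reloc entry whose
 * target BO moved (presumed.valid == 0), compute the low/high half of
 * the new offset, OR in any domain-dependent bits, wait for the BO to
 * go idle and write the fixed-up word through a kernel mapping.
 */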
static int
nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
				struct drm_nouveau_gem_pushbuf *req,
				struct drm_nouveau_gem_pushbuf_bo *bo)
{
	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
	int ret = 0;
	unsigned i;

	reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
	if (IS_ERR(reloc))
		return PTR_ERR(reloc);

	for (i = 0; i < req->nr_relocs; i++) {
		struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
		struct drm_nouveau_gem_pushbuf_bo *b;
		struct nouveau_bo *nvbo;
		uint32_t data;

		/* valid indices are 0..nr_buffers-1, so reject == too */
		if (unlikely(r->bo_index >= req->nr_buffers)) {
			NV_ERROR(dev, "reloc bo index invalid\n");
			ret = -EINVAL;
			break;
		}

		b = &bo[r->bo_index];
		if (b->presumed.valid)
			continue;

		if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
			NV_ERROR(dev, "reloc container bo index invalid\n");
			ret = -EINVAL;
			break;
		}
		nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;

		if (unlikely(r->reloc_bo_offset + 4 >
			     nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
			NV_ERROR(dev, "reloc outside of bo\n");
			ret = -EINVAL;
			break;
		}

		if (!nvbo->kmap.virtual) {
			ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
					  &nvbo->kmap);
			if (ret) {
				NV_ERROR(dev, "failed kmap for reloc\n");
				break;
			}
			nvbo->validate_mapped = true;
		}

		if (r->flags & NOUVEAU_GEM_RELOC_LOW)
			data = b->presumed.offset + r->data;
		else
		if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
			data = (b->presumed.offset + r->data) >> 32;
		else
			data = r->data;

		if (r->flags & NOUVEAU_GEM_RELOC_OR) {
			if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
				data |= r->tor;
			else
				data |= r->vor;
		}

		spin_lock(&nvbo->bo.lock);
		ret = ttm_bo_wait(&nvbo->bo, false, false, false);
		spin_unlock(&nvbo->bo.lock);
		if (ret) {
			NV_ERROR(dev, "reloc wait_idle failed: %d\n", ret);
			break;
		}

		nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
	}

	kfree(reloc);
	return ret;
}

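/*
 * DRM_NOUVEAU_GEM_PUSHBUF: the main command submission path.  Copies
 * in the push/buffer/reloc arrays, validates and relocates the
 * buffers, then submits each push segment: via the IB ring on
 * channels that have one, via CALL/RETURN on NV_20 and up, or by
 * patching a JUMP into the pushbuf on older chips.  A new fence is
 * emitted and attached to every buffer involved before the lists are
 * torn down.
 */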
int
nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_gem_pushbuf *req = data;
	struct drm_nouveau_gem_pushbuf_push *push;
	struct drm_nouveau_gem_pushbuf_bo *bo;
	struct nouveau_channel *chan;
	struct validate_op op;
	struct nouveau_fence *fence = NULL;
	int i, j, ret = 0, do_reloc = 0;

	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan);

	req->vram_available = dev_priv->fb_aper_free;
	req->gart_available = dev_priv->gart_info.aper_free;
	if (unlikely(req->nr_push == 0))
		goto out_next;

	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
		NV_ERROR(dev, "pushbuf push count exceeds limit: %d max %d\n",
			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
		return -EINVAL;
	}

	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
		NV_ERROR(dev, "pushbuf bo count exceeds limit: %d max %d\n",
			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
		return -EINVAL;
	}

	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
		NV_ERROR(dev, "pushbuf reloc count exceeds limit: %d max %d\n",
			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
		return -EINVAL;
	}

	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
	if (IS_ERR(push))
		return PTR_ERR(push);

	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
	if (IS_ERR(bo)) {
		kfree(push);
		return PTR_ERR(bo);
	}

	mutex_lock(&dev->struct_mutex);

	/* Validate buffer list */
	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
					   req->nr_buffers, &op, &do_reloc);
	if (ret) {
		NV_ERROR(dev, "validate: %d\n", ret);
		goto out;
	}

	/* Apply any relocations that are required */
	if (do_reloc) {
		ret = nouveau_gem_pushbuf_reloc_apply(dev, req, bo);
		if (ret) {
			NV_ERROR(dev, "reloc apply: %d\n", ret);
			goto out;
		}
	}

	if (chan->dma.ib_max) {
		ret = nouveau_dma_wait(chan, req->nr_push + 1, 6);
		if (ret) {
			NV_INFO(dev, "nv50cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			nv50_dma_push(chan, nvbo, push[i].offset,
				      push[i].length);
		}
	} else
	if (dev_priv->card_type >= NV_20) {
		ret = RING_SPACE(chan, req->nr_push * 2);
		if (ret) {
			NV_ERROR(dev, "cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;
			struct drm_mm_node *mem = nvbo->bo.mem.mm_node;

			OUT_RING(chan, ((mem->start << PAGE_SHIFT) +
					push[i].offset) | 2);
			OUT_RING(chan, 0);
		}
	} else {
		ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
		if (ret) {
			NV_ERROR(dev, "jmp_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;
			struct drm_mm_node *mem = nvbo->bo.mem.mm_node;
			uint32_t cmd;

			cmd = chan->pushbuf_base + ((chan->dma.cur + 2) << 2);
			cmd |= 0x20000000;
			if (unlikely(cmd != req->suffix0)) {
				if (!nvbo->kmap.virtual) {
					ret = ttm_bo_kmap(&nvbo->bo, 0,
							  nvbo->bo.mem.num_pages,
							  &nvbo->kmap);
					if (ret) {
						WIND_RING(chan);
						goto out;
					}
					nvbo->validate_mapped = true;
				}

				nouveau_bo_wr32(nvbo, (push[i].offset +
						push[i].length - 8) / 4, cmd);
			}

			OUT_RING(chan, ((mem->start << PAGE_SHIFT) +
					push[i].offset) | 0x20000000);
			OUT_RING(chan, 0);
			for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
				OUT_RING(chan, 0);
		}
	}

	ret = nouveau_fence_new(chan, &fence, true);
	if (ret) {
		NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
		WIND_RING(chan);
		goto out;
	}

out:
	validate_fini(&op, fence);
	nouveau_fence_unref((void **)&fence);
	mutex_unlock(&dev->struct_mutex);
	kfree(bo);
	kfree(push);

out_next:
	if (chan->dma.ib_max) {
		req->suffix0 = 0x00000000;
		req->suffix1 = 0x00000000;
	} else
	if (dev_priv->card_type >= NV_20) {
		req->suffix0 = 0x00020000;
		req->suffix1 = 0x00000000;
	} else {
		req->suffix0 = 0x20000000 |
			      (chan->pushbuf_base + ((chan->dma.cur + 2) << 2));
		req->suffix1 = 0x00000000;
	}

	return ret;
}

static inline uint32_t
domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain)
{
	uint32_t flags = 0;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;

	return flags;
}

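/*
 * DRM_NOUVEAU_GEM_CPU_PREP: grab a BO for CPU access, either just
 * waiting for idle (NOBLOCK) or taking a synccpu write grab that is
 * held until CPU_FINI, with the owner recorded in cpu_filp.
 */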
int
nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_gem_cpu_prep *req = data;
	struct drm_gem_object *gem;
	struct nouveau_bo *nvbo;
	bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
	int ret = -EINVAL;

	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return ret;
	nvbo = nouveau_gem_object(gem);

	if (nvbo->cpu_filp) {
		if (nvbo->cpu_filp == file_priv)
			goto out;

		ret = ttm_bo_wait_cpu(&nvbo->bo, no_wait);
		if (ret)
			goto out;
	}

	if (req->flags & NOUVEAU_GEM_CPU_PREP_NOBLOCK) {
		spin_lock(&nvbo->bo.lock);
		ret = ttm_bo_wait(&nvbo->bo, false, false, no_wait);
		spin_unlock(&nvbo->bo.lock);
	} else {
		ret = ttm_bo_synccpu_write_grab(&nvbo->bo, no_wait);
		if (ret == 0)
			nvbo->cpu_filp = file_priv;
	}

out:
	drm_gem_object_unreference_unlocked(gem);
	return ret;
}

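/*
 * DRM_NOUVEAU_GEM_CPU_FINI: release the synccpu write grab taken by
 * CPU_PREP; only the file that took the grab may release it.
 */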
int
nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_gem_cpu_prep *req = data;
	struct drm_gem_object *gem;
	struct nouveau_bo *nvbo;
	int ret = -EINVAL;

	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return ret;
	nvbo = nouveau_gem_object(gem);

	if (nvbo->cpu_filp != file_priv)
		goto out;
	nvbo->cpu_filp = NULL;

	ttm_bo_synccpu_write_release(&nvbo->bo);
	ret = 0;

out:
	drm_gem_object_unreference_unlocked(gem);
	return ret;
}

int
nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_nouveau_gem_info *req = data;
	struct drm_gem_object *gem;
	int ret;

	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -EINVAL;

	ret = nouveau_gem_info(gem, req);
	drm_gem_object_unreference_unlocked(gem);
	return ret;
}