/*
 * Copyright (C) 2006 Ben Skeggs.
 *
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

/*
 * Authors:
 *   Ben Skeggs <darktama@iinet.net.au>
 */

#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_ramht.h"
#include "nouveau_vm.h"
#include "nv50_display.h"

struct nouveau_gpuobj_method {
	struct list_head head;
	u32 mthd;
	int (*exec)(struct nouveau_channel *, u32 class, u32 mthd, u32 data);
};

struct nouveau_gpuobj_class {
	struct list_head head;
	struct list_head methods;
	u32 id;
	u32 engine;
};

int
nouveau_gpuobj_class_new(struct drm_device *dev, u32 class, u32 engine)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj_class *oc;

	oc = kzalloc(sizeof(*oc), GFP_KERNEL);
	if (!oc)
		return -ENOMEM;

	INIT_LIST_HEAD(&oc->methods);
	oc->id = class;
	oc->engine = engine;
	list_add(&oc->head, &dev_priv->classes);
	return 0;
}

int
nouveau_gpuobj_mthd_new(struct drm_device *dev, u32 class, u32 mthd,
			int (*exec)(struct nouveau_channel *, u32, u32, u32))
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj_method *om;
	struct nouveau_gpuobj_class *oc;

	list_for_each_entry(oc, &dev_priv->classes, head) {
		if (oc->id == class)
			goto found;
	}

	return -EINVAL;

found:
	om = kzalloc(sizeof(*om), GFP_KERNEL);
	if (!om)
		return -ENOMEM;

	om->mthd = mthd;
	om->exec = exec;
	list_add(&om->head, &oc->methods);
	return 0;
}

int
nouveau_gpuobj_mthd_call(struct nouveau_channel *chan,
			 u32 class, u32 mthd, u32 data)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct nouveau_gpuobj_method *om;
	struct nouveau_gpuobj_class *oc;

	list_for_each_entry(oc, &dev_priv->classes, head) {
		if (oc->id != class)
			continue;

		list_for_each_entry(om, &oc->methods, head) {
			if (om->mthd == mthd)
				return om->exec(chan, class, mthd, data);
		}
	}

	return -ENOENT;
}

int
nouveau_gpuobj_mthd_call2(struct drm_device *dev, int chid,
			  u32 class, u32 mthd, u32 data)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan = NULL;
	unsigned long flags;
	int ret = -EINVAL;

	spin_lock_irqsave(&dev_priv->channels.lock, flags);
	if (chid > 0 && chid < dev_priv->engine.fifo.channels)
		chan = dev_priv->channels.ptr[chid];
	if (chan)
		ret = nouveau_gpuobj_mthd_call(chan, class, mthd, data);
	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
	return ret;
}

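/* Usage sketch (illustrative; the class/method values and the handler name
 * below are hypothetical, not taken from this file): an engine registers a
 * class and the methods it wants dispatched, and a dispatcher such as the
 * PFIFO interrupt handler can then route a trapped method by channel id:
 *
 *	nouveau_gpuobj_class_new(dev, 0x506e, NVOBJ_ENGINE_SW);
 *	nouveau_gpuobj_mthd_new(dev, 0x506e, 0x0500, my_mthd_handler);
 *	...
 *	nouveau_gpuobj_mthd_call2(dev, chid, 0x506e, 0x0500, data);
 */
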
/* NVidia uses context objects to drive drawing operations.

   Context objects can be selected into 8 subchannels in the FIFO,
   and then used via DMA command buffers.

   A context object is referenced by a user defined handle (CARD32). The HW
   looks up graphics objects in a hash table in the instance RAM.

   An entry in the hash table consists of 2 CARD32. The first CARD32 contains
   the handle, the second one a bitfield that contains the address of the
   object in instance RAM.

   The format of the second CARD32 seems to be:

   NV4 to NV30:

   15: 0 instance_addr >> 4
   17:16 engine (here uses 1 = graphics)
   28:24 channel id (here uses 0)
   31    valid (use 1)

   NV40:

   15: 0 instance_addr >> 4 (maybe 19-0)
   21:20 engine (here uses 1 = graphics)
   I'm unsure about the other bits, but using 0 seems to work.

   The key into the hash table depends on the object handle and channel id;
   the hash computation itself lives in the RAMHT code (nouveau_ramht.c).
*/

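/* Illustrative sketch (not part of the driver): packing the second CARD32
 * of a pre-NV40 hash table entry from the fields described above. The
 * helper name is made up; the field layout follows the comment. */
static inline u32
nv04_ramht_ctx_entry(u32 instance_addr, u32 engine, u32 chid)
{
	return ((instance_addr >> 4) & 0xffff) |	/* 15:0  instance address >> 4 */
	       ((engine & 3) << 16) |			/* 17:16 engine (1 = graphics) */
	       ((chid & 0x1f) << 24) |			/* 28:24 channel id */
	       (1u << 31);				/* 31    valid */
}
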
int
nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
		   uint32_t size, int align, uint32_t flags,
		   struct nouveau_gpuobj **gpuobj_ret)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
	struct nouveau_gpuobj *gpuobj;
	struct drm_mm_node *ramin = NULL;
	int ret, i;

	NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n",
		 chan ? chan->id : -1, size, align, flags);

	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
	if (!gpuobj)
		return -ENOMEM;
	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
	gpuobj->dev = dev;
	gpuobj->flags = flags;
	kref_init(&gpuobj->refcount);
	gpuobj->size = size;

	spin_lock(&dev_priv->ramin_lock);
	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
	spin_unlock(&dev_priv->ramin_lock);

	if (chan) {
		ramin = drm_mm_search_free(&chan->ramin_heap, size, align, 0);
		if (ramin)
			ramin = drm_mm_get_block(ramin, size, align);
		if (!ramin) {
			nouveau_gpuobj_ref(NULL, &gpuobj);
			return -ENOMEM;
		}

		gpuobj->pinst = chan->ramin->pinst;
		if (gpuobj->pinst != ~0)
			gpuobj->pinst += ramin->start;

		gpuobj->cinst = ramin->start;
		gpuobj->vinst = ramin->start + chan->ramin->vinst;
		gpuobj->node  = ramin;
	} else {
		ret = instmem->get(gpuobj, size, align);
		if (ret) {
			nouveau_gpuobj_ref(NULL, &gpuobj);
			return ret;
		}

		ret = -ENOSYS;
		if (!(flags & NVOBJ_FLAG_DONT_MAP))
			ret = instmem->map(gpuobj);
		if (ret)
			gpuobj->pinst = ~0;

		gpuobj->cinst = NVOBJ_CINST_GLOBAL;
	}

	if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, 0);
		instmem->flush(dev);
	}

	*gpuobj_ret = gpuobj;
	return 0;
}

int
nouveau_gpuobj_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	NV_DEBUG(dev, "\n");

	INIT_LIST_HEAD(&dev_priv->gpuobj_list);
	INIT_LIST_HEAD(&dev_priv->classes);
	spin_lock_init(&dev_priv->ramin_lock);
	dev_priv->ramin_base = ~0;

	return 0;
}

void
nouveau_gpuobj_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj_method *om, *tm;
	struct nouveau_gpuobj_class *oc, *tc;

	NV_DEBUG(dev, "\n");

	list_for_each_entry_safe(oc, tc, &dev_priv->classes, head) {
		list_for_each_entry_safe(om, tm, &oc->methods, head) {
			list_del(&om->head);
			kfree(om);
		}
		list_del(&oc->head);
		kfree(oc);
	}

	BUG_ON(!list_empty(&dev_priv->gpuobj_list));
}

static void
nouveau_gpuobj_del(struct kref *ref)
{
	struct nouveau_gpuobj *gpuobj =
		container_of(ref, struct nouveau_gpuobj, refcount);
	struct drm_device *dev = gpuobj->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
	int i;

	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);

	if (gpuobj->node && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, 0);
		instmem->flush(dev);
	}

	if (gpuobj->dtor)
		gpuobj->dtor(dev, gpuobj);

	if (gpuobj->cinst == NVOBJ_CINST_GLOBAL) {
		if (gpuobj->node) {
			instmem->unmap(gpuobj);
			instmem->put(gpuobj);
		}
	} else {
		if (gpuobj->node) {
			spin_lock(&dev_priv->ramin_lock);
			drm_mm_put_block(gpuobj->node);
			spin_unlock(&dev_priv->ramin_lock);
		}
	}

	spin_lock(&dev_priv->ramin_lock);
	list_del(&gpuobj->list);
	spin_unlock(&dev_priv->ramin_lock);

	kfree(gpuobj);
}

void
nouveau_gpuobj_ref(struct nouveau_gpuobj *ref, struct nouveau_gpuobj **ptr)
{
	if (ref)
		kref_get(&ref->refcount);

	if (*ptr)
		kref_put(&(*ptr)->refcount, nouveau_gpuobj_del);

	*ptr = ref;
}

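/* Usage note (illustrative): nouveau_gpuobj_ref() combines get, put and
 * pointer assignment, so callers manage references with a single call:
 *
 *	nouveau_gpuobj_ref(obj, &ptr);	 (ref 'obj', unref the old '*ptr')
 *	nouveau_gpuobj_ref(NULL, &ptr);	 (drop the reference, NULL '*ptr')
 */
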
int
nouveau_gpuobj_new_fake(struct drm_device *dev, u32 pinst, u64 vinst,
			u32 size, u32 flags, struct nouveau_gpuobj **pgpuobj)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = NULL;
	int i;

	NV_DEBUG(dev,
		 "pinst=0x%08x vinst=0x%010llx size=0x%08x flags=0x%08x\n",
		 pinst, vinst, size, flags);

	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
	if (!gpuobj)
		return -ENOMEM;
	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
	gpuobj->dev = dev;
	gpuobj->flags = flags;
	kref_init(&gpuobj->refcount);
	gpuobj->size  = size;
	gpuobj->pinst = pinst;
	gpuobj->cinst = NVOBJ_CINST_GLOBAL;
	gpuobj->vinst = vinst;

	if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, 0);
		dev_priv->engine.instmem.flush(dev);
	}

	spin_lock(&dev_priv->ramin_lock);
	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
	spin_unlock(&dev_priv->ramin_lock);
	*pgpuobj = gpuobj;
	return 0;
}

/*
   DMA objects are used to reference a piece of memory in the
   framebuffer, PCI or AGP address space. Each object is 16 bytes big
   and looks as follows:

   entry[0]
   11:0  class (seems like I can always use 0 here)
   12    page table present?
   13    page entry linear?
   15:14 access: 0 rw, 1 ro, 2 wo
   17:16 target: 0 NV memory, 1 NV memory tiled, 2 PCI, 3 AGP
   31:20 dma adjust (bits 0-11 of the address)
   entry[1]
   dma limit (size of transfer)
   entry[X]
   1     0 readonly, 1 readwrite
   31:12 dma frame address of the page (bits 12-31 of the address)
   entry[N]
   page table terminator; nvidia uses the same value as the first pte,
   rivatv uses 0xffffffff

   Non-linear page tables need a list of frame addresses afterwards;
   the rivatv project has some info on this.

   The method below creates a DMA object in instance RAM and returns a handle
   to it that can be used to set up context objects.
*/

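/* Illustrative sketch (not driver code): packing entry[0] of the DMA object
 * described above. The helper name is made up; the field positions follow
 * the comment. */
static inline u32
nv04_dma_entry0(u32 class, bool pt_present, bool pt_linear,
		u32 access, u32 target, u32 adjust)
{
	return (class & 0xfff) |		/* 11:0  class */
	       ((u32)pt_present << 12) |	/* 12    page table present */
	       ((u32)pt_linear << 13) |		/* 13    page entry linear */
	       ((access & 3) << 14) |		/* 15:14 access: 0 rw, 1 ro, 2 wo */
	       ((target & 3) << 16) |		/* 17:16 target */
	       ((adjust & 0xfff) << 20);	/* 31:20 dma adjust */
}
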
void
nv50_gpuobj_dma_init(struct nouveau_gpuobj *obj, u32 offset, int class,
		     u64 base, u64 size, int target, int access,
		     u32 type, u32 comp)
{
	struct drm_nouveau_private *dev_priv = obj->dev->dev_private;
	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
	u32 flags0;

	flags0  = (comp << 29) | (type << 22) | class;
	flags0 |= 0x00100000;

	switch (access) {
	case NV_MEM_ACCESS_RO: flags0 |= 0x00040000; break;
	case NV_MEM_ACCESS_RW:
	case NV_MEM_ACCESS_WO: flags0 |= 0x00080000; break;
	default:
		break;
	}

	switch (target) {
	case NV_MEM_TARGET_VRAM:
		flags0 |= 0x00010000;
		break;
	case NV_MEM_TARGET_PCI:
		flags0 |= 0x00020000;
		break;
	case NV_MEM_TARGET_PCI_NOSNOOP:
		flags0 |= 0x00030000;
		break;
	case NV_MEM_TARGET_GART:
		base += dev_priv->gart_info.aper_base;
	default:
		flags0 &= ~0x00100000;
		break;
	}

	/* convert to base + limit */
	size = (base + size) - 1;

	nv_wo32(obj, offset + 0x00, flags0);
	nv_wo32(obj, offset + 0x04, lower_32_bits(size));
	nv_wo32(obj, offset + 0x08, lower_32_bits(base));
	nv_wo32(obj, offset + 0x0c, upper_32_bits(size) << 24 |
				    upper_32_bits(base));
	nv_wo32(obj, offset + 0x10, 0x00000000);
	nv_wo32(obj, offset + 0x14, 0x00000000);

	pinstmem->flush(obj->dev);
}

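/* Worked example (illustrative): for base = 0x10000000 and size = 0x1000,
 * the base + limit conversion above yields limit = 0x10000fff, so the six
 * words written are { flags0, 0x10000fff, 0x10000000, 0x00000000, 0, 0 }. */
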
int
nv50_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base, u64 size,
		    int target, int access, u32 type, u32 comp,
		    struct nouveau_gpuobj **pobj)
{
	struct drm_device *dev = chan->dev;
	int ret;

	ret = nouveau_gpuobj_new(dev, chan, 24, 16, NVOBJ_FLAG_ZERO_FREE, pobj);
	if (ret)
		return ret;

	nv50_gpuobj_dma_init(*pobj, 0, class, base, size, target,
			     access, type, comp);
	return 0;
}

int
nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base,
		       u64 size, int access, int target,
		       struct nouveau_gpuobj **pobj)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct drm_device *dev = chan->dev;
	struct nouveau_gpuobj *obj;
	u32 flags0, flags2;
	int ret;

	if (dev_priv->card_type >= NV_50) {
		u32 comp = (target == NV_MEM_TARGET_VM) ? NV_MEM_COMP_VM : 0;
		u32 type = (target == NV_MEM_TARGET_VM) ? NV_MEM_TYPE_VM : 0;

		return nv50_gpuobj_dma_new(chan, class, base, size,
					   target, access, type, comp, pobj);
	}

	if (target == NV_MEM_TARGET_GART) {
		struct nouveau_gpuobj *gart = dev_priv->gart_info.sg_ctxdma;

		if (dev_priv->gart_info.type == NOUVEAU_GART_PDMA) {
			if (base == 0) {
				nouveau_gpuobj_ref(gart, pobj);
				return 0;
			}

			base = nouveau_sgdma_get_physical(dev, base);
			target = NV_MEM_TARGET_PCI;
		} else {
			base += dev_priv->gart_info.aper_base;
			if (dev_priv->gart_info.type == NOUVEAU_GART_AGP)
				target = NV_MEM_TARGET_PCI_NOSNOOP;
			else
				target = NV_MEM_TARGET_PCI;
		}
	}

	flags0  = class;
	flags0 |= 0x00003000; /* PT present, PT linear */
	flags2  = 0;

	switch (target) {
	case NV_MEM_TARGET_PCI:
		flags0 |= 0x00020000;
		break;
	case NV_MEM_TARGET_PCI_NOSNOOP:
		flags0 |= 0x00030000;
		break;
	default:
		break;
	}

	switch (access) {
	case NV_MEM_ACCESS_RO:
		flags0 |= 0x00004000;
		break;
	case NV_MEM_ACCESS_WO:
		flags0 |= 0x00008000;
	default:
		flags2 |= 0x00000002;
		break;
	}

	flags0 |= (base & 0x00000fff) << 20;
	flags2 |= (base & 0xfffff000);

	ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
	if (ret)
		return ret;

	nv_wo32(obj, 0x00, flags0);
	nv_wo32(obj, 0x04, size - 1);
	nv_wo32(obj, 0x08, flags2);
	nv_wo32(obj, 0x0c, flags2);

	obj->engine = NVOBJ_ENGINE_SW;
	obj->class  = class;
	*pobj = obj;
	return 0;
}

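/* Usage sketch (illustrative; mirrors what nouveau_gpuobj_channel_init()
 * below does when it builds the pre-NV50 VRAM ctxdma):
 *
 *	struct nouveau_gpuobj *vram = NULL;
 *	ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
 *				     dev_priv->fb_available_size,
 *				     NV_MEM_ACCESS_RW, NV_MEM_TARGET_VRAM,
 *				     &vram);
 */
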
/* Context objects in the instance RAM have the following structure.
 * On NV40 they are 32 bytes long; on NV30 and earlier, 16 bytes.

   NV4 - NV30:

   entry[0]
   11:0  class
   12    chroma key enable
   13    user clip enable
   14    swizzle enable
   17:15 patch config:
         scrcopy_and, rop_and, blend_and, scrcopy, srccopy_pre, blend_pre
   18    synchronize enable
   19    endian: 1 big, 0 little
   21:20 dither mode
   23    single step enable
   24    patch status: 0 invalid, 1 valid
   25    context_surface 0: 1 valid
   26    context surface 1: 1 valid
   27    context pattern: 1 valid
   28    context rop: 1 valid
   29,30 context beta, beta4
   entry[1]
   7:0   mono format
   15:8  color format
   31:16 notify instance address
   entry[2]
   15:0  dma 0 instance address
   31:16 dma 1 instance address
   entry[3]
   dma method traps

   NV40:
   No idea what the exact format is. Here's what can be deduced:

   entry[0]:
   11:0  class (maybe uses more bits here?)
   17    user clip enable
   21:19 patch config
   25    patch status valid ?
   entry[1]:
   15:0  DMA notifier (maybe 20:0)
   entry[2]:
   15:0  DMA 0 instance (maybe 20:0)
   24    big endian
   entry[3]:
   15:0  DMA 1 instance (maybe 20:0)
   entry[4]:
   entry[5]:
   set to 0?
*/

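/* Illustrative sketch (not driver code): building entry[0] of a NV4-NV30
 * context object from a few of the fields described above. The helper name
 * is made up; the bit positions follow the comment. */
static inline u32
nv04_ctx_entry0(u32 class, bool big_endian, bool patch_valid)
{
	u32 val = class & 0xfff;	/* 11:0  class */

	if (big_endian)
		val |= 1 << 19;		/* 19    endian: 1 big, 0 little */
	if (patch_valid)
		val |= 1 << 24;		/* 24    patch status: 1 valid */
	return val;
}
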
static int
nouveau_gpuobj_sw_new(struct nouveau_channel *chan, u32 handle, u16 class)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct nouveau_gpuobj *gpuobj;
	int ret;

	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
	if (!gpuobj)
		return -ENOMEM;
	gpuobj->dev = chan->dev;
	gpuobj->engine = NVOBJ_ENGINE_SW;
	gpuobj->class = class;
	kref_init(&gpuobj->refcount);
	gpuobj->cinst = 0x40;

	spin_lock(&dev_priv->ramin_lock);
	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
	spin_unlock(&dev_priv->ramin_lock);

	ret = nouveau_ramht_insert(chan, handle, gpuobj);
	nouveau_gpuobj_ref(NULL, &gpuobj);
	return ret;
}

int
nouveau_gpuobj_gr_new(struct nouveau_channel *chan, u32 handle, int class)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	struct drm_device *dev = chan->dev;
	struct nouveau_gpuobj_class *oc;
	int ret;

	NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class);

	list_for_each_entry(oc, &dev_priv->classes, head) {
		if (oc->id == class)
			goto found;
	}

	NV_ERROR(dev, "illegal object class: 0x%x\n", class);
	return -EINVAL;

found:
	if (!dev_priv->eng[oc->engine]) {
		switch (oc->engine) {
		case NVOBJ_ENGINE_SW:
			return nouveau_gpuobj_sw_new(chan, handle, class);
		case NVOBJ_ENGINE_GR:
			if ((dev_priv->card_type >= NV_20 && !chan->ramin_grctx) ||
			    (dev_priv->card_type < NV_20 && !chan->pgraph_ctx)) {
				ret = pgraph->create_context(chan);
				if (ret)
					return ret;
			}
			break;
		}

		return pgraph->object_new(chan, handle, class);
	}

	if (!chan->engctx[oc->engine]) {
		ret = dev_priv->eng[oc->engine]->context_new(chan, oc->engine);
		if (ret)
			return ret;
	}

	return dev_priv->eng[oc->engine]->object_new(chan, oc->engine, handle, class);
}

static int
nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	u32 size;
	u32 base;
	int ret;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	/* Base amount for object storage (4KiB enough?) */
	size = 0x2000;
	base = 0;

	/* PGRAPH context */
	size += dev_priv->engine.graph.grctx_size;

	if (dev_priv->card_type == NV_50) {
		/* Various fixed table thingos */
		size += 0x1400; /* mostly unknown stuff */
		size += 0x4000; /* vm pd */
		base  = 0x6000;
		/* RAMHT, not sure about setting size yet, 32KiB to be safe */
		size += 0x8000;
		/* RAMFC */
		size += 0x1000;
	}

	ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin);
	if (ret) {
		NV_ERROR(dev, "Error allocating channel PRAMIN: %d\n", ret);
		return ret;
	}

	ret = drm_mm_init(&chan->ramin_heap, base, size);
	if (ret) {
		NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret);
		nouveau_gpuobj_ref(NULL, &chan->ramin);
		return ret;
	}

	return 0;
}

int
nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
			    uint32_t vram_h, uint32_t tt_h)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *vram = NULL, *tt = NULL;
	int ret, i;

	NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);

	if (dev_priv->card_type == NV_C0) {
		struct nouveau_vm *vm = dev_priv->chan_vm;
		struct nouveau_vm_pgd *vpgd;

		ret = nouveau_gpuobj_new(dev, NULL, 4096, 0x1000, 0,
					 &chan->ramin);
		if (ret)
			return ret;

		nouveau_vm_ref(vm, &chan->vm, NULL);

		vpgd = list_first_entry(&vm->pgd_list, struct nouveau_vm_pgd, head);
		nv_wo32(chan->ramin, 0x0200, lower_32_bits(vpgd->obj->vinst));
		nv_wo32(chan->ramin, 0x0204, upper_32_bits(vpgd->obj->vinst));
		nv_wo32(chan->ramin, 0x0208, 0xffffffff);
		nv_wo32(chan->ramin, 0x020c, 0x000000ff);
		return 0;
	}

	/* Allocate a chunk of memory for per-channel object storage */
	ret = nouveau_gpuobj_channel_init_pramin(chan);
	if (ret) {
		NV_ERROR(dev, "init pramin\n");
		return ret;
	}

	/* NV50 VM
	 * - Allocate per-channel page-directory
	 * - Link with shared channel VM
	 */
	if (dev_priv->chan_vm) {
		u32 pgd_offs = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200;
		u64 vm_vinst = chan->ramin->vinst + pgd_offs;
		u32 vm_pinst = chan->ramin->pinst;

		if (vm_pinst != ~0)
			vm_pinst += pgd_offs;

		ret = nouveau_gpuobj_new_fake(dev, vm_pinst, vm_vinst, 0x4000,
					      0, &chan->vm_pd);
		if (ret)
			return ret;

		nouveau_vm_ref(dev_priv->chan_vm, &chan->vm, chan->vm_pd);
	}

	/* RAMHT */
	if (dev_priv->card_type < NV_50) {
		nouveau_ramht_ref(dev_priv->ramht, &chan->ramht, NULL);
	} else {
		struct nouveau_gpuobj *ramht = NULL;

		ret = nouveau_gpuobj_new(dev, chan, 0x8000, 16,
					 NVOBJ_FLAG_ZERO_ALLOC, &ramht);
		if (ret)
			return ret;

		ret = nouveau_ramht_new(dev, ramht, &chan->ramht);
		nouveau_gpuobj_ref(NULL, &ramht);
		if (ret)
			return ret;

		/* dma objects for display sync channel semaphore blocks */
		for (i = 0; i < 2; i++) {
			struct nouveau_gpuobj *sem = NULL;
			struct nv50_display_crtc *dispc =
				&nv50_display(dev)->crtc[i];
			u64 offset = dispc->sem.bo->bo.mem.start << PAGE_SHIFT;

			ret = nouveau_gpuobj_dma_new(chan, 0x3d, offset, 0xfff,
						     NV_MEM_ACCESS_RW,
						     NV_MEM_TARGET_VRAM, &sem);
			if (ret)
				return ret;

			ret = nouveau_ramht_insert(chan, NvEvoSema0 + i, sem);
			nouveau_gpuobj_ref(NULL, &sem);
			if (ret)
				return ret;
		}
	}

	/* VRAM ctxdma */
	if (dev_priv->card_type >= NV_50) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, (1ULL << 40), NV_MEM_ACCESS_RW,
					     NV_MEM_TARGET_VM, &vram);
		if (ret) {
			NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
			return ret;
		}
	} else {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, dev_priv->fb_available_size,
					     NV_MEM_ACCESS_RW,
					     NV_MEM_TARGET_VRAM, &vram);
		if (ret) {
			NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
			return ret;
		}
	}

	ret = nouveau_ramht_insert(chan, vram_h, vram);
	nouveau_gpuobj_ref(NULL, &vram);
	if (ret) {
		NV_ERROR(dev, "Error adding VRAM ctxdma to RAMHT: %d\n", ret);
		return ret;
	}

	/* TT memory ctxdma */
	if (dev_priv->card_type >= NV_50) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, (1ULL << 40), NV_MEM_ACCESS_RW,
					     NV_MEM_TARGET_VM, &tt);
	} else {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, dev_priv->gart_info.aper_size,
					     NV_MEM_ACCESS_RW,
					     NV_MEM_TARGET_GART, &tt);
	}

	if (ret) {
		NV_ERROR(dev, "Error creating TT ctxdma: %d\n", ret);
		return ret;
	}

	ret = nouveau_ramht_insert(chan, tt_h, tt);
	nouveau_gpuobj_ref(NULL, &tt);
	if (ret) {
		NV_ERROR(dev, "Error adding TT ctxdma to RAMHT: %d\n", ret);
		return ret;
	}

	return 0;
}

void
nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	nouveau_ramht_ref(NULL, &chan->ramht, chan);

	nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
	nouveau_gpuobj_ref(NULL, &chan->vm_pd);

	if (drm_mm_initialized(&chan->ramin_heap))
		drm_mm_takedown(&chan->ramin_heap);
	nouveau_gpuobj_ref(NULL, &chan->ramin);
}

int
nouveau_gpuobj_suspend(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj;
	int i;

	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
		if (gpuobj->cinst != NVOBJ_CINST_GLOBAL)
			continue;

		gpuobj->suspend = vmalloc(gpuobj->size);
		if (!gpuobj->suspend) {
			nouveau_gpuobj_resume(dev);
			return -ENOMEM;
		}

		for (i = 0; i < gpuobj->size; i += 4)
			gpuobj->suspend[i/4] = nv_ro32(gpuobj, i);
	}

	return 0;
}

void
nouveau_gpuobj_resume(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj;
	int i;

	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
		if (!gpuobj->suspend)
			continue;

		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, gpuobj->suspend[i/4]);

		vfree(gpuobj->suspend);
		gpuobj->suspend = NULL;
	}

	dev_priv->engine.instmem.flush(dev);
}

int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_nouveau_grobj_alloc *init = data;
	struct nouveau_channel *chan;
	int ret;

	if (init->handle == ~0)
		return -EINVAL;

	chan = nouveau_channel_get(dev, file_priv, init->channel);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	if (nouveau_ramht_find(chan, init->handle)) {
		ret = -EEXIST;
		goto out;
	}

	ret = nouveau_gpuobj_gr_new(chan, init->handle, init->class);
	if (ret) {
		NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n",
			 ret, init->channel, init->handle);
	}

out:
	nouveau_channel_put(&chan);
	return ret;
}

int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_nouveau_gpuobj_free *objfree = data;
	struct nouveau_channel *chan;
	int ret;

	chan = nouveau_channel_get(dev, file_priv, objfree->channel);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* Synchronize with the user channel */
	nouveau_channel_idle(chan);

	ret = nouveau_ramht_remove(chan, objfree->handle);
	nouveau_channel_put(&chan);
	return ret;
}

u32
nv_ro32(struct nouveau_gpuobj *gpuobj, u32 offset)
{
	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
	struct drm_device *dev = gpuobj->dev;
	unsigned long flags;

	if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
		u64  ptr = gpuobj->vinst + offset;
		u32 base = ptr >> 16;
		u32  val;

		spin_lock_irqsave(&dev_priv->vm_lock, flags);
		if (dev_priv->ramin_base != base) {
			dev_priv->ramin_base = base;
			nv_wr32(dev, 0x001700, dev_priv->ramin_base);
		}
		val = nv_rd32(dev, 0x700000 + (ptr & 0xffff));
		spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
		return val;
	}

	return nv_ri32(dev, gpuobj->pinst + offset);
}

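/* Note on the fallback path above (and in nv_wo32() below): objects with no
 * direct PRAMIN mapping are accessed through the 64KiB window at 0x700000;
 * register 0x001700 selects the window base (vinst >> 16), which is cached
 * in dev_priv->ramin_base to avoid redundant register writes. */
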
void
nv_wo32(struct nouveau_gpuobj *gpuobj, u32 offset, u32 val)
{
	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
	struct drm_device *dev = gpuobj->dev;
	unsigned long flags;

	if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
		u64  ptr = gpuobj->vinst + offset;
		u32 base = ptr >> 16;

		spin_lock_irqsave(&dev_priv->vm_lock, flags);
		if (dev_priv->ramin_base != base) {
			dev_priv->ramin_base = base;
			nv_wr32(dev, 0x001700, dev_priv->ramin_base);
		}
		nv_wr32(dev, 0x700000 + (ptr & 0xffff), val);
		spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
		return;
	}

	nv_wi32(dev, gpuobj->pinst + offset, val);
}