/*
 * Copyright (C) 2007 Ben Skeggs.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_grctx.h"
static int  nv40_graph_register(struct drm_device *);
static void nv40_graph_isr(struct drm_device *);
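/* Return the channel that currently owns the PGRAPH context, if any.
 * NV40_PGRAPH_CTXCTL_CUR holds the instance address of the loaded
 * context (shifted right by 4) together with a "loaded" flag.
 */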
struct nouveau_channel *
nv40_graph_channel(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t inst;
	int i;

	inst = nv_rd32(dev, NV40_PGRAPH_CTXCTL_CUR);
	if (!(inst & NV40_PGRAPH_CTXCTL_CUR_LOADED))
		return NULL;
	inst = (inst & NV40_PGRAPH_CTXCTL_CUR_INSTANCE) << 4;

	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
		struct nouveau_channel *chan = dev_priv->channels.ptr[i];
		if (chan && chan->ramin_grctx &&
		    chan->ramin_grctx->pinst == inst)
			return chan;
	}

	return NULL;
}
int
nv40_graph_create_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	struct nouveau_grctx ctx = {};
	int ret;

	ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 16,
				 NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin_grctx);
	if (ret)
		return ret;

	/* Initialise default context values */
	ctx.dev = chan->dev;
	ctx.mode = NOUVEAU_GRCTX_VALS;
	ctx.data = chan->ramin_grctx;
	nv40_grctx_init(&ctx);

	nv_wo32(chan->ramin_grctx, 0, chan->ramin_grctx->pinst);
	return 0;
}
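/* Tear down a channel's PGRAPH context, unloading it from the hardware
 * first if it happens to be the active one.
 */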
void
nv40_graph_destroy_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
	pgraph->fifo_access(dev, false);

	/* Unload the context if it's the currently active one */
	if (pgraph->channel(dev) == chan)
		pgraph->unload_context(dev);

	pgraph->fifo_access(dev, true);
	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

	/* Free the context resources */
	nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
}
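/* Ask the CTXCTL microcode to transfer a context between PGRAPH and
 * instance memory: save=1 writes the current state out to 'inst',
 * save=0 loads state from 'inst' into PGRAPH.
 */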
static int
nv40_graph_transfer_context(struct drm_device *dev, uint32_t inst, int save)
{
	uint32_t old_cp, tv = 1000, tmp;
	int i;

	old_cp = nv_rd32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER);
	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);

	tmp  = nv_rd32(dev, NV40_PGRAPH_CTXCTL_0310);
	tmp |= save ? NV40_PGRAPH_CTXCTL_0310_XFER_SAVE :
		      NV40_PGRAPH_CTXCTL_0310_XFER_LOAD;
	nv_wr32(dev, NV40_PGRAPH_CTXCTL_0310, tmp);

	tmp  = nv_rd32(dev, NV40_PGRAPH_CTXCTL_0304);
	tmp |= NV40_PGRAPH_CTXCTL_0304_XFER_CTX;
	nv_wr32(dev, NV40_PGRAPH_CTXCTL_0304, tmp);

	nouveau_wait_for_idle(dev);
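	/* Poll until the transfer microcode reports idle (0x40030C reads
	 * back zero), giving up after 'tv' iterations.
	 */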
	for (i = 0; i < tv; i++) {
		if (nv_rd32(dev, NV40_PGRAPH_CTXCTL_030C) == 0)
			break;
	}

	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, old_cp);

	if (i == tv) {
		uint32_t ucstat = nv_rd32(dev, NV40_PGRAPH_CTXCTL_UCODE_STAT);
		NV_ERROR(dev, "Failed: Instance=0x%08x Save=%d\n", inst, save);
		NV_ERROR(dev, "IP: 0x%02x, Opcode: 0x%08x\n",
			 ucstat >> NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_SHIFT,
			 ucstat  & NV40_PGRAPH_CTXCTL_UCODE_STAT_OP_MASK);
		NV_ERROR(dev, "0x40030C = 0x%08x\n",
			 nv_rd32(dev, NV40_PGRAPH_CTXCTL_030C));
		return -EBUSY;
	}

	return 0;
}
/* Restore the context for a specific channel into PGRAPH */
int
nv40_graph_load_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	uint32_t inst;
	int ret;

	if (!chan->ramin_grctx)
		return -EINVAL;
	inst = chan->ramin_grctx->pinst >> 4;

	ret = nv40_graph_transfer_context(dev, inst, 0);
	if (ret)
		return ret;

	/* 0x40032C, no idea of its exact function.  Could simply be a
	 * record of the currently active PGRAPH context.  It's currently
	 * unknown as to what bit 24 does.  The nv ddx has it set, so we will
	 * set it here too.
	 */
	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
	nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR,
		(inst & NV40_PGRAPH_CTXCTL_CUR_INSTANCE) |
		 NV40_PGRAPH_CTXCTL_CUR_LOADED);

	/* 0x32E0 records the instance address of the active FIFO's PGRAPH
	 * context.  If at any time this doesn't match 0x40032C, you will
	 * receive PGRAPH_INTR_CONTEXT_SWITCH.
	 */
	nv_wr32(dev, NV40_PFIFO_GRCTX_INSTANCE, inst);
	return 0;
}
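/* Save the currently loaded PGRAPH context back to instance memory and
 * mark PGRAPH as having no context loaded.
 */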
int
nv40_graph_unload_context(struct drm_device *dev)
{
	uint32_t inst;
	int ret;

	inst = nv_rd32(dev, NV40_PGRAPH_CTXCTL_CUR);
	if (!(inst & NV40_PGRAPH_CTXCTL_CUR_LOADED))
		return 0;
	inst &= NV40_PGRAPH_CTXCTL_CUR_INSTANCE;
	ret = nv40_graph_transfer_context(dev, inst, 1);
	nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, inst);
	return ret;
}
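/* Program one tiling region into PGRAPH.  Which register bank takes the
 * values depends on the chipset; several chips mirror them in two banks.
 */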
void
nv40_graph_set_tile_region(struct drm_device *dev, int i)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];

	switch (dev_priv->chipset) {
	case 0x44:
	case 0x4a:
	case 0x4e:
		nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
		nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
		nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
		break;
	case 0x46:
	case 0x47:
	case 0x49:
	case 0x4b:
		nv_wr32(dev, NV47_PGRAPH_TSIZE(i), tile->pitch);
		nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), tile->limit);
		nv_wr32(dev, NV47_PGRAPH_TILE(i), tile->addr);
		nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch);
		nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
		nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
		break;
	default:
		nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
		nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
		nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
		nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch);
		nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
		nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
		break;
	}
}
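/* Bring up PGRAPH: reset the engine, upload the context-switching
 * ucode, register the object classes and apply per-chipset setup.
 */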
int
nv40_graph_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv =
		(struct drm_nouveau_private *)dev->dev_private;
	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
	struct nouveau_grctx ctx = {};
	uint32_t vramsz, *cp;
	int ret, i, j;
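	/* Pulse PGRAPH's bit in PMC_ENABLE to reset the engine. */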
	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
			~NV_PMC_ENABLE_PGRAPH);
	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
			 NV_PMC_ENABLE_PGRAPH);
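	/* Generate the context-switching ucode (ctxprog) into a temporary
	 * buffer and record how large a context image this chipset needs.
	 */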
	cp = kmalloc(sizeof(*cp) * 256, GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	ctx.dev = dev;
	ctx.mode = NOUVEAU_GRCTX_PROG;
	ctx.data = cp;
	ctx.ctxprog_max = 256;
	nv40_grctx_init(&ctx);
	dev_priv->engine.graph.grctx_size = ctx.ctxvals_pos * 4;
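	/* Upload the ctxprog into PGRAPH's microcode store. */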
	nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
	for (i = 0; i < ctx.ctxprog_len; i++)
		nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]);

	kfree(cp);
	ret = nv40_graph_register(dev);
	if (ret)
		return ret;

	/* No context present currently */
	nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);
	nouveau_irq_register(dev, 12, nv40_graph_isr);
	nv_wr32(dev, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
	nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF);

	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000);
	nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x401287c0);
	nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xe0de8055);
	nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00008000);
	nv_wr32(dev, NV04_PGRAPH_LIMIT_VIOL_PIX, 0x00be3c5f);

	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
	nv_wr32(dev, NV10_PGRAPH_STATE      , 0xFFFFFFFF);
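	/* The low byte of 0x1540 looks like a unit-enable mask; program the
	 * index of its first set bit into 0x405000.
	 */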
	j = nv_rd32(dev, 0x1540) & 0xff;
	if (j) {
		for (i = 0; !(j & 1); j >>= 1, i++)
			;
		nv_wr32(dev, 0x405000, i);
	}
	if (dev_priv->chipset == 0x40) {
		nv_wr32(dev, 0x4009b0, 0x83280fff);
		nv_wr32(dev, 0x4009b4, 0x000000a0);
	} else {
		nv_wr32(dev, 0x400820, 0x83280eff);
		nv_wr32(dev, 0x400824, 0x000000a0);
	}
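	/* Per-chipset magic setup values; their exact meaning is unknown. */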
	switch (dev_priv->chipset) {
	case 0x40:
	case 0x45:
		nv_wr32(dev, 0x4009b8, 0x0078e366);
		nv_wr32(dev, 0x4009bc, 0x0000014c);
		break;
	case 0x41:
	case 0x42: /* pciid also 0x00Cx */
	/* case 0x0120: XXX (pciid) */
		nv_wr32(dev, 0x400828, 0x007596ff);
		nv_wr32(dev, 0x40082c, 0x00000108);
		break;
	case 0x43:
		nv_wr32(dev, 0x400828, 0x0072cb77);
		nv_wr32(dev, 0x40082c, 0x00000108);
		break;
	case 0x44:
	case 0x46: /* G72 */
	case 0x4a:
	case 0x4c: /* G7x-based C51 */
	case 0x4e:
		nv_wr32(dev, 0x400860, 0);
		nv_wr32(dev, 0x400864, 0);
		break;
	case 0x47: /* G70 */
	case 0x49: /* G71 */
	case 0x4b: /* G73 */
		nv_wr32(dev, 0x400828, 0x07830610);
		nv_wr32(dev, 0x40082c, 0x0000016A);
		break;
	}
	nv_wr32(dev, 0x400b38, 0x2ffff800);
	nv_wr32(dev, 0x400b3c, 0x00006000);
	/* Tiling related stuff. */
	switch (dev_priv->chipset) {
	case 0x44:
	case 0x4a:
		nv_wr32(dev, 0x400bc4, 0x1003d888);
		nv_wr32(dev, 0x400bbc, 0xb7a7b500);
		break;
	case 0x46:
		nv_wr32(dev, 0x400bc4, 0x0000e024);
		nv_wr32(dev, 0x400bbc, 0xb7a7b520);
		break;
	case 0x4c:
	case 0x4e:
	case 0x67:
		nv_wr32(dev, 0x400bc4, 0x1003d888);
		nv_wr32(dev, 0x400bbc, 0xb7a7b540);
		break;
	}

	/* Turn all the tiling regions off. */
	for (i = 0; i < pfb->num_tiles; i++)
		nv40_graph_set_tile_region(dev, i);
	/* begin RAM config */
	vramsz = pci_resource_len(dev->pdev, 0) - 1;
	switch (dev_priv->chipset) {
	case 0x40:
		nv_wr32(dev, 0x4009A4, nv_rd32(dev, NV04_PFB_CFG0));
		nv_wr32(dev, 0x4009A8, nv_rd32(dev, NV04_PFB_CFG1));
		nv_wr32(dev, 0x4069A4, nv_rd32(dev, NV04_PFB_CFG0));
		nv_wr32(dev, 0x4069A8, nv_rd32(dev, NV04_PFB_CFG1));
		nv_wr32(dev, 0x400820, 0);
		nv_wr32(dev, 0x400824, 0);
		nv_wr32(dev, 0x400864, vramsz);
		nv_wr32(dev, 0x400868, vramsz);
		break;
	default:
		switch (dev_priv->chipset) {
		case 0x46:
		case 0x47:
		case 0x49:
		case 0x4b:
			nv_wr32(dev, 0x400DF0, nv_rd32(dev, NV04_PFB_CFG0));
			nv_wr32(dev, 0x400DF4, nv_rd32(dev, NV04_PFB_CFG1));
			break;
		default:
			nv_wr32(dev, 0x4009F0, nv_rd32(dev, NV04_PFB_CFG0));
			nv_wr32(dev, 0x4009F4, nv_rd32(dev, NV04_PFB_CFG1));
			break;
		}
		nv_wr32(dev, 0x4069F0, nv_rd32(dev, NV04_PFB_CFG0));
		nv_wr32(dev, 0x4069F4, nv_rd32(dev, NV04_PFB_CFG1));
		nv_wr32(dev, 0x400840, 0);
		nv_wr32(dev, 0x400844, 0);
		nv_wr32(dev, 0x4008A0, vramsz);
		nv_wr32(dev, 0x4008A4, vramsz);
		break;
	}

	return 0;
}
void nv40_graph_takedown(struct drm_device *dev)
{
	nouveau_irq_unregister(dev, 12);
}
static int
nv40_graph_register(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if (dev_priv->engine.graph.registered)
		return 0;

	NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
	NVOBJ_CLASS(dev, 0x0030, GR); /* null */
	NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
	NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
	NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
	NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
	NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
	NVOBJ_CLASS(dev, 0x3089, GR); /* sifm (nv40) */
	NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
	NVOBJ_CLASS(dev, 0x3062, GR); /* surf2d (nv40) */
	NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
	NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
	NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
	NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
	NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
	NVOBJ_CLASS(dev, 0x309e, GR); /* swzsurf */

	/* curie */
	if (dev_priv->chipset >= 0x60 ||
	    0x00005450 & (1 << (dev_priv->chipset & 0x0f)))
		NVOBJ_CLASS(dev, 0x4497, GR);
	else
		NVOBJ_CLASS(dev, 0x4097, GR);

	/* nvsw */
	NVOBJ_CLASS(dev, 0x506e, SW);
	NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);

	dev_priv->engine.graph.registered = true;
	return 0;
}
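/* Map a PGRAPH context instance address back to the channel id that owns
 * it; used by the ISR for error reporting.
 */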
static int
nv40_graph_isr_chid(struct drm_device *dev, u32 inst)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&dev_priv->channels.lock, flags);
	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
		chan = dev_priv->channels.ptr[i];
		if (!chan || !chan->ramin_grctx)
			continue;
		if (inst == chan->ramin_grctx->pinst)
			break;
	}
	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
	return i;
}
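/* PGRAPH interrupt handler: decode the trapped method, forward software
 * methods where possible, and log anything that remains unhandled.
 */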
static void
nv40_graph_isr(struct drm_device *dev)
{
	u32 stat;

	while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
		u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
		u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
		u32 inst = (nv_rd32(dev, 0x40032c) & 0x000fffff) << 4;
		u32 chid = nv40_graph_isr_chid(dev, inst);
		u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
		u32 subc = (addr & 0x00070000) >> 16;
		u32 mthd = (addr & 0x00001ffc);
		u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
		u32 class = nv_rd32(dev, 0x400160 + subc * 4) & 0xffff;
		u32 show = stat;

		if (stat & NV_PGRAPH_INTR_ERROR) {
			if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
				if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
					show &= ~NV_PGRAPH_INTR_ERROR;
			} else
			if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
				nv_mask(dev, 0x402000, 0, 0);
			}
		}

		nv_wr32(dev, NV03_PGRAPH_INTR, stat);
		nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);

		if (show && nouveau_ratelimit()) {
			NV_INFO(dev, "PGRAPH -");
			nouveau_bitfield_print(nv10_graph_intr, show);
			printk(" nsource:");
			nouveau_bitfield_print(nv04_graph_nsource, nsource);
			printk(" nstatus:");
			nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
			printk("\n");
			NV_INFO(dev, "PGRAPH - ch %d (0x%08x) subc %d "
				     "class 0x%04x mthd 0x%04x data 0x%08x\n",
				chid, inst, subc, class, mthd, data);
		}
	}
}