drivers/gpu/drm/nouveau/nv40_graph.c
/*
 * Copyright (C) 2007 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>

#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"

MODULE_FIRMWARE("nouveau/nv40.ctxprog");
MODULE_FIRMWARE("nouveau/nv40.ctxvals");
MODULE_FIRMWARE("nouveau/nv41.ctxprog");
MODULE_FIRMWARE("nouveau/nv41.ctxvals");
MODULE_FIRMWARE("nouveau/nv42.ctxprog");
MODULE_FIRMWARE("nouveau/nv42.ctxvals");
MODULE_FIRMWARE("nouveau/nv43.ctxprog");
MODULE_FIRMWARE("nouveau/nv43.ctxvals");
MODULE_FIRMWARE("nouveau/nv44.ctxprog");
MODULE_FIRMWARE("nouveau/nv44.ctxvals");
MODULE_FIRMWARE("nouveau/nv46.ctxprog");
MODULE_FIRMWARE("nouveau/nv46.ctxvals");
MODULE_FIRMWARE("nouveau/nv47.ctxprog");
MODULE_FIRMWARE("nouveau/nv47.ctxvals");
MODULE_FIRMWARE("nouveau/nv49.ctxprog");
MODULE_FIRMWARE("nouveau/nv49.ctxvals");
MODULE_FIRMWARE("nouveau/nv4a.ctxprog");
MODULE_FIRMWARE("nouveau/nv4a.ctxvals");
MODULE_FIRMWARE("nouveau/nv4b.ctxprog");
MODULE_FIRMWARE("nouveau/nv4b.ctxvals");
MODULE_FIRMWARE("nouveau/nv4c.ctxprog");
MODULE_FIRMWARE("nouveau/nv4c.ctxvals");
MODULE_FIRMWARE("nouveau/nv4e.ctxprog");
MODULE_FIRMWARE("nouveau/nv4e.ctxvals");

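/* Return the channel whose PGRAPH context is currently loaded, or NULL. */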
struct nouveau_channel *
nv40_graph_channel(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t inst;
	int i;

	inst = nv_rd32(dev, NV40_PGRAPH_CTXCTL_CUR);
	if (!(inst & NV40_PGRAPH_CTXCTL_CUR_LOADED))
		return NULL;
	inst = (inst & NV40_PGRAPH_CTXCTL_CUR_INSTANCE) << 4;

	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
		struct nouveau_channel *chan = dev_priv->fifos[i];

		if (chan && chan->ramin_grctx &&
		    chan->ramin_grctx->instance == inst)
			return chan;
	}

	return NULL;
}

int
nv40_graph_create_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *ctx;
	int ret;

	/* Allocate a 175KiB block of PRAMIN to store the context.  This
	 * is massive overkill for a lot of chipsets, but it should be safe
	 * until we're able to implement this properly (will happen at more
	 * or less the same time we're able to write our own context
	 * programs).
	 */
	ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 175*1024, 16,
				     NVOBJ_FLAG_ZERO_ALLOC,
				     &chan->ramin_grctx);
	if (ret)
		return ret;
	ctx = chan->ramin_grctx->gpuobj;

	/* Initialise default context values */
	dev_priv->engine.instmem.prepare_access(dev, true);
	nv40_grctx_vals_load(dev, ctx);
	nv_wo32(dev, ctx, 0, ctx->im_pramin->start);
	dev_priv->engine.instmem.finish_access(dev);

	return 0;
}

void
nv40_graph_destroy_context(struct nouveau_channel *chan)
{
	nouveau_gpuobj_ref_del(chan->dev, &chan->ramin_grctx);
}

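/* Transfer channel state between PGRAPH and the context object at 'inst':
 * save != 0 writes the on-chip state out to the instance, save == 0 loads
 * it back in.
 */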
static int
nv40_graph_transfer_context(struct drm_device *dev, uint32_t inst, int save)
{
	uint32_t old_cp, tv = 1000, tmp;
	int i;

	old_cp = nv_rd32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER);
	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);

	tmp  = nv_rd32(dev, NV40_PGRAPH_CTXCTL_0310);
	tmp |= save ? NV40_PGRAPH_CTXCTL_0310_XFER_SAVE :
		      NV40_PGRAPH_CTXCTL_0310_XFER_LOAD;
	nv_wr32(dev, NV40_PGRAPH_CTXCTL_0310, tmp);

	tmp  = nv_rd32(dev, NV40_PGRAPH_CTXCTL_0304);
	tmp |= NV40_PGRAPH_CTXCTL_0304_XFER_CTX;
	nv_wr32(dev, NV40_PGRAPH_CTXCTL_0304, tmp);

	nouveau_wait_for_idle(dev);

	for (i = 0; i < tv; i++) {
		if (nv_rd32(dev, NV40_PGRAPH_CTXCTL_030C) == 0)
			break;
	}

	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, old_cp);

	if (i == tv) {
		uint32_t ucstat = nv_rd32(dev, NV40_PGRAPH_CTXCTL_UCODE_STAT);
		NV_ERROR(dev, "Failed: Instance=0x%08x Save=%d\n", inst, save);
		NV_ERROR(dev, "IP: 0x%02x, Opcode: 0x%08x\n",
			 ucstat >> NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_SHIFT,
			 ucstat  & NV40_PGRAPH_CTXCTL_UCODE_STAT_OP_MASK);
		NV_ERROR(dev, "0x40030C = 0x%08x\n",
			 nv_rd32(dev, NV40_PGRAPH_CTXCTL_030C));
		return -EBUSY;
	}

	return 0;
}

/* Restore the context for a specific channel into PGRAPH */
int
nv40_graph_load_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	uint32_t inst;
	int ret;

	if (!chan->ramin_grctx)
		return -EINVAL;
	inst = chan->ramin_grctx->instance >> 4;

	ret = nv40_graph_transfer_context(dev, inst, 0);
	if (ret)
		return ret;

	/* 0x40032C, no idea of its exact function.  Could simply be a
	 * record of the currently active PGRAPH context.  It's currently
	 * unknown as to what bit 24 does.  The nv ddx has it set, so we will
	 * set it here too.
	 */
	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
	nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR,
		 (inst & NV40_PGRAPH_CTXCTL_CUR_INSTANCE) |
		  NV40_PGRAPH_CTXCTL_CUR_LOADED);
	/* 0x32E0 records the instance address of the active FIFO's PGRAPH
	 * context.  If at any time this doesn't match 0x40032C, you will
	 * receive PGRAPH_INTR_CONTEXT_SWITCH
	 */
	nv_wr32(dev, NV40_PFIFO_GRCTX_INSTANCE, inst);
	return 0;
}

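/* Save the currently-loaded PGRAPH context, if any, back to its context
 * object and mark PGRAPH as having no context loaded.
 */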
int
nv40_graph_unload_context(struct drm_device *dev)
{
	uint32_t inst;
	int ret;

	inst = nv_rd32(dev, NV40_PGRAPH_CTXCTL_CUR);
	if (!(inst & NV40_PGRAPH_CTXCTL_CUR_LOADED))
		return 0;
	inst &= NV40_PGRAPH_CTXCTL_CUR_INSTANCE;

	ret = nv40_graph_transfer_context(dev, inst, 1);

	nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, inst);
	return ret;
}

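/* On-disk layout of the external ctxprog/ctxvals firmware images consumed
 * by nv40_grctx_init().
 */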
struct nouveau_ctxprog {
	uint32_t signature;
	uint8_t  version;
	uint16_t length;
	uint32_t data[];
} __attribute__ ((packed));

struct nouveau_ctxvals {
	uint32_t signature;
	uint8_t  version;
	uint32_t length;
	struct {
		uint32_t offset;
		uint32_t value;
	} data[];
} __attribute__ ((packed));

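/* Fetch the chipset's ctxprog/ctxvals firmware (if not already cached) and
 * upload the ctxprog into PGRAPH's context-switch microcode store.
 * Acceleration remains blocked if any step fails.
 */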
int
nv40_grctx_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	const int chipset = dev_priv->chipset;
	const struct firmware *fw;
	const struct nouveau_ctxprog *cp;
	const struct nouveau_ctxvals *cv;
	char name[32];
	int ret, i;

	pgraph->accel_blocked = true;

	if (!pgraph->ctxprog) {
		sprintf(name, "nouveau/nv%02x.ctxprog", chipset);
		ret = request_firmware(&fw, name, &dev->pdev->dev);
		if (ret) {
			NV_ERROR(dev, "No ctxprog for NV%02x\n", chipset);
			return ret;
		}

		pgraph->ctxprog = kmalloc(fw->size, GFP_KERNEL);
		if (!pgraph->ctxprog) {
			NV_ERROR(dev, "OOM copying ctxprog\n");
			release_firmware(fw);
			return -ENOMEM;
		}
		memcpy(pgraph->ctxprog, fw->data, fw->size);

		cp = pgraph->ctxprog;
		if (le32_to_cpu(cp->signature) != 0x5043564e ||
		    cp->version != 0 ||
		    le16_to_cpu(cp->length) != ((fw->size - 7) / 4)) {
			NV_ERROR(dev, "ctxprog invalid\n");
			release_firmware(fw);
			nv40_grctx_fini(dev);
			return -EINVAL;
		}
		release_firmware(fw);
	}

	if (!pgraph->ctxvals) {
		sprintf(name, "nouveau/nv%02x.ctxvals", chipset);
		ret = request_firmware(&fw, name, &dev->pdev->dev);
		if (ret) {
			NV_ERROR(dev, "No ctxvals for NV%02x\n", chipset);
			nv40_grctx_fini(dev);
			return ret;
		}

		pgraph->ctxvals = kmalloc(fw->size, GFP_KERNEL);
		if (!pgraph->ctxvals) {
			NV_ERROR(dev, "OOM copying ctxvals\n");
			release_firmware(fw);
			nv40_grctx_fini(dev);
			return -ENOMEM;
		}
		memcpy(pgraph->ctxvals, fw->data, fw->size);

		cv = (void *)pgraph->ctxvals;
		if (le32_to_cpu(cv->signature) != 0x5643564e ||
		    cv->version != 0 ||
		    le32_to_cpu(cv->length) != ((fw->size - 9) / 8)) {
			NV_ERROR(dev, "ctxvals invalid\n");
			release_firmware(fw);
			nv40_grctx_fini(dev);
			return -EINVAL;
		}
		release_firmware(fw);
	}

	cp = pgraph->ctxprog;

	nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
	for (i = 0; i < le16_to_cpu(cp->length); i++)
		nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA,
			le32_to_cpu(cp->data[i]));

	pgraph->accel_blocked = false;
	return 0;
}

void
nv40_grctx_fini(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;

	if (pgraph->ctxprog) {
		kfree(pgraph->ctxprog);
		pgraph->ctxprog = NULL;
	}

	if (pgraph->ctxvals) {
		kfree(pgraph->ctxvals);
		pgraph->ctxvals = NULL;
	}
}

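/* Write the default values from the ctxvals firmware into a newly-created
 * context object.
 */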
void
nv40_grctx_vals_load(struct drm_device *dev, struct nouveau_gpuobj *ctx)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	struct nouveau_ctxvals *cv = pgraph->ctxvals;
	int i;

	if (!cv)
		return;

	for (i = 0; i < le32_to_cpu(cv->length); i++)
		nv_wo32(dev, ctx, le32_to_cpu(cv->data[i].offset),
			le32_to_cpu(cv->data[i].value));
}

/*
 * G70		0x47
 * G71		0x49
 * NV45		0x48
 * G72[M]	0x46
 * G73		0x4b
 * C51_G7X	0x4c
 * C51		0x4e
 */
int
nv40_graph_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv =
		(struct drm_nouveau_private *)dev->dev_private;
	uint32_t vramsz, tmp;
	int i, j;

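	/* Reset PGRAPH by toggling its enable bit in PMC. */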
	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
			~NV_PMC_ENABLE_PGRAPH);
	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
			 NV_PMC_ENABLE_PGRAPH);

	nv40_grctx_init(dev);

	/* No context present currently */
	nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);

	nv_wr32(dev, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
	nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF);

	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000);
	nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x401287c0);
	nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xe0de8055);
	nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00008000);
	nv_wr32(dev, NV04_PGRAPH_LIMIT_VIOL_PIX, 0x00be3c5f);

	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
	nv_wr32(dev, NV10_PGRAPH_STATE      , 0xFFFFFFFF);

	j = nv_rd32(dev, 0x1540) & 0xff;
	if (j) {
		for (i = 0; !(j & 1); j >>= 1, i++)
			;
		nv_wr32(dev, 0x405000, i);
	}

	if (dev_priv->chipset == 0x40) {
		nv_wr32(dev, 0x4009b0, 0x83280fff);
		nv_wr32(dev, 0x4009b4, 0x000000a0);
	} else {
		nv_wr32(dev, 0x400820, 0x83280eff);
		nv_wr32(dev, 0x400824, 0x000000a0);
	}

	switch (dev_priv->chipset) {
	case 0x40:
	case 0x45:
		nv_wr32(dev, 0x4009b8, 0x0078e366);
		nv_wr32(dev, 0x4009bc, 0x0000014c);
		break;
	case 0x41:
	case 0x42: /* pciid also 0x00Cx */
	/* case 0x0120: XXX (pciid) */
		nv_wr32(dev, 0x400828, 0x007596ff);
		nv_wr32(dev, 0x40082c, 0x00000108);
		break;
	case 0x43:
		nv_wr32(dev, 0x400828, 0x0072cb77);
		nv_wr32(dev, 0x40082c, 0x00000108);
		break;
	case 0x44:
	case 0x46: /* G72 */
	case 0x4a:
	case 0x4c: /* G7x-based C51 */
	case 0x4e:
		nv_wr32(dev, 0x400860, 0);
		nv_wr32(dev, 0x400864, 0);
		break;
	case 0x47: /* G70 */
	case 0x49: /* G71 */
	case 0x4b: /* G73 */
		nv_wr32(dev, 0x400828, 0x07830610);
		nv_wr32(dev, 0x40082c, 0x0000016A);
		break;
	default:
		break;
	}

	nv_wr32(dev, 0x400b38, 0x2ffff800);
	nv_wr32(dev, 0x400b3c, 0x00006000);

	/* copy tile info from PFB */
	switch (dev_priv->chipset) {
	case 0x40: /* vanilla NV40 */
		for (i = 0; i < NV10_PFB_TILE__SIZE; i++) {
			tmp = nv_rd32(dev, NV10_PFB_TILE(i));
			nv_wr32(dev, NV40_PGRAPH_TILE0(i), tmp);
			nv_wr32(dev, NV40_PGRAPH_TILE1(i), tmp);
			tmp = nv_rd32(dev, NV10_PFB_TLIMIT(i));
			nv_wr32(dev, NV40_PGRAPH_TLIMIT0(i), tmp);
			nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tmp);
			tmp = nv_rd32(dev, NV10_PFB_TSIZE(i));
			nv_wr32(dev, NV40_PGRAPH_TSIZE0(i), tmp);
			nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tmp);
			tmp = nv_rd32(dev, NV10_PFB_TSTATUS(i));
			nv_wr32(dev, NV40_PGRAPH_TSTATUS0(i), tmp);
			nv_wr32(dev, NV40_PGRAPH_TSTATUS1(i), tmp);
		}
		break;
	case 0x44:
	case 0x4a:
	case 0x4e: /* NV44-based cores don't have 0x406900? */
		for (i = 0; i < NV40_PFB_TILE__SIZE_0; i++) {
			tmp = nv_rd32(dev, NV40_PFB_TILE(i));
			nv_wr32(dev, NV40_PGRAPH_TILE0(i), tmp);
			tmp = nv_rd32(dev, NV40_PFB_TLIMIT(i));
			nv_wr32(dev, NV40_PGRAPH_TLIMIT0(i), tmp);
			tmp = nv_rd32(dev, NV40_PFB_TSIZE(i));
			nv_wr32(dev, NV40_PGRAPH_TSIZE0(i), tmp);
			tmp = nv_rd32(dev, NV40_PFB_TSTATUS(i));
			nv_wr32(dev, NV40_PGRAPH_TSTATUS0(i), tmp);
		}
		break;
	case 0x46:
	case 0x47:
	case 0x49:
	case 0x4b: /* G7X-based cores */
		for (i = 0; i < NV40_PFB_TILE__SIZE_1; i++) {
			tmp = nv_rd32(dev, NV40_PFB_TILE(i));
			nv_wr32(dev, NV47_PGRAPH_TILE0(i), tmp);
			nv_wr32(dev, NV40_PGRAPH_TILE1(i), tmp);
			tmp = nv_rd32(dev, NV40_PFB_TLIMIT(i));
			nv_wr32(dev, NV47_PGRAPH_TLIMIT0(i), tmp);
			nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tmp);
			tmp = nv_rd32(dev, NV40_PFB_TSIZE(i));
			nv_wr32(dev, NV47_PGRAPH_TSIZE0(i), tmp);
			nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tmp);
			tmp = nv_rd32(dev, NV40_PFB_TSTATUS(i));
			nv_wr32(dev, NV47_PGRAPH_TSTATUS0(i), tmp);
			nv_wr32(dev, NV40_PGRAPH_TSTATUS1(i), tmp);
		}
		break;
	default: /* everything else */
		for (i = 0; i < NV40_PFB_TILE__SIZE_0; i++) {
			tmp = nv_rd32(dev, NV40_PFB_TILE(i));
			nv_wr32(dev, NV40_PGRAPH_TILE0(i), tmp);
			nv_wr32(dev, NV40_PGRAPH_TILE1(i), tmp);
			tmp = nv_rd32(dev, NV40_PFB_TLIMIT(i));
			nv_wr32(dev, NV40_PGRAPH_TLIMIT0(i), tmp);
			nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tmp);
			tmp = nv_rd32(dev, NV40_PFB_TSIZE(i));
			nv_wr32(dev, NV40_PGRAPH_TSIZE0(i), tmp);
			nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tmp);
			tmp = nv_rd32(dev, NV40_PFB_TSTATUS(i));
			nv_wr32(dev, NV40_PGRAPH_TSTATUS0(i), tmp);
			nv_wr32(dev, NV40_PGRAPH_TSTATUS1(i), tmp);
		}
		break;
	}

	/* begin RAM config */
	vramsz = drm_get_resource_len(dev, 0) - 1;
	switch (dev_priv->chipset) {
	case 0x40:
		nv_wr32(dev, 0x4009A4, nv_rd32(dev, NV04_PFB_CFG0));
		nv_wr32(dev, 0x4009A8, nv_rd32(dev, NV04_PFB_CFG1));
		nv_wr32(dev, 0x4069A4, nv_rd32(dev, NV04_PFB_CFG0));
		nv_wr32(dev, 0x4069A8, nv_rd32(dev, NV04_PFB_CFG1));
		nv_wr32(dev, 0x400820, 0);
		nv_wr32(dev, 0x400824, 0);
		nv_wr32(dev, 0x400864, vramsz);
		nv_wr32(dev, 0x400868, vramsz);
		break;
	default:
		switch (dev_priv->chipset) {
		case 0x46:
		case 0x47:
		case 0x49:
		case 0x4b:
			nv_wr32(dev, 0x400DF0, nv_rd32(dev, NV04_PFB_CFG0));
			nv_wr32(dev, 0x400DF4, nv_rd32(dev, NV04_PFB_CFG1));
			break;
		default:
			nv_wr32(dev, 0x4009F0, nv_rd32(dev, NV04_PFB_CFG0));
			nv_wr32(dev, 0x4009F4, nv_rd32(dev, NV04_PFB_CFG1));
			break;
		}
		nv_wr32(dev, 0x4069F0, nv_rd32(dev, NV04_PFB_CFG0));
		nv_wr32(dev, 0x4069F4, nv_rd32(dev, NV04_PFB_CFG1));
		nv_wr32(dev, 0x400840, 0);
		nv_wr32(dev, 0x400844, 0);
		nv_wr32(dev, 0x4008A0, vramsz);
		nv_wr32(dev, 0x4008A4, vramsz);
		break;
	}

	return 0;
}

void nv40_graph_takedown(struct drm_device *dev)
{
}

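/* Graphics object classes exposed by PGRAPH on NV40-family chipsets. */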
struct nouveau_pgraph_object_class nv40_graph_grclass[] = {
	{ 0x0030, false, NULL }, /* null */
	{ 0x0039, false, NULL }, /* m2mf */
	{ 0x004a, false, NULL }, /* gdirect */
	{ 0x009f, false, NULL }, /* imageblit (nv12) */
	{ 0x008a, false, NULL }, /* ifc */
	{ 0x0089, false, NULL }, /* sifm */
	{ 0x3089, false, NULL }, /* sifm (nv40) */
	{ 0x0062, false, NULL }, /* surf2d */
	{ 0x3062, false, NULL }, /* surf2d (nv40) */
	{ 0x0043, false, NULL }, /* rop */
	{ 0x0012, false, NULL }, /* beta1 */
	{ 0x0072, false, NULL }, /* beta4 */
	{ 0x0019, false, NULL }, /* cliprect */
	{ 0x0044, false, NULL }, /* pattern */
	{ 0x309e, false, NULL }, /* swzsurf */
	{ 0x4097, false, NULL }, /* curie (nv40) */
	{ 0x4497, false, NULL }, /* curie (nv44) */
	{}
};