drivers/gpu/drm/nouveau/nv40_graph.c
1 /*
2  * Copyright (C) 2007 Ben Skeggs.
3  * All Rights Reserved.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining
6  * a copy of this software and associated documentation files (the
7  * "Software"), to deal in the Software without restriction, including
8  * without limitation the rights to use, copy, modify, merge, publish,
9  * distribute, sublicense, and/or sell copies of the Software, and to
10  * permit persons to whom the Software is furnished to do so, subject to
11  * the following conditions:
12  *
13  * The above copyright notice and this permission notice (including the
14  * next paragraph) shall be included in all copies or substantial
15  * portions of the Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20  * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21  * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22  * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24  *
25  */
26
27 #include <linux/firmware.h>
28
29 #include "drmP.h"
30 #include "drm.h"
31 #include "nouveau_drv.h"
32
33 MODULE_FIRMWARE("nouveau/nv40.ctxprog");
34 MODULE_FIRMWARE("nouveau/nv40.ctxvals");
35 MODULE_FIRMWARE("nouveau/nv41.ctxprog");
36 MODULE_FIRMWARE("nouveau/nv41.ctxvals");
37 MODULE_FIRMWARE("nouveau/nv42.ctxprog");
38 MODULE_FIRMWARE("nouveau/nv42.ctxvals");
39 MODULE_FIRMWARE("nouveau/nv43.ctxprog");
40 MODULE_FIRMWARE("nouveau/nv43.ctxvals");
41 MODULE_FIRMWARE("nouveau/nv44.ctxprog");
42 MODULE_FIRMWARE("nouveau/nv44.ctxvals");
43 MODULE_FIRMWARE("nouveau/nv46.ctxprog");
44 MODULE_FIRMWARE("nouveau/nv46.ctxvals");
45 MODULE_FIRMWARE("nouveau/nv47.ctxprog");
46 MODULE_FIRMWARE("nouveau/nv47.ctxvals");
47 MODULE_FIRMWARE("nouveau/nv49.ctxprog");
48 MODULE_FIRMWARE("nouveau/nv49.ctxvals");
49 MODULE_FIRMWARE("nouveau/nv4a.ctxprog");
50 MODULE_FIRMWARE("nouveau/nv4a.ctxvals");
51 MODULE_FIRMWARE("nouveau/nv4b.ctxprog");
52 MODULE_FIRMWARE("nouveau/nv4b.ctxvals");
53 MODULE_FIRMWARE("nouveau/nv4c.ctxprog");
54 MODULE_FIRMWARE("nouveau/nv4c.ctxvals");
55 MODULE_FIRMWARE("nouveau/nv4e.ctxprog");
56 MODULE_FIRMWARE("nouveau/nv4e.ctxvals");
57
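/* Look up which channel, if any, currently has its context resident in
 * PGRAPH, by matching the instance address in NV40_PGRAPH_CTXCTL_CUR
 * against each channel's grctx object.
 */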
58 struct nouveau_channel *
59 nv40_graph_channel(struct drm_device *dev)
60 {
61         struct drm_nouveau_private *dev_priv = dev->dev_private;
62         uint32_t inst;
63         int i;
64
65         inst = nv_rd32(dev, NV40_PGRAPH_CTXCTL_CUR);
66         if (!(inst & NV40_PGRAPH_CTXCTL_CUR_LOADED))
67                 return NULL;
68         inst = (inst & NV40_PGRAPH_CTXCTL_CUR_INSTANCE) << 4;
69
70         for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
71                 struct nouveau_channel *chan = dev_priv->fifos[i];
72
73                 if (chan && chan->ramin_grctx &&
74                     chan->ramin_grctx->instance == inst)
75                         return chan;
76         }
77
78         return NULL;
79 }
80
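/* Allocate and initialise the per-channel PGRAPH context image in PRAMIN. */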
81 int
82 nv40_graph_create_context(struct nouveau_channel *chan)
83 {
84         struct drm_device *dev = chan->dev;
85         struct drm_nouveau_private *dev_priv = dev->dev_private;
86         struct nouveau_gpuobj *ctx;
87         int ret;
88
89         /* Allocate a 175KiB block of PRAMIN to store the context.  This
90          * is massive overkill for a lot of chipsets, but it should be safe
91          * until we're able to implement this properly (will happen at more
92          * or less the same time we're able to write our own context programs).
93          */
94         ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 175*1024, 16,
95                                           NVOBJ_FLAG_ZERO_ALLOC,
96                                           &chan->ramin_grctx);
97         if (ret)
98                 return ret;
99         ctx = chan->ramin_grctx->gpuobj;
100
101         /* Initialise default context values */
102         dev_priv->engine.instmem.prepare_access(dev, true);
103         nv40_grctx_vals_load(dev, ctx);
104         nv_wo32(dev, ctx, 0, ctx->im_pramin->start);
105         dev_priv->engine.instmem.finish_access(dev);
106
107         return 0;
108 }
109
110 void
111 nv40_graph_destroy_context(struct nouveau_channel *chan)
112 {
113         nouveau_gpuobj_ref_del(chan->dev, &chan->ramin_grctx);
114 }
115
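/* Ask the context-switch microcode to transfer a channel context between
 * PGRAPH and the PRAMIN instance at 'inst'.  'save' selects the direction:
 * non-zero saves current PGRAPH state out to memory, zero loads it back in.
 * Returns -EBUSY if the transfer doesn't complete in time.
 */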
116 static int
117 nv40_graph_transfer_context(struct drm_device *dev, uint32_t inst, int save)
118 {
119         uint32_t old_cp, tv = 1000, tmp;
120         int i;
121
122         old_cp = nv_rd32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER);
123         nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
124
125         tmp  = nv_rd32(dev, NV40_PGRAPH_CTXCTL_0310);
126         tmp |= save ? NV40_PGRAPH_CTXCTL_0310_XFER_SAVE :
127                       NV40_PGRAPH_CTXCTL_0310_XFER_LOAD;
128         nv_wr32(dev, NV40_PGRAPH_CTXCTL_0310, tmp);
129
130         tmp  = nv_rd32(dev, NV40_PGRAPH_CTXCTL_0304);
131         tmp |= NV40_PGRAPH_CTXCTL_0304_XFER_CTX;
132         nv_wr32(dev, NV40_PGRAPH_CTXCTL_0304, tmp);
133
134         nouveau_wait_for_idle(dev);
135
136         for (i = 0; i < tv; i++) {
137                 if (nv_rd32(dev, NV40_PGRAPH_CTXCTL_030C) == 0)
138                         break;
139         }
140
141         nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, old_cp);
142
143         if (i == tv) {
144                 uint32_t ucstat = nv_rd32(dev, NV40_PGRAPH_CTXCTL_UCODE_STAT);
145                 NV_ERROR(dev, "Failed: Instance=0x%08x Save=%d\n", inst, save);
146                 NV_ERROR(dev, "IP: 0x%02x, Opcode: 0x%08x\n",
147                          ucstat >> NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_SHIFT,
148                          ucstat  & NV40_PGRAPH_CTXCTL_UCODE_STAT_OP_MASK);
149                 NV_ERROR(dev, "0x40030C = 0x%08x\n",
150                          nv_rd32(dev, NV40_PGRAPH_CTXCTL_030C));
151                 return -EBUSY;
152         }
153
154         return 0;
155 }
156
157 /* Restore the context for a specific channel into PGRAPH */
158 int
159 nv40_graph_load_context(struct nouveau_channel *chan)
160 {
161         struct drm_device *dev = chan->dev;
162         uint32_t inst;
163         int ret;
164
165         if (!chan->ramin_grctx)
166                 return -EINVAL;
167         inst = chan->ramin_grctx->instance >> 4;
168
169         ret = nv40_graph_transfer_context(dev, inst, 0);
170         if (ret)
171                 return ret;
172
173         /* 0x40032C, no idea of its exact function.  Could simply be a
174          * record of the currently active PGRAPH context.  It's currently
175          * unknown what bit 24 does.  The nv ddx has it set, so we will
176          * set it here too.
177          */
178         nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
179         nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR,
180                  (inst & NV40_PGRAPH_CTXCTL_CUR_INSTANCE) |
181                   NV40_PGRAPH_CTXCTL_CUR_LOADED);
182         /* 0x32E0 records the instance address of the active FIFO's PGRAPH
183          * context.  If at any time this doesn't match 0x40032C, you will
184          * receive a PGRAPH_INTR_CONTEXT_SWITCH interrupt.
185          */
186         nv_wr32(dev, NV40_PFIFO_GRCTX_INSTANCE, inst);
187         return 0;
188 }
189
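/* Save whatever context is currently loaded in PGRAPH back to its PRAMIN
 * instance and mark PGRAPH as having no context loaded.
 */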
190 int
191 nv40_graph_unload_context(struct drm_device *dev)
192 {
193         uint32_t inst;
194         int ret;
195
196         inst = nv_rd32(dev, NV40_PGRAPH_CTXCTL_CUR);
197         if (!(inst & NV40_PGRAPH_CTXCTL_CUR_LOADED))
198                 return 0;
199         inst &= NV40_PGRAPH_CTXCTL_CUR_INSTANCE;
200
201         ret = nv40_graph_transfer_context(dev, inst, 1);
202
203         nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, inst);
204         return ret;
205 }
206
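/* Layout of the external context firmware blobs: ctxprog is a stream of
 * raw ucode words, ctxvals a list of (offset, value) pairs used to seed a
 * newly created channel context.  Both carry a signature, version and
 * length header which nv40_grctx_init() validates before use.
 */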
207 struct nouveau_ctxprog {
208         uint32_t signature;
209         uint8_t  version;
210         uint16_t length;
211         uint32_t data[];
212 } __attribute__ ((packed));
213
214 struct nouveau_ctxvals {
215         uint32_t signature;
216         uint8_t  version;
217         uint32_t length;
218         struct {
219                 uint32_t offset;
220                 uint32_t value;
221         } data[];
222 } __attribute__ ((packed));
223
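/* Fetch the chipset-specific ctxprog/ctxvals firmware (cached after the
 * first call) and upload the ctxprog ucode into PGRAPH via the
 * CTXCTL_UCODE_INDEX/DATA registers.  Acceleration remains blocked if
 * either blob is missing or fails validation.
 */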
224 int
225 nv40_grctx_init(struct drm_device *dev)
226 {
227         struct drm_nouveau_private *dev_priv = dev->dev_private;
228         struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
229         const int chipset = dev_priv->chipset;
230         const struct firmware *fw;
231         const struct nouveau_ctxprog *cp;
232         const struct nouveau_ctxvals *cv;
233         char name[32];
234         int ret, i;
235
236         pgraph->accel_blocked = true;
237
238         if (!pgraph->ctxprog) {
239                 sprintf(name, "nouveau/nv%02x.ctxprog", chipset);
240                 ret = request_firmware(&fw, name, &dev->pdev->dev);
241                 if (ret) {
242                         NV_ERROR(dev, "No ctxprog for NV%02x\n", chipset);
243                         return ret;
244                 }
245
246                 pgraph->ctxprog = kmalloc(fw->size, GFP_KERNEL);
247                 if (!pgraph->ctxprog) {
248                         NV_ERROR(dev, "OOM copying ctxprog\n");
249                         release_firmware(fw);
250                         return -ENOMEM;
251                 }
252                 memcpy(pgraph->ctxprog, fw->data, fw->size);
253
254                 cp = pgraph->ctxprog;
255                 if (cp->signature != 0x5043564e || cp->version != 0 ||
256                     cp->length != ((fw->size - 7) / 4)) {
257                         NV_ERROR(dev, "ctxprog invalid\n");
258                         release_firmware(fw);
259                         nv40_grctx_fini(dev);
260                         return -EINVAL;
261                 }
262                 release_firmware(fw);
263         }
264
265         if (!pgraph->ctxvals) {
266                 sprintf(name, "nouveau/nv%02x.ctxvals", chipset);
267                 ret = request_firmware(&fw, name, &dev->pdev->dev);
268                 if (ret) {
269                         NV_ERROR(dev, "No ctxvals for NV%02x\n", chipset);
270                         nv40_grctx_fini(dev);
271                         return ret;
272                 }
273
274                 pgraph->ctxvals = kmalloc(fw->size, GFP_KERNEL);
275                 if (!pgraph->ctxvals) {
276                         NV_ERROR(dev, "OOM copying ctxvals\n");
277                         release_firmware(fw);
278                         nv40_grctx_fini(dev);
279                         return -ENOMEM;
280                 }
281                 memcpy(pgraph->ctxvals, fw->data, fw->size);
282
283                 cv = (void *)pgraph->ctxvals;
284                 if (cv->signature != 0x5643564e || cv->version != 0 ||
285                     cv->length != ((fw->size - 9) / 8)) {
286                         NV_ERROR(dev, "ctxvals invalid\n");
287                         release_firmware(fw);
288                         nv40_grctx_fini(dev);
289                         return -EINVAL;
290                 }
291                 release_firmware(fw);
292         }
293
294         cp = pgraph->ctxprog;
295
296         nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
297         for (i = 0; i < cp->length; i++)
298                 nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp->data[i]);
299
300         pgraph->accel_blocked = false;
301         return 0;
302 }
303
304 void
305 nv40_grctx_fini(struct drm_device *dev)
306 {
307         struct drm_nouveau_private *dev_priv = dev->dev_private;
308         struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
309
310         if (pgraph->ctxprog) {
311                 kfree(pgraph->ctxprog);
312                 pgraph->ctxprog = NULL;
313         }
314
315         if (pgraph->ctxvals) {
316                 kfree(pgraph->ctxvals);
317                 pgraph->ctxvals = NULL;
318         }
319 }
320
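/* Write the (offset, value) pairs from the ctxvals firmware into a channel
 * context object, giving it default register state.
 */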
321 void
322 nv40_grctx_vals_load(struct drm_device *dev, struct nouveau_gpuobj *ctx)
323 {
324         struct drm_nouveau_private *dev_priv = dev->dev_private;
325         struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
326         struct nouveau_ctxvals *cv = pgraph->ctxvals;
327         int i;
328
329         if (!cv)
330                 return;
331
332         for (i = 0; i < cv->length; i++)
333                 nv_wo32(dev, ctx, cv->data[i].offset, cv->data[i].value);
334 }
335
336 /*
337  * G70          0x47
338  * G71          0x49
339  * NV45         0x48
340  * G72[M]       0x46
341  * G73          0x4b
342  * C51_G7X      0x4c
343  * C51          0x4e
344  */
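/* Bring PGRAPH out of reset, load the context ucode, then program
 * per-chipset debug/tiling registers and mirror the PFB RAM configuration
 * into PGRAPH.
 */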
345 int
346 nv40_graph_init(struct drm_device *dev)
347 {
348         struct drm_nouveau_private *dev_priv =
349                 (struct drm_nouveau_private *)dev->dev_private;
350         uint32_t vramsz, tmp;
351         int i, j;
352
353         nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
354                         ~NV_PMC_ENABLE_PGRAPH);
355         nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
356                          NV_PMC_ENABLE_PGRAPH);
357
358         nv40_grctx_init(dev);
359
360         /* No context present currently */
361         nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);
362
363         nv_wr32(dev, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
364         nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF);
365
366         nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
367         nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000);
368         nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x401287c0);
369         nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xe0de8055);
370         nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00008000);
371         nv_wr32(dev, NV04_PGRAPH_LIMIT_VIOL_PIX, 0x00be3c5f);
372
373         nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
374         nv_wr32(dev, NV10_PGRAPH_STATE      , 0xFFFFFFFF);
375
376         j = nv_rd32(dev, 0x1540) & 0xff;
377         if (j) {
378                 for (i = 0; !(j & 1); j >>= 1, i++)
379                         ;
380                 nv_wr32(dev, 0x405000, i);
381         }
382
383         if (dev_priv->chipset == 0x40) {
384                 nv_wr32(dev, 0x4009b0, 0x83280fff);
385                 nv_wr32(dev, 0x4009b4, 0x000000a0);
386         } else {
387                 nv_wr32(dev, 0x400820, 0x83280eff);
388                 nv_wr32(dev, 0x400824, 0x000000a0);
389         }
390
391         switch (dev_priv->chipset) {
392         case 0x40:
393         case 0x45:
394                 nv_wr32(dev, 0x4009b8, 0x0078e366);
395                 nv_wr32(dev, 0x4009bc, 0x0000014c);
396                 break;
397         case 0x41:
398         case 0x42: /* pciid also 0x00Cx */
399         /* case 0x0120: XXX (pciid) */
400                 nv_wr32(dev, 0x400828, 0x007596ff);
401                 nv_wr32(dev, 0x40082c, 0x00000108);
402                 break;
403         case 0x43:
404                 nv_wr32(dev, 0x400828, 0x0072cb77);
405                 nv_wr32(dev, 0x40082c, 0x00000108);
406                 break;
407         case 0x44:
408         case 0x46: /* G72 */
409         case 0x4a:
410         case 0x4c: /* G7x-based C51 */
411         case 0x4e:
412                 nv_wr32(dev, 0x400860, 0);
413                 nv_wr32(dev, 0x400864, 0);
414                 break;
415         case 0x47: /* G70 */
416         case 0x49: /* G71 */
417         case 0x4b: /* G73 */
418                 nv_wr32(dev, 0x400828, 0x07830610);
419                 nv_wr32(dev, 0x40082c, 0x0000016A);
420                 break;
421         default:
422                 break;
423         }
424
425         nv_wr32(dev, 0x400b38, 0x2ffff800);
426         nv_wr32(dev, 0x400b3c, 0x00006000);
427
428         /* copy tile info from PFB */
429         switch (dev_priv->chipset) {
430         case 0x40: /* vanilla NV40 */
431                 for (i = 0; i < NV10_PFB_TILE__SIZE; i++) {
432                         tmp = nv_rd32(dev, NV10_PFB_TILE(i));
433                         nv_wr32(dev, NV40_PGRAPH_TILE0(i), tmp);
434                         nv_wr32(dev, NV40_PGRAPH_TILE1(i), tmp);
435                         tmp = nv_rd32(dev, NV10_PFB_TLIMIT(i));
436                         nv_wr32(dev, NV40_PGRAPH_TLIMIT0(i), tmp);
437                         nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tmp);
438                         tmp = nv_rd32(dev, NV10_PFB_TSIZE(i));
439                         nv_wr32(dev, NV40_PGRAPH_TSIZE0(i), tmp);
440                         nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tmp);
441                         tmp = nv_rd32(dev, NV10_PFB_TSTATUS(i));
442                         nv_wr32(dev, NV40_PGRAPH_TSTATUS0(i), tmp);
443                         nv_wr32(dev, NV40_PGRAPH_TSTATUS1(i), tmp);
444                 }
445                 break;
446         case 0x44:
447         case 0x4a:
448         case 0x4e: /* NV44-based cores don't have 0x406900? */
449                 for (i = 0; i < NV40_PFB_TILE__SIZE_0; i++) {
450                         tmp = nv_rd32(dev, NV40_PFB_TILE(i));
451                         nv_wr32(dev, NV40_PGRAPH_TILE0(i), tmp);
452                         tmp = nv_rd32(dev, NV40_PFB_TLIMIT(i));
453                         nv_wr32(dev, NV40_PGRAPH_TLIMIT0(i), tmp);
454                         tmp = nv_rd32(dev, NV40_PFB_TSIZE(i));
455                         nv_wr32(dev, NV40_PGRAPH_TSIZE0(i), tmp);
456                         tmp = nv_rd32(dev, NV40_PFB_TSTATUS(i));
457                         nv_wr32(dev, NV40_PGRAPH_TSTATUS0(i), tmp);
458                 }
459                 break;
460         case 0x46:
461         case 0x47:
462         case 0x49:
463         case 0x4b: /* G7X-based cores */
464                 for (i = 0; i < NV40_PFB_TILE__SIZE_1; i++) {
465                         tmp = nv_rd32(dev, NV40_PFB_TILE(i));
466                         nv_wr32(dev, NV47_PGRAPH_TILE0(i), tmp);
467                         nv_wr32(dev, NV40_PGRAPH_TILE1(i), tmp);
468                         tmp = nv_rd32(dev, NV40_PFB_TLIMIT(i));
469                         nv_wr32(dev, NV47_PGRAPH_TLIMIT0(i), tmp);
470                         nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tmp);
471                         tmp = nv_rd32(dev, NV40_PFB_TSIZE(i));
472                         nv_wr32(dev, NV47_PGRAPH_TSIZE0(i), tmp);
473                         nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tmp);
474                         tmp = nv_rd32(dev, NV40_PFB_TSTATUS(i));
475                         nv_wr32(dev, NV47_PGRAPH_TSTATUS0(i), tmp);
476                         nv_wr32(dev, NV40_PGRAPH_TSTATUS1(i), tmp);
477                 }
478                 break;
479         default: /* everything else */
480                 for (i = 0; i < NV40_PFB_TILE__SIZE_0; i++) {
481                         tmp = nv_rd32(dev, NV40_PFB_TILE(i));
482                         nv_wr32(dev, NV40_PGRAPH_TILE0(i), tmp);
483                         nv_wr32(dev, NV40_PGRAPH_TILE1(i), tmp);
484                         tmp = nv_rd32(dev, NV40_PFB_TLIMIT(i));
485                         nv_wr32(dev, NV40_PGRAPH_TLIMIT0(i), tmp);
486                         nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tmp);
487                         tmp = nv_rd32(dev, NV40_PFB_TSIZE(i));
488                         nv_wr32(dev, NV40_PGRAPH_TSIZE0(i), tmp);
489                         nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tmp);
490                         tmp = nv_rd32(dev, NV40_PFB_TSTATUS(i));
491                         nv_wr32(dev, NV40_PGRAPH_TSTATUS0(i), tmp);
492                         nv_wr32(dev, NV40_PGRAPH_TSTATUS1(i), tmp);
493                 }
494                 break;
495         }
496
497         /* begin RAM config */
498         vramsz = drm_get_resource_len(dev, 0) - 1;
499         switch (dev_priv->chipset) {
500         case 0x40:
501                 nv_wr32(dev, 0x4009A4, nv_rd32(dev, NV04_PFB_CFG0));
502                 nv_wr32(dev, 0x4009A8, nv_rd32(dev, NV04_PFB_CFG1));
503                 nv_wr32(dev, 0x4069A4, nv_rd32(dev, NV04_PFB_CFG0));
504                 nv_wr32(dev, 0x4069A8, nv_rd32(dev, NV04_PFB_CFG1));
505                 nv_wr32(dev, 0x400820, 0);
506                 nv_wr32(dev, 0x400824, 0);
507                 nv_wr32(dev, 0x400864, vramsz);
508                 nv_wr32(dev, 0x400868, vramsz);
509                 break;
510         default:
511                 switch (dev_priv->chipset) {
512                 case 0x46:
513                 case 0x47:
514                 case 0x49:
515                 case 0x4b:
516                         nv_wr32(dev, 0x400DF0, nv_rd32(dev, NV04_PFB_CFG0));
517                         nv_wr32(dev, 0x400DF4, nv_rd32(dev, NV04_PFB_CFG1));
518                         break;
519                 default:
520                         nv_wr32(dev, 0x4009F0, nv_rd32(dev, NV04_PFB_CFG0));
521                         nv_wr32(dev, 0x4009F4, nv_rd32(dev, NV04_PFB_CFG1));
522                         break;
523                 }
524                 nv_wr32(dev, 0x4069F0, nv_rd32(dev, NV04_PFB_CFG0));
525                 nv_wr32(dev, 0x4069F4, nv_rd32(dev, NV04_PFB_CFG1));
526                 nv_wr32(dev, 0x400840, 0);
527                 nv_wr32(dev, 0x400844, 0);
528                 nv_wr32(dev, 0x4008A0, vramsz);
529                 nv_wr32(dev, 0x4008A4, vramsz);
530                 break;
531         }
532
533         return 0;
534 }
535
536 void nv40_graph_takedown(struct drm_device *dev)
537 {
538 }
539
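/* Object classes exposed by nv40-family PGRAPH; every entry is a hardware
 * class (second field false) with no software method handlers.
 */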
540 struct nouveau_pgraph_object_class nv40_graph_grclass[] = {
541         { 0x0030, false, NULL }, /* null */
542         { 0x0039, false, NULL }, /* m2mf */
543         { 0x004a, false, NULL }, /* gdirect */
544         { 0x009f, false, NULL }, /* imageblit (nv12) */
545         { 0x008a, false, NULL }, /* ifc */
546         { 0x0089, false, NULL }, /* sifm */
547         { 0x3089, false, NULL }, /* sifm (nv40) */
548         { 0x0062, false, NULL }, /* surf2d */
549         { 0x3062, false, NULL }, /* surf2d (nv40) */
550         { 0x0043, false, NULL }, /* rop */
551         { 0x0012, false, NULL }, /* beta1 */
552         { 0x0072, false, NULL }, /* beta4 */
553         { 0x0019, false, NULL }, /* cliprect */
554         { 0x0044, false, NULL }, /* pattern */
555         { 0x309e, false, NULL }, /* swzsurf */
556         { 0x4097, false, NULL }, /* curie (nv40) */
557         { 0x4497, false, NULL }, /* curie (nv44) */
558         {}
559 };
560