/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/console.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "atom.h"

/*
 * Clear GPU surface registers.
 */
static void radeon_surface_init(struct radeon_device *rdev)
{
        /* FIXME: check this out */
        if (rdev->family < CHIP_R600) {
                int i;

                for (i = 0; i < 8; i++) {
                        WREG32(RADEON_SURFACE0_INFO +
                               i * (RADEON_SURFACE1_INFO - RADEON_SURFACE0_INFO),
                               0);
                }
                /* enable surfaces */
                WREG32(RADEON_SURFACE_CNTL, 0);
        }
}

/*
 * GPU scratch register helper functions.
 */
static void radeon_scratch_init(struct radeon_device *rdev)
{
        int i;

        /* FIXME: check this out */
        if (rdev->family < CHIP_R300) {
                rdev->scratch.num_reg = 5;
        } else {
                rdev->scratch.num_reg = 7;
        }
        for (i = 0; i < rdev->scratch.num_reg; i++) {
                rdev->scratch.free[i] = true;
                rdev->scratch.reg[i] = RADEON_SCRATCH_REG0 + (i * 4);
        }
}

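/* Allocate a free scratch register and return its MMIO offset in *reg. */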
int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
{
        int i;

        for (i = 0; i < rdev->scratch.num_reg; i++) {
                if (rdev->scratch.free[i]) {
                        rdev->scratch.free[i] = false;
                        *reg = rdev->scratch.reg[i];
                        return 0;
                }
        }
        return -EINVAL;
}

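/* Return a scratch register obtained with radeon_scratch_get() to the pool. */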
void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
{
        int i;

        for (i = 0; i < rdev->scratch.num_reg; i++) {
                if (rdev->scratch.reg[i] == reg) {
                        rdev->scratch.free[i] = true;
                        return;
                }
        }
}

/*
 * MC common functions
 */
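/* Lay out VRAM and GTT in the GPU address space.  A location of 0xFFFFFFFF
 * means the caller has not fixed it yet and it is chosen here. */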
int radeon_mc_setup(struct radeon_device *rdev)
{
        uint32_t tmp;

        /* Some chips have an "issue" with the memory controller, the
         * location must be aligned to the size. We just align it down,
         * too bad if we walk over the top of system memory, we don't
         * use DMA without a remap anyway.
         * Affected chips are rv280, all r3xx, and all r4xx, but not IGP.
         */
        /* FGLRX seems to set it up like this: VRAM at 0, then GART.
         */
        /*
         * Note: from R6xx the address space is 40 bits, but here we only
         * use 32 bits (we have yet to see a card which would exhaust the
         * 4G address space).
         */
        if (rdev->mc.vram_location != 0xFFFFFFFFUL) {
                /* VRAM location was already set up; try to put GTT after
                 * it if it fits */
                tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size;
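                /* round the GTT start up to a multiple of its size; the mask
                 * trick below assumes gtt_size is a power of two */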
                tmp = (tmp + rdev->mc.gtt_size - 1) & ~(rdev->mc.gtt_size - 1);
                if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) {
                        rdev->mc.gtt_location = tmp;
                } else {
                        if (rdev->mc.gtt_size >= rdev->mc.vram_location) {
                                printk(KERN_ERR "[drm] GTT too big to fit "
                                       "before or after vram location.\n");
                                return -EINVAL;
                        }
                        rdev->mc.gtt_location = 0;
                }
        } else if (rdev->mc.gtt_location != 0xFFFFFFFFUL) {
                /* GTT location was already set up; try to put VRAM before
                 * it if it fits */
                if (rdev->mc.mc_vram_size < rdev->mc.gtt_location) {
                        rdev->mc.vram_location = 0;
                } else {
                        tmp = rdev->mc.gtt_location + rdev->mc.gtt_size;
                        tmp += (rdev->mc.mc_vram_size - 1);
                        tmp &= ~(rdev->mc.mc_vram_size - 1);
                        if ((0xFFFFFFFFUL - tmp) >= rdev->mc.mc_vram_size) {
                                rdev->mc.vram_location = tmp;
                        } else {
                                printk(KERN_ERR "[drm] vram too big to fit "
                                       "before or after GTT location.\n");
                                return -EINVAL;
                        }
                }
        } else {
                rdev->mc.vram_location = 0;
                tmp = rdev->mc.mc_vram_size;
                tmp = (tmp + rdev->mc.gtt_size - 1) & ~(rdev->mc.gtt_size - 1);
                rdev->mc.gtt_location = tmp;
        }
        DRM_INFO("radeon: VRAM %uM\n", rdev->mc.real_vram_size >> 20);
        DRM_INFO("radeon: VRAM from 0x%08X to 0x%08X\n",
                 rdev->mc.vram_location,
                 rdev->mc.vram_location + rdev->mc.mc_vram_size - 1);
        if (rdev->mc.real_vram_size != rdev->mc.mc_vram_size)
                DRM_INFO("radeon: VRAM less than aperture workaround enabled\n");
        DRM_INFO("radeon: GTT %uM\n", rdev->mc.gtt_size >> 20);
        DRM_INFO("radeon: GTT from 0x%08X to 0x%08X\n",
                 rdev->mc.gtt_location,
                 rdev->mc.gtt_location + rdev->mc.gtt_size - 1);
        return 0;
}



/*
 * GPU helper functions.
 */
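/* Check whether the card has already been POSTed, by looking for an enabled
 * CRTC or a programmed memory size register. */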
static bool radeon_card_posted(struct radeon_device *rdev)
{
        uint32_t reg;

        /* first check CRTCs */
        if (ASIC_IS_AVIVO(rdev)) {
                reg = RREG32(AVIVO_D1CRTC_CONTROL) |
                      RREG32(AVIVO_D2CRTC_CONTROL);
                if (reg & AVIVO_CRTC_EN) {
                        return true;
                }
        } else {
                reg = RREG32(RADEON_CRTC_GEN_CNTL) |
                      RREG32(RADEON_CRTC2_GEN_CNTL);
                if (reg & RADEON_CRTC_EN) {
                        return true;
                }
        }

        /* then check MEM_SIZE, in case the crtcs are off */
        if (rdev->family >= CHIP_R600)
                reg = RREG32(R600_CONFIG_MEMSIZE);
        else
                reg = RREG32(RADEON_CONFIG_MEMSIZE);

        if (reg)
                return true;

        return false;
}


/*
 * Register accessor functions.
 */
uint32_t radeon_invalid_rreg(struct radeon_device *rdev, uint32_t reg)
{
        DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
        BUG_ON(1);
        return 0;
}

void radeon_invalid_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
        DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
                  reg, v);
        BUG_ON(1);
}

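/* Install safe defaults for all register accessors, then override the
 * PLL/MC/PCIE/PCIEP ones with the variants this chip family actually uses. */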
void radeon_register_accessor_init(struct radeon_device *rdev)
{
        rdev->mm_rreg = &r100_mm_rreg;
        rdev->mm_wreg = &r100_mm_wreg;
        rdev->mc_rreg = &radeon_invalid_rreg;
        rdev->mc_wreg = &radeon_invalid_wreg;
        rdev->pll_rreg = &radeon_invalid_rreg;
        rdev->pll_wreg = &radeon_invalid_wreg;
        rdev->pcie_rreg = &radeon_invalid_rreg;
        rdev->pcie_wreg = &radeon_invalid_wreg;
        rdev->pciep_rreg = &radeon_invalid_rreg;
        rdev->pciep_wreg = &radeon_invalid_wreg;

        /* Don't change the order as we are overriding the accessors. */
        if (rdev->family < CHIP_RV515) {
                rdev->pcie_rreg = &rv370_pcie_rreg;
                rdev->pcie_wreg = &rv370_pcie_wreg;
        }
        if (rdev->family >= CHIP_RV515) {
                rdev->pcie_rreg = &rv515_pcie_rreg;
                rdev->pcie_wreg = &rv515_pcie_wreg;
        }
        /* FIXME: not sure here */
        if (rdev->family <= CHIP_R580) {
                rdev->pll_rreg = &r100_pll_rreg;
                rdev->pll_wreg = &r100_pll_wreg;
        }
        if (rdev->family >= CHIP_RV515) {
                rdev->mc_rreg = &rv515_mc_rreg;
                rdev->mc_wreg = &rv515_mc_wreg;
        }
        if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) {
                rdev->mc_rreg = &rs400_mc_rreg;
                rdev->mc_wreg = &rs400_mc_wreg;
        }
        if (rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
                rdev->mc_rreg = &rs690_mc_rreg;
                rdev->mc_wreg = &rs690_mc_wreg;
        }
        if (rdev->family == CHIP_RS600) {
                rdev->mc_rreg = &rs600_mc_rreg;
                rdev->mc_wreg = &rs600_mc_wreg;
        }
        if (rdev->family >= CHIP_R600) {
                rdev->pciep_rreg = &r600_pciep_rreg;
                rdev->pciep_wreg = &r600_pciep_wreg;
        }
}


/*
 * ASIC
 */
int radeon_asic_init(struct radeon_device *rdev)
{
        radeon_register_accessor_init(rdev);
        switch (rdev->family) {
        case CHIP_R100:
        case CHIP_RV100:
        case CHIP_RS100:
        case CHIP_RV200:
        case CHIP_RS200:
        case CHIP_R200:
        case CHIP_RV250:
        case CHIP_RS300:
        case CHIP_RV280:
                rdev->asic = &r100_asic;
                break;
        case CHIP_R300:
        case CHIP_R350:
        case CHIP_RV350:
        case CHIP_RV380:
                rdev->asic = &r300_asic;
                break;
        case CHIP_R420:
        case CHIP_R423:
        case CHIP_RV410:
                rdev->asic = &r420_asic;
                break;
        case CHIP_RS400:
        case CHIP_RS480:
                rdev->asic = &rs400_asic;
                break;
        case CHIP_RS600:
                rdev->asic = &rs600_asic;
                break;
        case CHIP_RS690:
        case CHIP_RS740:
                rdev->asic = &rs690_asic;
                break;
        case CHIP_RV515:
                rdev->asic = &rv515_asic;
                break;
        case CHIP_R520:
        case CHIP_RV530:
        case CHIP_RV560:
        case CHIP_RV570:
        case CHIP_R580:
                rdev->asic = &r520_asic;
                break;
        case CHIP_R600:
        case CHIP_RV610:
        case CHIP_RV630:
        case CHIP_RV620:
        case CHIP_RV635:
        case CHIP_RV670:
        case CHIP_RS780:
        case CHIP_RV770:
        case CHIP_RV730:
        case CHIP_RV710:
        default:
                /* FIXME: not supported yet */
                return -EINVAL;
        }
        return 0;
}


/*
 * Wrapper around modesetting bits.
 */
int radeon_clocks_init(struct radeon_device *rdev)
{
        int r;

        radeon_get_clock_info(rdev->ddev);
        r = radeon_static_clocks_init(rdev->ddev);
        if (r) {
                return r;
        }
        DRM_INFO("Clocks initialized!\n");
        return 0;
}

void radeon_clocks_fini(struct radeon_device *rdev)
{
}

/* ATOM accessor methods */
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
{
        struct radeon_device *rdev = info->dev->dev_private;
        uint32_t r;

        r = rdev->pll_rreg(rdev, reg);
        return r;
}

static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{
        struct radeon_device *rdev = info->dev->dev_private;

        rdev->pll_wreg(rdev, reg, val);
}

static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
{
        struct radeon_device *rdev = info->dev->dev_private;
        uint32_t r;

        r = rdev->mc_rreg(rdev, reg);
        return r;
}

static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{
        struct radeon_device *rdev = info->dev->dev_private;

        rdev->mc_wreg(rdev, reg, val);
}

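/* ATOM hands the MMIO helpers dword-indexed register offsets, hence the
 * "reg * 4" byte-offset conversion below. */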
static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
        struct radeon_device *rdev = info->dev->dev_private;

        WREG32(reg*4, val);
}

static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
{
        struct radeon_device *rdev = info->dev->dev_private;
        uint32_t r;

        r = RREG32(reg*4);
        return r;
}

static struct card_info atom_card_info = {
        .dev = NULL,
        .reg_read = cail_reg_read,
        .reg_write = cail_reg_write,
        .mc_read = cail_mc_read,
        .mc_write = cail_mc_write,
        .pll_read = cail_pll_read,
        .pll_write = cail_pll_write,
};

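/* Bind the ATOM interpreter to this device's register accessors and parse
 * the video BIOS image in rdev->bios. */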
int radeon_atombios_init(struct radeon_device *rdev)
{
        atom_card_info.dev = rdev->ddev;
        rdev->mode_info.atom_context = atom_parse(&atom_card_info, rdev->bios);
        radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
        return 0;
}

void radeon_atombios_fini(struct radeon_device *rdev)
{
        kfree(rdev->mode_info.atom_context);
}

int radeon_combios_init(struct radeon_device *rdev)
{
        radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
        return 0;
}

void radeon_combios_fini(struct radeon_device *rdev)
{
}

int radeon_modeset_init(struct radeon_device *rdev);
void radeon_modeset_fini(struct radeon_device *rdev);


/*
 * Radeon device.
 */
int radeon_device_init(struct radeon_device *rdev,
                       struct drm_device *ddev,
                       struct pci_dev *pdev,
                       uint32_t flags)
{
        int r, ret;
        int dma_bits;

        DRM_INFO("radeon: Initializing kernel modesetting.\n");
        rdev->shutdown = false;
        rdev->ddev = ddev;
        rdev->pdev = pdev;
        rdev->flags = flags;
        rdev->family = flags & RADEON_FAMILY_MASK;
        rdev->is_atom_bios = false;
        rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
        rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
        rdev->gpu_lockup = false;
        /* mutex initialization is all done here so these functions
         * can be called again later without locking issues */
        mutex_init(&rdev->cs_mutex);
        mutex_init(&rdev->ib_pool.mutex);
        mutex_init(&rdev->cp.mutex);
        rwlock_init(&rdev->fence_drv.lock);

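        /* radeon_agpmode == -1 disables AGP; force PCIE or PCI instead,
         * depending on what the chip family supports. */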
        if (radeon_agpmode == -1) {
                rdev->flags &= ~RADEON_IS_AGP;
                if (rdev->family > CHIP_RV515 ||
                    rdev->family == CHIP_RV380 ||
                    rdev->family == CHIP_RV410 ||
                    rdev->family == CHIP_R423) {
                        DRM_INFO("Forcing AGP to PCIE mode\n");
                        rdev->flags |= RADEON_IS_PCIE;
                } else {
                        DRM_INFO("Forcing AGP to PCI mode\n");
                        rdev->flags |= RADEON_IS_PCI;
                }
        }

        /* Set asic functions */
        r = radeon_asic_init(rdev);
        if (r) {
                return r;
        }
        r = radeon_init(rdev);
        if (r) {
                return r;
        }

        /* set DMA mask + need_dma32 flags.
         * PCIE - can handle 40-bits.
         * IGP - can handle 40-bits (in theory)
         * AGP - generally dma32 is safest
         * PCI - only dma32
         */
        rdev->need_dma32 = false;
        if (rdev->flags & RADEON_IS_AGP)
                rdev->need_dma32 = true;
        if (rdev->flags & RADEON_IS_PCI)
                rdev->need_dma32 = true;

        dma_bits = rdev->need_dma32 ? 32 : 40;
        r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
        if (r) {
                printk(KERN_WARNING "radeon: No suitable DMA available.\n");
        }

        /* Register mapping */
        /* TODO: block userspace mapping of io registers */
        rdev->rmmio_base = drm_get_resource_start(rdev->ddev, 2);
        rdev->rmmio_size = drm_get_resource_len(rdev->ddev, 2);
        rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
        if (rdev->rmmio == NULL) {
                return -ENOMEM;
        }
        DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
        DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);

        /* Set up errata flags */
        radeon_errata(rdev);
        /* Initialize scratch registers */
        radeon_scratch_init(rdev);
        /* Initialize surface registers */
        radeon_surface_init(rdev);

        /* TODO: disable VGA, need to use VGA request */
        /* BIOS */
        if (!radeon_get_bios(rdev)) {
                if (ASIC_IS_AVIVO(rdev))
                        return -EINVAL;
        }
        if (rdev->is_atom_bios) {
                r = radeon_atombios_init(rdev);
                if (r) {
                        return r;
                }
        } else {
                r = radeon_combios_init(rdev);
                if (r) {
                        return r;
                }
        }
        /* Reset the GPU before posting, otherwise ATOM will enter an infinite loop */
        if (radeon_gpu_reset(rdev)) {
                /* FIXME: what do we want to do here ? */
        }
        /* check if the card is posted or not */
        if (!radeon_card_posted(rdev) && rdev->bios) {
                DRM_INFO("GPU not posted. posting now...\n");
                if (rdev->is_atom_bios) {
                        atom_asic_init(rdev->mode_info.atom_context);
                } else {
                        radeon_combios_asic_init(rdev->ddev);
                }
        }
        /* Initialize clocks */
        r = radeon_clocks_init(rdev);
        if (r) {
                return r;
        }
        /* Get VRAM information */
        radeon_vram_info(rdev);

        /* Add an MTRR for the VRAM */
        rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size,
                                      MTRR_TYPE_WRCOMB, 1);
        DRM_INFO("Detected VRAM RAM=%uM, BAR=%uM\n",
                 rdev->mc.real_vram_size >> 20,
                 (unsigned)rdev->mc.aper_size >> 20);
        DRM_INFO("RAM width %dbits %cDR\n",
                 rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
        /* Initialize memory controller (also test AGP) */
        r = radeon_mc_init(rdev);
        if (r) {
                return r;
        }
        /* Fence driver */
        r = radeon_fence_driver_init(rdev);
        if (r) {
                return r;
        }
        r = radeon_irq_kms_init(rdev);
        if (r) {
                return r;
        }
        /* Memory manager */
        r = radeon_object_init(rdev);
        if (r) {
                return r;
        }
        /* Initialize GART (initialize after TTM so we can allocate
         * memory through TTM but finalize after TTM) */
        r = radeon_gart_enable(rdev);
        if (!r) {
                r = radeon_gem_init(rdev);
        }

        /* 1M ring buffer */
        if (!r) {
                r = radeon_cp_init(rdev, 1024 * 1024);
        }
        if (!r) {
                r = radeon_wb_init(rdev);
                if (r) {
                        DRM_ERROR("radeon: failed initializing WB (%d).\n", r);
                        return r;
                }
        }
        if (!r) {
                r = radeon_ib_pool_init(rdev);
                if (r) {
                        DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
                        return r;
                }
        }
        if (!r) {
                r = radeon_ib_test(rdev);
                if (r) {
                        DRM_ERROR("radeon: failed testing IB (%d).\n", r);
                        return r;
                }
        }
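        /* Remember whether acceleration came up; modesetting is still
         * initialized below even if the CP/IB setup above failed. */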
        ret = r;
        r = radeon_modeset_init(rdev);
        if (r) {
                return r;
        }
        if (!ret) {
                DRM_INFO("radeon: kernel modesetting successfully initialized.\n");
        }
        if (radeon_testing) {
                radeon_test_moves(rdev);
        }
        if (radeon_benchmarking) {
                radeon_benchmark(rdev);
        }
        return ret;
}

void radeon_device_fini(struct radeon_device *rdev)
{
        if (rdev == NULL || rdev->rmmio == NULL) {
                return;
        }
        DRM_INFO("radeon: finishing device.\n");
        rdev->shutdown = true;
        /* Order matters here, so be careful if you rearrange anything */
        radeon_modeset_fini(rdev);
        radeon_ib_pool_fini(rdev);
        radeon_cp_fini(rdev);
        radeon_wb_fini(rdev);
        radeon_gem_fini(rdev);
        radeon_object_fini(rdev);
        /* mc_fini must be after object_fini */
        radeon_mc_fini(rdev);
#if __OS_HAS_AGP
        radeon_agp_fini(rdev);
#endif
        radeon_irq_kms_fini(rdev);
        radeon_fence_driver_fini(rdev);
        radeon_clocks_fini(rdev);
        if (rdev->is_atom_bios) {
                radeon_atombios_fini(rdev);
        } else {
                radeon_combios_fini(rdev);
        }
        kfree(rdev->bios);
        rdev->bios = NULL;
        iounmap(rdev->rmmio);
        rdev->rmmio = NULL;
}


/*
 * Suspend & resume.
 */
int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
{
        struct radeon_device *rdev = dev->dev_private;
        struct drm_crtc *crtc;

        if (dev == NULL || rdev == NULL) {
                return -ENODEV;
        }
        if (state.event == PM_EVENT_PRETHAW) {
                return 0;
        }
        /* unpin the front buffers */
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
                struct radeon_object *robj;

                if (rfb == NULL || rfb->obj == NULL) {
                        continue;
                }
                robj = rfb->obj->driver_private;
                if (robj != rdev->fbdev_robj) {
                        radeon_object_unpin(robj);
                }
        }
        /* evict vram memory */
        radeon_object_evict_vram(rdev);
        /* wait for gpu to finish processing current batch */
        radeon_fence_wait_last(rdev);

        radeon_cp_disable(rdev);
        radeon_gart_disable(rdev);

        /* evict remaining vram memory */
        radeon_object_evict_vram(rdev);

        rdev->irq.sw_int = false;
        radeon_irq_set(rdev);

        pci_save_state(dev->pdev);
        if (state.event == PM_EVENT_SUSPEND) {
                /* Shut down the device */
                pci_disable_device(dev->pdev);
                pci_set_power_state(dev->pdev, PCI_D3hot);
        }
        acquire_console_sem();
        fb_set_suspend(rdev->fbdev_info, 1);
        release_console_sem();
        return 0;
}

int radeon_resume_kms(struct drm_device *dev)
{
        struct radeon_device *rdev = dev->dev_private;
        int r;

        acquire_console_sem();
        pci_set_power_state(dev->pdev, PCI_D0);
        pci_restore_state(dev->pdev);
        if (pci_enable_device(dev->pdev)) {
                release_console_sem();
                return -1;
        }
        pci_set_master(dev->pdev);
        /* Reset the GPU before posting, otherwise ATOM will enter an infinite loop */
        if (radeon_gpu_reset(rdev)) {
                /* FIXME: what do we want to do here ? */
        }
        /* post card */
        if (rdev->is_atom_bios) {
                atom_asic_init(rdev->mode_info.atom_context);
        } else {
                radeon_combios_asic_init(rdev->ddev);
        }
        /* Initialize clocks */
        r = radeon_clocks_init(rdev);
        if (r) {
                release_console_sem();
                return r;
        }
        /* Enable IRQ */
        rdev->irq.sw_int = true;
        radeon_irq_set(rdev);
        /* Initialize GPU Memory Controller */
        r = radeon_mc_init(rdev);
        if (r) {
                goto out;
        }
        r = radeon_gart_enable(rdev);
        if (r) {
                goto out;
        }
        r = radeon_cp_init(rdev, rdev->cp.ring_size);
        if (r) {
                goto out;
        }
out:
        fb_set_suspend(rdev->fbdev_info, 0);
        release_console_sem();

        /* blat the mode back in */
        drm_helper_resume_force_mode(dev);
        return 0;
}


/*
 * Debugfs
 */
struct radeon_debugfs {
        struct drm_info_list    *files;
        unsigned                num_files;
};
static struct radeon_debugfs _radeon_debugfs[RADEON_DEBUGFS_MAX_NUM_FILES];
static unsigned _radeon_debugfs_count = 0;

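/* Register a set of debugfs files for this device; a file list that has
 * already been registered is silently ignored. */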
int radeon_debugfs_add_files(struct radeon_device *rdev,
                             struct drm_info_list *files,
                             unsigned nfiles)
{
        unsigned i;

        for (i = 0; i < _radeon_debugfs_count; i++) {
                if (_radeon_debugfs[i].files == files) {
                        /* Already registered */
                        return 0;
                }
        }
        if ((_radeon_debugfs_count + nfiles) > RADEON_DEBUGFS_MAX_NUM_FILES) {
                DRM_ERROR("Reached maximum number of debugfs files.\n");
                DRM_ERROR("Report this so we can increase RADEON_DEBUGFS_MAX_NUM_FILES.\n");
                return -EINVAL;
        }
        _radeon_debugfs[_radeon_debugfs_count].files = files;
        _radeon_debugfs[_radeon_debugfs_count].num_files = nfiles;
        _radeon_debugfs_count++;
#if defined(CONFIG_DEBUG_FS)
        drm_debugfs_create_files(files, nfiles,
                                 rdev->ddev->control->debugfs_root,
                                 rdev->ddev->control);
        drm_debugfs_create_files(files, nfiles,
                                 rdev->ddev->primary->debugfs_root,
                                 rdev->ddev->primary);
#endif
        return 0;
}

#if defined(CONFIG_DEBUG_FS)
int radeon_debugfs_init(struct drm_minor *minor)
{
        return 0;
}

void radeon_debugfs_cleanup(struct drm_minor *minor)
{
        unsigned i;

        for (i = 0; i < _radeon_debugfs_count; i++) {
                drm_debugfs_remove_files(_radeon_debugfs[i].files,
                                         _radeon_debugfs[i].num_files, minor);
        }
}
#endif