 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 * Authors: Dave Airlie
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <drm/drm_crtc_helper.h>
#include "radeon_reg.h"
#include "radeon_asic.h"
#include "radeon_drm.h"
#include "r100_track.h"
#include "r300_reg_safe.h"
/* This file gathers functions specific to: r300, r350, rv350, rv370, rv380
 * - HOST_PATH_CNTL: the r300 family seems to dislike writes to HOST_PATH_CNTL
 * using MMIO to flush the host path read cache; this leads to a HARDLOCKUP.
 * However, scheduling such a write on the ring seems harmless. I suspect
 * the CP read collides with the flush somehow, or maybe the MC; hard to
 * tell. (Jerome Glisse)
 * rv370,rv380 PCIE GART
static int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev);
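/*
 * Invalidate the PCIE GART TLB: set the INVALIDATE_TLB bit in
 * PCIE_TX_GART_CNTL, read the register back, then restore its previous
 * value.  The sequence is run twice as a hardware bug workaround.
 */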
void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
	/* Workaround for a HW bug: do the flush 2 times */
	for (i = 0; i < 2; i++) {
		tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
		WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp | RADEON_PCIE_TX_GART_INVALIDATE_TLB);
		(void)RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
		WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
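/*
 * A PCIE GART entry is a single 32-bit word holding bits 39:8 of the
 * page's bus address: the low 32 bits are shifted down by 8 and the
 * upper address byte is placed in bits 31:24.
 */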
int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
	void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
	if (i < 0 || i >= rdev->gart.num_gpu_pages) {
	addr = (lower_32_bits(addr) >> 8) |
	       ((upper_32_bits(addr) & 0xff) << 24) |
	/* On x86 we want this to be CPU endian; on powerpc without HW
	 * swappers it will get swapped on the way into VRAM, so there is
	 * no need for cpu_to_le32 on VRAM tables. */
	writel(addr, ((void __iomem *)ptr) + (i * 4));
int rv370_pcie_gart_init(struct radeon_device *rdev)
	if (rdev->gart.table.vram.robj) {
		WARN(1, "RV370 PCIE GART already initialized.\n");
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	r = rv370_debugfs_pcie_gart_info_init(rdev);
		DRM_ERROR("Failed to register debugfs file for PCIE gart!\n");
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
	rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
	rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
	return radeon_gart_table_vram_alloc(rdev);
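/*
 * Point the PCIE GART at the page table in VRAM, program the GTT
 * aperture range (the START/END registers) and turn translation on,
 * then flush the TLB so the new mappings take effect.
 */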
int rv370_pcie_gart_enable(struct radeon_device *rdev)
	if (rdev->gart.table.vram.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
	r = radeon_gart_table_vram_pin(rdev);
	radeon_gart_restore(rdev);
	/* discard memory requests outside of the configured range */
	tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
	WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_start);
	tmp = rdev->mc.gtt_end & ~RADEON_GPU_PAGE_MASK;
	WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, tmp);
	WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
	WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
	table_addr = rdev->gart.table_addr;
	WREG32_PCIE(RADEON_PCIE_TX_GART_BASE, table_addr);
	/* FIXME: setup default page */
	WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_start);
	WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0);
	WREG32_PCIE(0x18, 0);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
	tmp |= RADEON_PCIE_TX_GART_EN;
	tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
	rv370_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%08X).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20), table_addr);
	rdev->gart.ready = true;
void rv370_pcie_gart_disable(struct radeon_device *rdev)
	WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, 0);
	WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, 0);
	WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
	WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
	tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
	if (rdev->gart.table.vram.robj) {
		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(rdev->gart.table.vram.robj);
			radeon_bo_unpin(rdev->gart.table.vram.robj);
			radeon_bo_unreserve(rdev->gart.table.vram.robj);
void rv370_pcie_gart_fini(struct radeon_device *rdev)
	radeon_gart_fini(rdev);
	rv370_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
void r300_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence)
	/* Whoever calls radeon_fence_emit should call ring_lock and ask
	 * for enough space (today the callers are ib schedule and buffer move) */
	/* Write SC register so SC & US assert idle */
	radeon_ring_write(rdev, PACKET0(R300_RE_SCISSORS_TL, 0));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, PACKET0(R300_RE_SCISSORS_BR, 0));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, R300_RB3D_DC_FLUSH);
	radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, R300_ZC_FLUSH);
	/* Wait until IDLE & CLEAN */
	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(rdev, (RADEON_WAIT_3D_IDLECLEAN |
				 RADEON_WAIT_2D_IDLECLEAN |
				 RADEON_WAIT_DMA_GUI_IDLE));
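	/* Flush the HDP read cache from the ring rather than via MMIO;
	 * see the HOST_PATH_CNTL note at the top of this file. */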
	radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
	radeon_ring_write(rdev, rdev->config.r300.hdp_cntl |
			  RADEON_HDP_READ_BUFFER_INVALIDATE);
	radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
	radeon_ring_write(rdev, rdev->config.r300.hdp_cntl);
	/* Emit fence sequence & fire IRQ */
	radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
	radeon_ring_write(rdev, fence->seq);
	radeon_ring_write(rdev, PACKET0(RADEON_GEN_INT_STATUS, 0));
	radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
void r300_ring_start(struct radeon_device *rdev)
	unsigned gb_tile_config;
	/* Sub-pixel 1/12 so we can have 4K rendering according to the docs */
	gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
	switch (rdev->num_gb_pipes) {
		gb_tile_config |= R300_PIPE_COUNT_R300;
		gb_tile_config |= R300_PIPE_COUNT_R420_3P;
		gb_tile_config |= R300_PIPE_COUNT_R420;
		gb_tile_config |= R300_PIPE_COUNT_RV350;
	r = radeon_ring_lock(rdev, 64);
	radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0));
	radeon_ring_write(rdev,
			  RADEON_ISYNC_ANY2D_IDLE3D |
			  RADEON_ISYNC_ANY3D_IDLE2D |
			  RADEON_ISYNC_WAIT_IDLEGUI |
			  RADEON_ISYNC_CPSCRATCH_IDLEGUI);
	radeon_ring_write(rdev, PACKET0(R300_GB_TILE_CONFIG, 0));
	radeon_ring_write(rdev, gb_tile_config);
	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(rdev,
			  RADEON_WAIT_2D_IDLECLEAN |
			  RADEON_WAIT_3D_IDLECLEAN);
	radeon_ring_write(rdev, PACKET0(R300_DST_PIPE_CONFIG, 0));
	radeon_ring_write(rdev, R300_PIPE_AUTO_CONFIG);
	radeon_ring_write(rdev, PACKET0(R300_GB_SELECT, 0));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, PACKET0(R300_GB_ENABLE, 0));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
	radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(rdev,
			  RADEON_WAIT_2D_IDLECLEAN |
			  RADEON_WAIT_3D_IDLECLEAN);
	radeon_ring_write(rdev, PACKET0(R300_GB_AA_CONFIG, 0));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
	radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
	radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS0, 0));
	radeon_ring_write(rdev,
			  ((6 << R300_MS_X0_SHIFT) |
			   (6 << R300_MS_Y0_SHIFT) |
			   (6 << R300_MS_X1_SHIFT) |
			   (6 << R300_MS_Y1_SHIFT) |
			   (6 << R300_MS_X2_SHIFT) |
			   (6 << R300_MS_Y2_SHIFT) |
			   (6 << R300_MSBD0_Y_SHIFT) |
			   (6 << R300_MSBD0_X_SHIFT)));
	radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS1, 0));
	radeon_ring_write(rdev,
			  ((6 << R300_MS_X3_SHIFT) |
			   (6 << R300_MS_Y3_SHIFT) |
			   (6 << R300_MS_X4_SHIFT) |
			   (6 << R300_MS_Y4_SHIFT) |
			   (6 << R300_MS_X5_SHIFT) |
			   (6 << R300_MS_Y5_SHIFT) |
			   (6 << R300_MSBD1_SHIFT)));
	radeon_ring_write(rdev, PACKET0(R300_GA_ENHANCE, 0));
	radeon_ring_write(rdev, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL);
	radeon_ring_write(rdev, PACKET0(R300_GA_POLY_MODE, 0));
	radeon_ring_write(rdev,
			  R300_FRONT_PTYPE_TRIANGE | R300_BACK_PTYPE_TRIANGE);
	radeon_ring_write(rdev, PACKET0(R300_GA_ROUND_MODE, 0));
	radeon_ring_write(rdev,
			  R300_GEOMETRY_ROUND_NEAREST |
			  R300_COLOR_ROUND_NEAREST);
	radeon_ring_unlock_commit(rdev);
void r300_errata(struct radeon_device *rdev)
	rdev->pll_errata = 0;
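	/* Flag the clock-gating (CG) PLL errata on early (rev A11) R300
	 * silicon. */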
	if (rdev->family == CHIP_R300 &&
	    (RREG32(RADEON_CONFIG_CNTL) & RADEON_CFG_ATI_REV_ID_MASK) == RADEON_CFG_ATI_REV_A11) {
		rdev->pll_errata |= CHIP_ERRATA_R300_CG;
int r300_mc_wait_for_idle(struct radeon_device *rdev)
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(RADEON_MC_STATUS);
		if (tmp & R300_MC_IDLE) {
void r300_gpu_init(struct radeon_device *rdev)
	uint32_t gb_tile_config, tmp;
	/* FIXME: does rv380 have one pipe? */
	if ((rdev->family == CHIP_R300 && rdev->pdev->device != 0x4144) ||
	    (rdev->family == CHIP_R350)) {
		rdev->num_gb_pipes = 2;
	/* rv350,rv370,rv380,r300 AD */
	rdev->num_gb_pipes = 1;
	rdev->num_z_pipes = 1;
	gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
	switch (rdev->num_gb_pipes) {
		gb_tile_config |= R300_PIPE_COUNT_R300;
		gb_tile_config |= R300_PIPE_COUNT_R420_3P;
		gb_tile_config |= R300_PIPE_COUNT_R420;
		gb_tile_config |= R300_PIPE_COUNT_RV350;
	WREG32(R300_GB_TILE_CONFIG, gb_tile_config);
	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	tmp = RREG32(R300_DST_PIPE_CONFIG);
	WREG32(R300_DST_PIPE_CONFIG, tmp | R300_PIPE_AUTO_CONFIG);
	WREG32(R300_RB2D_DSTCACHE_MODE,
	       R300_DC_AUTOFLUSH_ENABLE |
	       R300_DC_DC_DISABLE_IGNORE_PE);
	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	if (r300_mc_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait MC idle while "
		       "programming pipes. Bad things might happen.\n");
	DRM_INFO("radeon: %d quad pipes, %d Z pipes initialized.\n",
		 rdev->num_gb_pipes, rdev->num_z_pipes);
bool r300_gpu_is_lockup(struct radeon_device *rdev)
	rbbm_status = RREG32(R_000E40_RBBM_STATUS);
	if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
		r100_gpu_lockup_update(&rdev->config.r300.lockup, &rdev->cp);
	/* force CP activities */
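	/* 0x80000000 is the CP type-2 packet, i.e. a NOP filler dword */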
	r = radeon_ring_lock(rdev, 2);
	radeon_ring_write(rdev, 0x80000000);
	radeon_ring_write(rdev, 0x80000000);
	radeon_ring_unlock_commit(rdev);
	rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
	return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, &rdev->cp);
int r300_asic_reset(struct radeon_device *rdev)
	struct r100_mc_save save;
	r100_mc_stop(rdev, &save);
	status = RREG32(R_000E40_RBBM_STATUS);
	if (!G_000E40_GUI_ACTIVE(status)) {
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	WREG32(RADEON_CP_CSQ_CNTL, 0);
	tmp = RREG32(RADEON_CP_RB_CNTL);
	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
	WREG32(RADEON_CP_RB_RPTR_WR, 0);
	WREG32(RADEON_CP_RB_WPTR, 0);
	WREG32(RADEON_CP_RB_CNTL, tmp);
	pci_save_state(rdev->pdev);
	/* disable bus mastering */
	r100_bm_disable(rdev);
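	/* Soft reset the 3D blocks (VAP & GA) first, then the CP, then the
	 * MC, logging RBBM_STATUS after each step. */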
	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) |
					 S_0000F0_SOFT_RESET_GA(1));
	RREG32(R_0000F0_RBBM_SOFT_RESET);
	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* Resetting the CP seems to be problematic: sometimes it ends up
	 * hard locking the computer, but it's necessary for a successful
	 * reset. More testing & playing is needed on R3XX/R4XX to find a
	 * reliable (if any) solution.
	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
	RREG32(R_0000F0_RBBM_SOFT_RESET);
	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_MC(1));
	RREG32(R_0000F0_RBBM_SOFT_RESET);
	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* restore PCI & busmastering */
	pci_restore_state(rdev->pdev);
	r100_enable_bm(rdev);
	/* Check if GPU is idle */
	if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
		dev_err(rdev->dev, "failed to reset GPU\n");
		rdev->gpu_lockup = true;
	r100_mc_resume(rdev, &save);
	dev_info(rdev->dev, "GPU reset succeeded\n");
 * r300,r350,rv350,rv380 VRAM info
void r300_mc_init(struct radeon_device *rdev)
	/* DDR for all cards after R300 & IGP */
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32(RADEON_MEM_CNTL);
	tmp &= R300_MEM_NUM_CHANNELS_MASK;
	case 0: rdev->mc.vram_width = 64; break;
	case 1: rdev->mc.vram_width = 128; break;
	case 2: rdev->mc.vram_width = 256; break;
	default: rdev->mc.vram_width = 128; break;
	r100_vram_init_sizes(rdev);
	base = rdev->mc.aper_base;
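	/* On IGPs the VRAM base is taken from the low 16 bits of NB_TOM
	 * (in 64K units) rather than from the PCI aperture base. */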
	if (rdev->flags & RADEON_IS_IGP)
		base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
	radeon_vram_location(rdev, &rdev->mc, base);
	if (!(rdev->flags & RADEON_IS_AGP))
		radeon_gtt_location(rdev, &rdev->mc);
	radeon_update_bandwidth_info(rdev);
void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)
	uint32_t link_width_cntl, mask;
	if (rdev->flags & RADEON_IS_IGP)
	if (!(rdev->flags & RADEON_IS_PCIE))
	/* FIXME wait for idle */
		mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
		mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
		mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
		mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
		mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
		mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
		mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
	link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
	if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) ==
	    (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT))
	link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK |
			     RADEON_PCIE_LC_RECONFIG_NOW |
			     RADEON_PCIE_LC_RECONFIG_LATER |
			     RADEON_PCIE_LC_SHORT_RECONFIG_EN);
	link_width_cntl |= mask;
	WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
	WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl |
						     RADEON_PCIE_LC_RECONFIG_NOW));
	/* wait for lane set to complete */
	link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
	while (link_width_cntl == 0xffffffff)
		link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
int rv370_get_pcie_lanes(struct radeon_device *rdev)
	if (rdev->flags & RADEON_IS_IGP)
	if (!(rdev->flags & RADEON_IS_PCIE))
	/* FIXME wait for idle */
	if (rdev->family < CHIP_R600)
		link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
		link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
	switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
	case RADEON_PCIE_LC_LINK_WIDTH_X0:
	case RADEON_PCIE_LC_LINK_WIDTH_X1:
	case RADEON_PCIE_LC_LINK_WIDTH_X2:
	case RADEON_PCIE_LC_LINK_WIDTH_X4:
	case RADEON_PCIE_LC_LINK_WIDTH_X8:
	case RADEON_PCIE_LC_LINK_WIDTH_X16:
#if defined(CONFIG_DEBUG_FS)
static int rv370_debugfs_pcie_gart_info(struct seq_file *m, void *data)
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
	seq_printf(m, "PCIE_TX_GART_CNTL 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_BASE);
	seq_printf(m, "PCIE_TX_GART_BASE 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_LO);
	seq_printf(m, "PCIE_TX_GART_START_LO 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_HI);
	seq_printf(m, "PCIE_TX_GART_START_HI 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_LO);
	seq_printf(m, "PCIE_TX_GART_END_LO 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_HI);
	seq_printf(m, "PCIE_TX_GART_END_HI 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_ERROR);
	seq_printf(m, "PCIE_TX_GART_ERROR 0x%08x\n", tmp);
static struct drm_info_list rv370_pcie_gart_info_list[] = {
	{"rv370_pcie_gart_info", rv370_debugfs_pcie_gart_info, 0, NULL},
static int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, rv370_pcie_gart_info_list, 1);
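/*
 * r300_packet0_check() validates a single register write from a type-0
 * CS packet: it patches buffer relocations into the indirect buffer and
 * records color/depth/texture state in the r100_cs_track structure so
 * later draw packets can be range-checked.
 */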
static int r300_packet0_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt,
			      unsigned idx, unsigned reg)
	struct radeon_cs_reloc *reloc;
	struct r100_cs_track *track;
	volatile uint32_t *ib;
	uint32_t tmp, tile_flags = 0;
	track = (struct r100_cs_track *)p->track;
	idx_value = radeon_get_ib_value(p, idx);
	case AVIVO_D1MODE_VLINE_START_END:
	case RADEON_CRTC_GUI_TRIG_VLINE:
		r = r100_cs_packet_parse_vline(p);
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
			r100_cs_dump_packet(p, pkt);
	case RADEON_DST_PITCH_OFFSET:
	case RADEON_SRC_PITCH_OFFSET:
		r = r100_reloc_pitch_offset(p, pkt, idx, reg);
	case R300_RB3D_COLOROFFSET0:
	case R300_RB3D_COLOROFFSET1:
	case R300_RB3D_COLOROFFSET2:
	case R300_RB3D_COLOROFFSET3:
		i = (reg - R300_RB3D_COLOROFFSET0) >> 2;
		r = r100_cs_packet_next_reloc(p, &reloc);
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
			r100_cs_dump_packet(p, pkt);
		track->cb[i].robj = reloc->robj;
		track->cb[i].offset = idx_value;
		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
	case R300_ZB_DEPTHOFFSET:
		r = r100_cs_packet_next_reloc(p, &reloc);
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
			r100_cs_dump_packet(p, pkt);
		track->zb.robj = reloc->robj;
		track->zb.offset = idx_value;
		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
	case R300_TX_OFFSET_0:
	case R300_TX_OFFSET_0+4:
	case R300_TX_OFFSET_0+8:
	case R300_TX_OFFSET_0+12:
	case R300_TX_OFFSET_0+16:
	case R300_TX_OFFSET_0+20:
	case R300_TX_OFFSET_0+24:
	case R300_TX_OFFSET_0+28:
	case R300_TX_OFFSET_0+32:
	case R300_TX_OFFSET_0+36:
	case R300_TX_OFFSET_0+40:
	case R300_TX_OFFSET_0+44:
	case R300_TX_OFFSET_0+48:
	case R300_TX_OFFSET_0+52:
	case R300_TX_OFFSET_0+56:
	case R300_TX_OFFSET_0+60:
		i = (reg - R300_TX_OFFSET_0) >> 2;
		r = r100_cs_packet_next_reloc(p, &reloc);
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
			r100_cs_dump_packet(p, pkt);
		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
			tile_flags |= R300_TXO_MACRO_TILE;
		if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
			tile_flags |= R300_TXO_MICRO_TILE;
		else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
			tile_flags |= R300_TXO_MICRO_TILE_SQUARE;
		tmp = idx_value + ((u32)reloc->lobj.gpu_offset);
		track->textures[i].robj = reloc->robj;
	/* Tracked registers */
		track->vap_vf_cntl = idx_value;
		track->vtx_size = idx_value & 0x7F;
		/* VAP_VF_MAX_VTX_INDX */
		track->max_indx = idx_value & 0x00FFFFFFUL;
		/* VAP_ALT_NUM_VERTICES - only valid on r500 */
		if (p->rdev->family < CHIP_RV515)
		track->vap_alt_nverts = idx_value & 0xFFFFFF;
		track->maxy = ((idx_value >> 13) & 0x1FFF) + 1;
		if (p->rdev->family < CHIP_RV515) {
		track->num_cb = ((idx_value >> 5) & 0x3) + 1;
		/* RB3D_COLORPITCH0 */
		/* RB3D_COLORPITCH1 */
		/* RB3D_COLORPITCH2 */
		/* RB3D_COLORPITCH3 */
		r = r100_cs_packet_next_reloc(p, &reloc);
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
			r100_cs_dump_packet(p, pkt);
		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
			tile_flags |= R300_COLOR_TILE_ENABLE;
		if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
			tile_flags |= R300_COLOR_MICROTILE_ENABLE;
		else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
			tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE;
		tmp = idx_value & ~(0x7 << 16);
		i = (reg - 0x4E38) >> 2;
		track->cb[i].pitch = idx_value & 0x3FFE;
		switch (((idx_value >> 21) & 0xF)) {
			track->cb[i].cpp = 1;
			track->cb[i].cpp = 2;
			track->cb[i].cpp = 4;
			track->cb[i].cpp = 8;
			track->cb[i].cpp = 16;
			DRM_ERROR("Invalid color buffer format (%d)!\n",
				  ((idx_value >> 21) & 0xF));
		track->z_enabled = true;
		track->z_enabled = false;
		switch ((idx_value & 0xF)) {
			DRM_ERROR("Invalid z buffer format (%d)!\n",
		r = r100_cs_packet_next_reloc(p, &reloc);
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
			r100_cs_dump_packet(p, pkt);
		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
			tile_flags |= R300_DEPTHMACROTILE_ENABLE;
		if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
			tile_flags |= R300_DEPTHMICROTILE_TILED;
		else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
			tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE;
		tmp = idx_value & ~(0x7 << 16);
		track->zb.pitch = idx_value & 0x3FFC;
		for (i = 0; i < 16; i++) {
			enabled = !!(idx_value & (1 << i));
			track->textures[i].enabled = enabled;
		/* TX_FORMAT1_[0-15] */
		i = (reg - 0x44C0) >> 2;
		tmp = (idx_value >> 25) & 0x3;
		track->textures[i].tex_coord_type = tmp;
		switch ((idx_value & 0x1F)) {
		case R300_TX_FORMAT_X8:
		case R300_TX_FORMAT_Y4X4:
		case R300_TX_FORMAT_Z3Y3X2:
			track->textures[i].cpp = 1;
		case R300_TX_FORMAT_X16:
		case R300_TX_FORMAT_Y8X8:
		case R300_TX_FORMAT_Z5Y6X5:
		case R300_TX_FORMAT_Z6Y5X5:
		case R300_TX_FORMAT_W4Z4Y4X4:
		case R300_TX_FORMAT_W1Z5Y5X5:
		case R300_TX_FORMAT_D3DMFT_CxV8U8:
		case R300_TX_FORMAT_B8G8_B8G8:
		case R300_TX_FORMAT_G8R8_G8B8:
			track->textures[i].cpp = 2;
		case R300_TX_FORMAT_Y16X16:
		case R300_TX_FORMAT_Z11Y11X10:
		case R300_TX_FORMAT_Z10Y11X11:
		case R300_TX_FORMAT_W8Z8Y8X8:
		case R300_TX_FORMAT_W2Z10Y10X10:
		case R300_TX_FORMAT_FL_I32:
			track->textures[i].cpp = 4;
		case R300_TX_FORMAT_W16Z16Y16X16:
		case R300_TX_FORMAT_FL_R16G16B16A16:
		case R300_TX_FORMAT_FL_I32A32:
			track->textures[i].cpp = 8;
		case R300_TX_FORMAT_FL_R32G32B32A32:
			track->textures[i].cpp = 16;
		case R300_TX_FORMAT_DXT1:
			track->textures[i].cpp = 1;
			track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
		case R300_TX_FORMAT_ATI2N:
			if (p->rdev->family < CHIP_R420) {
				DRM_ERROR("Invalid texture format %u\n",
			/* The same rules apply as for DXT3/5. */
		case R300_TX_FORMAT_DXT3:
		case R300_TX_FORMAT_DXT5:
			track->textures[i].cpp = 1;
			track->textures[i].compress_format = R100_TRACK_COMP_DXT35;
			DRM_ERROR("Invalid texture format %u\n",
		/* TX_FILTER0_[0-15] */
		i = (reg - 0x4400) >> 2;
		tmp = idx_value & 0x7;
		if (tmp == 2 || tmp == 4 || tmp == 6) {
			track->textures[i].roundup_w = false;
		tmp = (idx_value >> 3) & 0x7;
		if (tmp == 2 || tmp == 4 || tmp == 6) {
			track->textures[i].roundup_h = false;
		/* TX_FORMAT2_[0-15] */
		i = (reg - 0x4500) >> 2;
		tmp = idx_value & 0x3FFF;
		track->textures[i].pitch = tmp + 1;
		if (p->rdev->family >= CHIP_RV515) {
			tmp = ((idx_value >> 15) & 1) << 11;
			track->textures[i].width_11 = tmp;
			tmp = ((idx_value >> 16) & 1) << 11;
			track->textures[i].height_11 = tmp;
			if (idx_value & (1 << 14)) {
				/* The same rules apply as for DXT1. */
				track->textures[i].compress_format =
					R100_TRACK_COMP_DXT1;
		} else if (idx_value & (1 << 14)) {
			DRM_ERROR("Forbidden bit TXFORMAT_MSB\n");
		/* TX_FORMAT0_[0-15] */
		i = (reg - 0x4480) >> 2;
		tmp = idx_value & 0x7FF;
		track->textures[i].width = tmp + 1;
		tmp = (idx_value >> 11) & 0x7FF;
		track->textures[i].height = tmp + 1;
		tmp = (idx_value >> 26) & 0xF;
		track->textures[i].num_levels = tmp;
		tmp = idx_value & (1 << 31);
		track->textures[i].use_pitch = !!tmp;
		tmp = (idx_value >> 22) & 0xF;
		track->textures[i].txdepth = tmp;
	case R300_ZB_ZPASS_ADDR:
		r = r100_cs_packet_next_reloc(p, &reloc);
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
			r100_cs_dump_packet(p, pkt);
		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
		/* RB3D_COLOR_CHANNEL_MASK */
		track->color_channel_mask = idx_value;
		track->fastfill = !!(idx_value & (1 << 2));
		/* RB3D_BLENDCNTL */
		track->blend_read_enable = !!(idx_value & (1 << 2));
		/* valid register only on RV530 */
		if (p->rdev->family == CHIP_RV530)
		/* fallthrough, do not move */
		printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
static int r300_packet3_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt)
	struct radeon_cs_reloc *reloc;
	struct r100_cs_track *track;
	volatile uint32_t *ib;
	track = (struct r100_cs_track *)p->track;
	switch (pkt->opcode) {
	case PACKET3_3D_LOAD_VBPNTR:
		r = r100_packet3_load_vbpntr(p, pkt, idx);
	case PACKET3_INDX_BUFFER:
		r = r100_cs_packet_next_reloc(p, &reloc);
			DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
			r100_cs_dump_packet(p, pkt);
		ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
		r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
	case PACKET3_3D_DRAW_IMMD:
		/* Number of dwords is vtx_size * (num_vertices - 1);
		 * PRIM_WALK must be equal to 3, vertex data is embedded
		if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) {
			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
		track->immd_dwords = pkt->count - 1;
		r = r100_cs_track_check(p->rdev, track);
	case PACKET3_3D_DRAW_IMMD_2:
		/* Number of dwords is vtx_size * (num_vertices - 1);
		 * PRIM_WALK must be equal to 3, vertex data is embedded
		if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) {
			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
		track->immd_dwords = pkt->count;
		r = r100_cs_track_check(p->rdev, track);
	case PACKET3_3D_DRAW_VBUF:
		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
		r = r100_cs_track_check(p->rdev, track);
	case PACKET3_3D_DRAW_VBUF_2:
		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
		r = r100_cs_track_check(p->rdev, track);
	case PACKET3_3D_DRAW_INDX:
		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
		r = r100_cs_track_check(p->rdev, track);
	case PACKET3_3D_DRAW_INDX_2:
		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
		r = r100_cs_track_check(p->rdev, track);
		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
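/*
 * r300_cs_parse() walks the command stream one packet at a time: type-0
 * packets are checked against the r300 safe-register bitmap (with
 * r300_packet0_check() handling registers that need relocation or state
 * tracking) and type-3 packets go through r300_packet3_check().
 */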
int r300_cs_parse(struct radeon_cs_parser *p)
	struct radeon_cs_packet pkt;
	struct r100_cs_track *track;
	track = kzalloc(sizeof(*track), GFP_KERNEL);
	r100_cs_track_clear(p->rdev, track);
		r = r100_cs_packet_parse(p, &pkt, p->idx);
		p->idx += pkt.count + 2;
			r = r100_cs_parse_packet0(p, &pkt,
						  p->rdev->config.r300.reg_safe_bm,
						  p->rdev->config.r300.reg_safe_bm_size,
						  &r300_packet0_check);
			r = r300_packet3_check(p, &pkt);
			DRM_ERROR("Unknown packet type %d!\n", pkt.type);
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
void r300_set_reg_safe(struct radeon_device *rdev)
	rdev->config.r300.reg_safe_bm = r300_reg_safe_bm;
	rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r300_reg_safe_bm);
void r300_mc_program(struct radeon_device *rdev)
	struct r100_mc_save save;
	r = r100_debugfs_mc_info_init(rdev);
		dev_err(rdev->dev, "Failed to create r100_mc debugfs file.\n");
	/* Stop all MC clients */
	r100_mc_stop(rdev, &save);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(R_00014C_MC_AGP_LOCATION,
		       S_00014C_MC_AGP_START(rdev->mc.gtt_start >> 16) |
		       S_00014C_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
		WREG32(R_000170_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
		WREG32(R_00015C_AGP_BASE_2,
		       upper_32_bits(rdev->mc.agp_base) & 0xff);
		WREG32(R_00014C_MC_AGP_LOCATION, 0x0FFFFFFF);
		WREG32(R_000170_AGP_BASE, 0);
		WREG32(R_00015C_AGP_BASE_2, 0);
	/* Wait for MC idle */
	if (r300_mc_wait_for_idle(rdev))
		DRM_INFO("Failed to wait MC idle before programming MC.\n");
	/* Program MC; should be a 32-bit limited address space */
	WREG32(R_000148_MC_FB_LOCATION,
	       S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
	       S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
	r100_mc_resume(rdev, &save);
void r300_clock_startup(struct radeon_device *rdev)
	if (radeon_dynclks != -1 && radeon_dynclks)
		radeon_legacy_set_clock_gating(rdev, 1);
	/* We need to force some of the blocks on */
	tmp = RREG32_PLL(R_00000D_SCLK_CNTL);
	tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
	if ((rdev->family == CHIP_RV350) || (rdev->family == CHIP_RV380))
		tmp |= S_00000D_FORCE_VAP(1);
	WREG32_PLL(R_00000D_SCLK_CNTL, tmp);
static int r300_startup(struct radeon_device *rdev)
	/* set common regs */
	r100_set_common_regs(rdev);
	r300_mc_program(rdev);
	r300_clock_startup(rdev);
	/* Initialize GPU configuration (# pipes, ...) */
	r300_gpu_init(rdev);
	/* Initialize GART (initialize after TTM so we can allocate
	 * memory through TTM but finalize after TTM) */
	if (rdev->flags & RADEON_IS_PCIE) {
		r = rv370_pcie_gart_enable(rdev);
	if (rdev->family == CHIP_R300 ||
	    rdev->family == CHIP_R350 ||
	    rdev->family == CHIP_RV350)
		r100_enable_bm(rdev);
	if (rdev->flags & RADEON_IS_PCI) {
		r = r100_pci_gart_enable(rdev);
	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
	/* 1M ring buffer */
	r = r100_cp_init(rdev, 1024 * 1024);
		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
	r = r100_wb_init(rdev);
		dev_err(rdev->dev, "failed initializing WB (%d).\n", r);
	r = r100_ib_init(rdev);
		dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
int r300_resume(struct radeon_device *rdev)
	/* Make sure GART is not working */
	if (rdev->flags & RADEON_IS_PCIE)
		rv370_pcie_gart_disable(rdev);
	if (rdev->flags & RADEON_IS_PCI)
		r100_pci_gart_disable(rdev);
	/* Resume clock before doing reset */
	r300_clock_startup(rdev);
	/* Reset the GPU before posting, otherwise ATOM will enter an infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev, "GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			 RREG32(R_000E40_RBBM_STATUS),
			 RREG32(R_0007C0_CP_STAT));
	radeon_combios_asic_init(rdev->ddev);
	/* Resume clock after posting */
	r300_clock_startup(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	return r300_startup(rdev);
int r300_suspend(struct radeon_device *rdev)
	r100_cp_disable(rdev);
	r100_wb_disable(rdev);
	r100_irq_disable(rdev);
	if (rdev->flags & RADEON_IS_PCIE)
		rv370_pcie_gart_disable(rdev);
	if (rdev->flags & RADEON_IS_PCI)
		r100_pci_gart_disable(rdev);
void r300_fini(struct radeon_device *rdev)
	radeon_pm_fini(rdev);
	radeon_gem_fini(rdev);
	if (rdev->flags & RADEON_IS_PCIE)
		rv370_pcie_gart_fini(rdev);
	if (rdev->flags & RADEON_IS_PCI)
		r100_pci_gart_fini(rdev);
	radeon_agp_fini(rdev);
	radeon_irq_kms_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
int r300_init(struct radeon_device *rdev)
	r100_vga_render_disable(rdev);
	/* Initialize scratch registers */
	radeon_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* TODO: disable VGA, need to use VGA request */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
	if (rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n");
	r = radeon_combios_init(rdev);
	/* Reset the GPU before posting, otherwise ATOM will enter an infinite loop */
	if (radeon_asic_reset(rdev)) {
			 "GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			 RREG32(R_000E40_RBBM_STATUS),
			 RREG32(R_0007C0_CP_STAT));
	/* check if the card is posted or not */
	if (radeon_boot_test_post_card(rdev) == false)
	/* Set asic errata */
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* Initialize power management */
	radeon_pm_init(rdev);
	/* initialize AGP */
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
			radeon_agp_disable(rdev);
	/* initialize memory controller */
	r = radeon_fence_driver_init(rdev);
	r = radeon_irq_kms_init(rdev);
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (rdev->flags & RADEON_IS_PCIE) {
		r = rv370_pcie_gart_init(rdev);
	if (rdev->flags & RADEON_IS_PCI) {
		r = r100_pci_gart_init(rdev);
	r300_set_reg_safe(rdev);
	rdev->accel_working = true;
	r = r300_startup(rdev);
		/* Something went wrong with the accel init; stop accel */
		dev_err(rdev->dev, "Disabling GPU acceleration\n");
		radeon_irq_kms_fini(rdev);
		if (rdev->flags & RADEON_IS_PCIE)
			rv370_pcie_gart_fini(rdev);
		if (rdev->flags & RADEON_IS_PCI)
			r100_pci_gart_fini(rdev);
		radeon_agp_fini(rdev);
		rdev->accel_working = false;