/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_drm.h"
#include "rv770d.h"
#include "atom.h"
#include "avivod.h"

#define R700_PFP_UCODE_SIZE 848
#define R700_PM4_UCODE_SIZE 1360

static void rv770_gpu_init(struct radeon_device *rdev);
void rv770_fini(struct radeon_device *rdev);

void rv770_pm_misc(struct radeon_device *rdev)
{
	/* no r7xx-specific power management tweaks are implemented yet */
}

/*
 * GART
 */
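/* Enable the PCIE GART: program the VM L2 cache and L1 TLBs, point VM
 * context 0 at the GART page table pinned in VRAM, and flush the TLB.
 * Returns 0 on success or a negative error code if the page-table BO
 * is missing or cannot be pinned.
 */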
int rv770_pcie_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r, i;

	if (rdev->gart.table.vram.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(rdev->dummy_page.addr >> 12));
	for (i = 1; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	r600_pcie_gart_tlb_flush(rdev);
	rdev->gart.ready = true;
	return 0;
}

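/* Disable the PCIE GART: clear all VM contexts, drop the L2 cache and
 * L1 TLB enable bits, and unmap/unpin the page-table BO if it exists.
 */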
void rv770_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;
	int i, r;

	/* Disable all tables */
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
	/* Setup TLB control */
	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
	if (rdev->gart.table.vram.robj) {
		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(rdev->gart.table.vram.robj);
			radeon_bo_unpin(rdev->gart.table.vram.robj);
			radeon_bo_unreserve(rdev->gart.table.vram.robj);
		}
	}
}

void rv770_pcie_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	rv770_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
}

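/* AGP path: program the same L2 cache and TLB setup as the GART path,
 * but leave all VM contexts disabled since system memory is reached
 * through the AGP aperture rather than the GPU page table.
 */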
void rv770_agp_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
}

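/* Program the memory controller: stop the MC while display state is
 * saved, set up the system/VRAM/AGP apertures and the FB location,
 * then restore display state and disable the VGA renderer so it cannot
 * scribble over our VRAM objects.
 */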
static void rv770_mc_program(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Lockout access through VGA aperture */
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	if (rdev->flags & RADEON_IS_AGP) {
		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
			/* VRAM before AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.vram_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.gtt_end >> 12);
		} else {
			/* VRAM after AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.gtt_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.vram_end >> 12);
		}
	} else {
		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
			rdev->mc.vram_start >> 12);
		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			rdev->mc.vram_end >> 12);
	}
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
	WREG32(HDP_NONSURFACE_SIZE, (rdev->mc.mc_vram_size - 1) | 0x3FF);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
	} else {
		WREG32(MC_VM_AGP_BASE, 0);
		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	}
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	rv515_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it from overwriting our objects */
	rv515_vga_render_disable(rdev);
}


/*
 * CP.
 */
void r700_cp_stop(struct radeon_device *rdev)
{
	WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
}

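/* Upload the PFP and ME (PM4) microcode images to the CP after a soft
 * reset of the CP block; the CP is restarted later via r600_cp_resume().
 */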
static int rv770_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i;

	if (!rdev->me_fw || !rdev->pfp_fw)
		return -EINVAL;

	r700_cp_stop(rdev);
	WREG32(CP_RB_CNTL, RB_NO_UPDATE | (15 << 8) | (3 << 0));

	/* Reset cp */
	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);

	fw_data = (const __be32 *)rdev->pfp_fw->data;
	WREG32(CP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < R700_PFP_UCODE_SIZE; i++)
		WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
	WREG32(CP_PFP_UCODE_ADDR, 0);

	fw_data = (const __be32 *)rdev->me_fw->data;
	WREG32(CP_ME_RAM_WADDR, 0);
	for (i = 0; i < R700_PM4_UCODE_SIZE; i++)
		WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));

	WREG32(CP_PFP_UCODE_ADDR, 0);
	WREG32(CP_ME_RAM_WADDR, 0);
	WREG32(CP_ME_RAM_RADDR, 0);
	return 0;
}

void r700_cp_fini(struct radeon_device *rdev)
{
	r700_cp_stop(rdev);
	radeon_ring_fini(rdev);
}

/*
 * Core functions
 */
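/* Build the 2-bit-per-pipe tile pipe to render backend mapping used in
 * GB_TILING_CONFIG, clamping the pipe/backend counts and optionally
 * swizzling the pipe order depending on the chip family.
 */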
static u32 r700_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
					     u32 num_tile_pipes,
					     u32 num_backends,
					     u32 backend_disable_mask)
{
	u32 backend_map = 0;
	u32 enabled_backends_mask;
	u32 enabled_backends_count;
	u32 cur_pipe;
	u32 swizzle_pipe[R7XX_MAX_PIPES];
	u32 cur_backend;
	u32 i;
	bool force_no_swizzle;

	if (num_tile_pipes > R7XX_MAX_PIPES)
		num_tile_pipes = R7XX_MAX_PIPES;
	if (num_tile_pipes < 1)
		num_tile_pipes = 1;
	if (num_backends > R7XX_MAX_BACKENDS)
		num_backends = R7XX_MAX_BACKENDS;
	if (num_backends < 1)
		num_backends = 1;

	enabled_backends_mask = 0;
	enabled_backends_count = 0;
	for (i = 0; i < R7XX_MAX_BACKENDS; ++i) {
		if (((backend_disable_mask >> i) & 1) == 0) {
			enabled_backends_mask |= (1 << i);
			++enabled_backends_count;
		}
		if (enabled_backends_count == num_backends)
			break;
	}

	if (enabled_backends_count == 0) {
		enabled_backends_mask = 1;
		enabled_backends_count = 1;
	}

	if (enabled_backends_count != num_backends)
		num_backends = enabled_backends_count;

	switch (rdev->family) {
	case CHIP_RV770:
	case CHIP_RV730:
		force_no_swizzle = false;
		break;
	case CHIP_RV710:
	case CHIP_RV740:
	default:
		force_no_swizzle = true;
		break;
	}

	memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R7XX_MAX_PIPES);
	switch (num_tile_pipes) {
	case 1:
		swizzle_pipe[0] = 0;
		break;
	case 2:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		break;
	case 3:
		if (force_no_swizzle) {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 1;
			swizzle_pipe[2] = 2;
		} else {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 2;
			swizzle_pipe[2] = 1;
		}
		break;
	case 4:
		if (force_no_swizzle) {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 1;
			swizzle_pipe[2] = 2;
			swizzle_pipe[3] = 3;
		} else {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 2;
			swizzle_pipe[2] = 3;
			swizzle_pipe[3] = 1;
		}
		break;
	case 5:
		if (force_no_swizzle) {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 1;
			swizzle_pipe[2] = 2;
			swizzle_pipe[3] = 3;
			swizzle_pipe[4] = 4;
		} else {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 2;
			swizzle_pipe[2] = 4;
			swizzle_pipe[3] = 1;
			swizzle_pipe[4] = 3;
		}
		break;
	case 6:
		if (force_no_swizzle) {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 1;
			swizzle_pipe[2] = 2;
			swizzle_pipe[3] = 3;
			swizzle_pipe[4] = 4;
			swizzle_pipe[5] = 5;
		} else {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 2;
			swizzle_pipe[2] = 4;
			swizzle_pipe[3] = 5;
			swizzle_pipe[4] = 3;
			swizzle_pipe[5] = 1;
		}
		break;
	case 7:
		if (force_no_swizzle) {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 1;
			swizzle_pipe[2] = 2;
			swizzle_pipe[3] = 3;
			swizzle_pipe[4] = 4;
			swizzle_pipe[5] = 5;
			swizzle_pipe[6] = 6;
		} else {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 2;
			swizzle_pipe[2] = 4;
			swizzle_pipe[3] = 6;
			swizzle_pipe[4] = 3;
			swizzle_pipe[5] = 1;
			swizzle_pipe[6] = 5;
		}
		break;
	case 8:
		if (force_no_swizzle) {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 1;
			swizzle_pipe[2] = 2;
			swizzle_pipe[3] = 3;
			swizzle_pipe[4] = 4;
			swizzle_pipe[5] = 5;
			swizzle_pipe[6] = 6;
			swizzle_pipe[7] = 7;
		} else {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 2;
			swizzle_pipe[2] = 4;
			swizzle_pipe[3] = 6;
			swizzle_pipe[4] = 3;
			swizzle_pipe[5] = 1;
			swizzle_pipe[6] = 7;
			swizzle_pipe[7] = 5;
		}
		break;
	}

	cur_backend = 0;
	for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
		while (((1 << cur_backend) & enabled_backends_mask) == 0)
			cur_backend = (cur_backend + 1) % R7XX_MAX_BACKENDS;

		backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2)));

		cur_backend = (cur_backend + 1) % R7XX_MAX_BACKENDS;
	}

	return backend_map;
}

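/* One-time 3D engine setup: fill in the per-family chip limits, program
 * the tiling configuration and backend map, and load HW default values
 * for the SQ, SX, SC, DB, VGT and SPI blocks. The 2D/3D drivers are
 * expected to adjust most of these defaults at runtime.
 */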
static void rv770_gpu_init(struct radeon_device *rdev)
{
	int i, j, num_qd_pipes;
	u32 ta_aux_cntl;
	u32 sx_debug_1;
	u32 smx_dc_ctl0;
	u32 db_debug3;
	u32 num_gs_verts_per_thread;
	u32 vgt_gs_per_es;
	u32 gs_prim_buffer_depth = 0;
	u32 sq_ms_fifo_sizes;
	u32 sq_config;
	u32 sq_thread_resource_mgmt;
	u32 hdp_host_path_cntl;
	u32 sq_dyn_gpr_size_simd_ab_0;
	u32 backend_map;
	u32 gb_tiling_config = 0;
	u32 cc_rb_backend_disable = 0;
	u32 cc_gc_shader_pipe_config = 0;
	u32 mc_arb_ramcfg;
	u32 db_debug4;

	/* setup chip specs */
	switch (rdev->family) {
	case CHIP_RV770:
		rdev->config.rv770.max_pipes = 4;
		rdev->config.rv770.max_tile_pipes = 8;
		rdev->config.rv770.max_simds = 10;
		rdev->config.rv770.max_backends = 4;
		rdev->config.rv770.max_gprs = 256;
		rdev->config.rv770.max_threads = 248;
		rdev->config.rv770.max_stack_entries = 512;
		rdev->config.rv770.max_hw_contexts = 8;
		rdev->config.rv770.max_gs_threads = 16 * 2;
		rdev->config.rv770.sx_max_export_size = 128;
		rdev->config.rv770.sx_max_export_pos_size = 16;
		rdev->config.rv770.sx_max_export_smx_size = 112;
		rdev->config.rv770.sq_num_cf_insts = 2;

		rdev->config.rv770.sx_num_of_sets = 7;
		rdev->config.rv770.sc_prim_fifo_size = 0xF9;
		rdev->config.rv770.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.rv770.sc_earlyz_tile_fifo_fize = 0x130;
		break;
	case CHIP_RV730:
		rdev->config.rv770.max_pipes = 2;
		rdev->config.rv770.max_tile_pipes = 4;
		rdev->config.rv770.max_simds = 8;
		rdev->config.rv770.max_backends = 2;
		rdev->config.rv770.max_gprs = 128;
		rdev->config.rv770.max_threads = 248;
		rdev->config.rv770.max_stack_entries = 256;
		rdev->config.rv770.max_hw_contexts = 8;
		rdev->config.rv770.max_gs_threads = 16 * 2;
		rdev->config.rv770.sx_max_export_size = 256;
		rdev->config.rv770.sx_max_export_pos_size = 32;
		rdev->config.rv770.sx_max_export_smx_size = 224;
		rdev->config.rv770.sq_num_cf_insts = 2;

		rdev->config.rv770.sx_num_of_sets = 7;
		rdev->config.rv770.sc_prim_fifo_size = 0xf9;
		rdev->config.rv770.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.rv770.sc_earlyz_tile_fifo_fize = 0x130;
		if (rdev->config.rv770.sx_max_export_pos_size > 16) {
			rdev->config.rv770.sx_max_export_pos_size -= 16;
			rdev->config.rv770.sx_max_export_smx_size += 16;
		}
		break;
	case CHIP_RV710:
		rdev->config.rv770.max_pipes = 2;
		rdev->config.rv770.max_tile_pipes = 2;
		rdev->config.rv770.max_simds = 2;
		rdev->config.rv770.max_backends = 1;
		rdev->config.rv770.max_gprs = 256;
		rdev->config.rv770.max_threads = 192;
		rdev->config.rv770.max_stack_entries = 256;
		rdev->config.rv770.max_hw_contexts = 4;
		rdev->config.rv770.max_gs_threads = 8 * 2;
		rdev->config.rv770.sx_max_export_size = 128;
		rdev->config.rv770.sx_max_export_pos_size = 16;
		rdev->config.rv770.sx_max_export_smx_size = 112;
		rdev->config.rv770.sq_num_cf_insts = 1;

		rdev->config.rv770.sx_num_of_sets = 7;
		rdev->config.rv770.sc_prim_fifo_size = 0x40;
		rdev->config.rv770.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.rv770.sc_earlyz_tile_fifo_fize = 0x130;
		break;
	case CHIP_RV740:
		rdev->config.rv770.max_pipes = 4;
		rdev->config.rv770.max_tile_pipes = 4;
		rdev->config.rv770.max_simds = 8;
		rdev->config.rv770.max_backends = 4;
		rdev->config.rv770.max_gprs = 256;
		rdev->config.rv770.max_threads = 248;
		rdev->config.rv770.max_stack_entries = 512;
		rdev->config.rv770.max_hw_contexts = 8;
		rdev->config.rv770.max_gs_threads = 16 * 2;
		rdev->config.rv770.sx_max_export_size = 256;
		rdev->config.rv770.sx_max_export_pos_size = 32;
		rdev->config.rv770.sx_max_export_smx_size = 224;
		rdev->config.rv770.sq_num_cf_insts = 2;

		rdev->config.rv770.sx_num_of_sets = 7;
		rdev->config.rv770.sc_prim_fifo_size = 0x100;
		rdev->config.rv770.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.rv770.sc_earlyz_tile_fifo_fize = 0x130;

		if (rdev->config.rv770.sx_max_export_pos_size > 16) {
			rdev->config.rv770.sx_max_export_pos_size -= 16;
			rdev->config.rv770.sx_max_export_smx_size += 16;
		}
		break;
	default:
		break;
	}

	/* Initialize HDP */
	j = 0;
	for (i = 0; i < 32; i++) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
		j += 0x18;
	}

	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));

	/* setup tiling, simd, pipe config */
	mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);

	switch (rdev->config.rv770.max_tile_pipes) {
	case 1:
	default:
		gb_tiling_config |= PIPE_TILING(0);
		break;
	case 2:
		gb_tiling_config |= PIPE_TILING(1);
		break;
	case 4:
		gb_tiling_config |= PIPE_TILING(2);
		break;
	case 8:
		gb_tiling_config |= PIPE_TILING(3);
		break;
	}
	rdev->config.rv770.tiling_npipes = rdev->config.rv770.max_tile_pipes;

	if (rdev->family == CHIP_RV770)
		gb_tiling_config |= BANK_TILING(1);
	else
		gb_tiling_config |= BANK_TILING((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
	rdev->config.rv770.tiling_nbanks = 4 << ((gb_tiling_config >> 4) & 0x3);

	gb_tiling_config |= GROUP_SIZE(0);
	rdev->config.rv770.tiling_group_size = 256;

	if (((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT) > 3) {
		gb_tiling_config |= ROW_TILING(3);
		gb_tiling_config |= SAMPLE_SPLIT(3);
	} else {
		gb_tiling_config |=
			ROW_TILING(((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT));
		gb_tiling_config |=
			SAMPLE_SPLIT(((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT));
	}

	gb_tiling_config |= BANK_SWAPS(1);

	cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
	cc_rb_backend_disable |=
		BACKEND_DISABLE((R7XX_MAX_BACKENDS_MASK << rdev->config.rv770.max_backends) & R7XX_MAX_BACKENDS_MASK);

	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
	cc_gc_shader_pipe_config |=
		INACTIVE_QD_PIPES((R7XX_MAX_PIPES_MASK << rdev->config.rv770.max_pipes) & R7XX_MAX_PIPES_MASK);
	cc_gc_shader_pipe_config |=
		INACTIVE_SIMDS((R7XX_MAX_SIMDS_MASK << rdev->config.rv770.max_simds) & R7XX_MAX_SIMDS_MASK);

	if (rdev->family == CHIP_RV740)
		backend_map = 0x28;
	else
		backend_map = r700_get_tile_pipe_to_backend_map(rdev,
								rdev->config.rv770.max_tile_pipes,
								(R7XX_MAX_BACKENDS -
								 r600_count_pipe_bits((cc_rb_backend_disable &
										       R7XX_MAX_BACKENDS_MASK) >> 16)),
								(cc_rb_backend_disable >> 16));
	gb_tiling_config |= BACKEND_MAP(backend_map);

	WREG32(GB_TILING_CONFIG, gb_tiling_config);
	WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
	WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));

	WREG32(CC_RB_BACKEND_DISABLE,      cc_rb_backend_disable);
	WREG32(CC_GC_SHADER_PIPE_CONFIG,   cc_gc_shader_pipe_config);
	WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
	WREG32(CC_SYS_RB_BACKEND_DISABLE,  cc_rb_backend_disable);

	WREG32(CGTS_SYS_TCC_DISABLE, 0);
	WREG32(CGTS_TCC_DISABLE, 0);
	WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
	WREG32(CGTS_USER_TCC_DISABLE, 0);

	num_qd_pipes =
		R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
	WREG32(VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & DEALLOC_DIST_MASK);
	WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & VTX_REUSE_DEPTH_MASK);

	/* set HW defaults for 3D engine */
	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
				     ROQ_IB2_START(0x2b)));

	WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30));

	ta_aux_cntl = RREG32(TA_CNTL_AUX);
	WREG32(TA_CNTL_AUX, ta_aux_cntl | DISABLE_CUBE_ANISO);

	sx_debug_1 = RREG32(SX_DEBUG_1);
	sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
	WREG32(SX_DEBUG_1, sx_debug_1);

	smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
	smx_dc_ctl0 &= ~CACHE_DEPTH(0x1ff);
	smx_dc_ctl0 |= CACHE_DEPTH((rdev->config.rv770.sx_num_of_sets * 64) - 1);
	WREG32(SMX_DC_CTL0, smx_dc_ctl0);

	if (rdev->family != CHIP_RV740)
		WREG32(SMX_EVENT_CTL, (ES_FLUSH_CTL(4) |
				       GS_FLUSH_CTL(4) |
				       ACK_FLUSH_CTL(3) |
				       SYNC_FLUSH_CTL));

	db_debug3 = RREG32(DB_DEBUG3);
	db_debug3 &= ~DB_CLK_OFF_DELAY(0x1f);
	switch (rdev->family) {
	case CHIP_RV770:
	case CHIP_RV740:
		db_debug3 |= DB_CLK_OFF_DELAY(0x1f);
		break;
	case CHIP_RV710:
	case CHIP_RV730:
	default:
		db_debug3 |= DB_CLK_OFF_DELAY(2);
		break;
	}
	WREG32(DB_DEBUG3, db_debug3);

	if (rdev->family != CHIP_RV770) {
		db_debug4 = RREG32(DB_DEBUG4);
		db_debug4 |= DISABLE_TILE_COVERED_FOR_PS_ITER;
		WREG32(DB_DEBUG4, db_debug4);
	}

	WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.rv770.sx_max_export_size / 4) - 1) |
					POSITION_BUFFER_SIZE((rdev->config.rv770.sx_max_export_pos_size / 4) - 1) |
					SMX_BUFFER_SIZE((rdev->config.rv770.sx_max_export_smx_size / 4) - 1)));

	WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.rv770.sc_prim_fifo_size) |
				 SC_HIZ_TILE_FIFO_SIZE(rdev->config.rv770.sc_hiz_tile_fifo_size) |
				 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.rv770.sc_earlyz_tile_fifo_fize)));

	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);

	WREG32(VGT_NUM_INSTANCES, 1);

	WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));

	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));

	WREG32(CP_PERFMON_CNTL, 0);

	sq_ms_fifo_sizes = (CACHE_FIFO_SIZE(16 * rdev->config.rv770.sq_num_cf_insts) |
			    DONE_FIFO_HIWATER(0xe0) |
			    ALU_UPDATE_FIFO_HIWATER(0x8));
	switch (rdev->family) {
	case CHIP_RV770:
	case CHIP_RV730:
	case CHIP_RV710:
		sq_ms_fifo_sizes |= FETCH_FIFO_HIWATER(0x1);
		break;
	case CHIP_RV740:
	default:
		sq_ms_fifo_sizes |= FETCH_FIFO_HIWATER(0x4);
		break;
	}
	WREG32(SQ_MS_FIFO_SIZES, sq_ms_fifo_sizes);

	/* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
	 * should be adjusted as needed by the 2D/3D drivers.  This just sets default values
	 */
	sq_config = RREG32(SQ_CONFIG);
	sq_config &= ~(PS_PRIO(3) |
		       VS_PRIO(3) |
		       GS_PRIO(3) |
		       ES_PRIO(3));
	sq_config |= (DX9_CONSTS |
		      VC_ENABLE |
		      EXPORT_SRC_C |
		      PS_PRIO(0) |
		      VS_PRIO(1) |
		      GS_PRIO(2) |
		      ES_PRIO(3));
	if (rdev->family == CHIP_RV710)
		/* no vertex cache */
		sq_config &= ~VC_ENABLE;

	WREG32(SQ_CONFIG, sq_config);

	WREG32(SQ_GPR_RESOURCE_MGMT_1,  (NUM_PS_GPRS((rdev->config.rv770.max_gprs * 24)/64) |
					 NUM_VS_GPRS((rdev->config.rv770.max_gprs * 24)/64) |
					 NUM_CLAUSE_TEMP_GPRS(((rdev->config.rv770.max_gprs * 24)/64)/2)));

	WREG32(SQ_GPR_RESOURCE_MGMT_2,  (NUM_GS_GPRS((rdev->config.rv770.max_gprs * 7)/64) |
					 NUM_ES_GPRS((rdev->config.rv770.max_gprs * 7)/64)));

	sq_thread_resource_mgmt = (NUM_PS_THREADS((rdev->config.rv770.max_threads * 4)/8) |
				   NUM_VS_THREADS((rdev->config.rv770.max_threads * 2)/8) |
				   NUM_ES_THREADS((rdev->config.rv770.max_threads * 1)/8));
	if (((rdev->config.rv770.max_threads * 1) / 8) > rdev->config.rv770.max_gs_threads)
		sq_thread_resource_mgmt |= NUM_GS_THREADS(rdev->config.rv770.max_gs_threads);
	else
		sq_thread_resource_mgmt |= NUM_GS_THREADS((rdev->config.rv770.max_gs_threads * 1)/8);
	WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);

	WREG32(SQ_STACK_RESOURCE_MGMT_1, (NUM_PS_STACK_ENTRIES((rdev->config.rv770.max_stack_entries * 1)/4) |
					  NUM_VS_STACK_ENTRIES((rdev->config.rv770.max_stack_entries * 1)/4)));

	WREG32(SQ_STACK_RESOURCE_MGMT_2, (NUM_GS_STACK_ENTRIES((rdev->config.rv770.max_stack_entries * 1)/4) |
					  NUM_ES_STACK_ENTRIES((rdev->config.rv770.max_stack_entries * 1)/4)));

	sq_dyn_gpr_size_simd_ab_0 = (SIMDA_RING0((rdev->config.rv770.max_gprs * 38)/64) |
				     SIMDA_RING1((rdev->config.rv770.max_gprs * 38)/64) |
				     SIMDB_RING0((rdev->config.rv770.max_gprs * 38)/64) |
				     SIMDB_RING1((rdev->config.rv770.max_gprs * 38)/64));

	WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_0, sq_dyn_gpr_size_simd_ab_0);
	WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_1, sq_dyn_gpr_size_simd_ab_0);
	WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_2, sq_dyn_gpr_size_simd_ab_0);
	WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_3, sq_dyn_gpr_size_simd_ab_0);
	WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_4, sq_dyn_gpr_size_simd_ab_0);
	WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_5, sq_dyn_gpr_size_simd_ab_0);
	WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_6, sq_dyn_gpr_size_simd_ab_0);
	WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_7, sq_dyn_gpr_size_simd_ab_0);

	WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
					  FORCE_EOV_MAX_REZ_CNT(255)));

	if (rdev->family == CHIP_RV710)
		WREG32(VGT_CACHE_INVALIDATION, (CACHE_INVALIDATION(TC_ONLY) |
						AUTO_INVLD_EN(ES_AND_GS_AUTO)));
	else
		WREG32(VGT_CACHE_INVALIDATION, (CACHE_INVALIDATION(VC_AND_TC) |
						AUTO_INVLD_EN(ES_AND_GS_AUTO)));

	switch (rdev->family) {
	case CHIP_RV770:
	case CHIP_RV730:
	case CHIP_RV740:
		gs_prim_buffer_depth = 384;
		break;
	case CHIP_RV710:
		gs_prim_buffer_depth = 128;
		break;
	default:
		break;
	}

	num_gs_verts_per_thread = rdev->config.rv770.max_pipes * 16;
	vgt_gs_per_es = gs_prim_buffer_depth + num_gs_verts_per_thread;
	/* Max value for this is 256 */
	if (vgt_gs_per_es > 256)
		vgt_gs_per_es = 256;

	WREG32(VGT_ES_PER_GS, 128);
	WREG32(VGT_GS_PER_ES, vgt_gs_per_es);
	WREG32(VGT_GS_PER_VS, 2);

	/* more default values. 2D/3D driver should adjust as needed */
	WREG32(VGT_GS_VERTEX_REUSE, 16);
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
	WREG32(VGT_STRMOUT_EN, 0);
	WREG32(SX_MISC, 0);
	WREG32(PA_SC_MODE_CNTL, 0);
	WREG32(PA_SC_EDGERULE, 0xaaaaaaaa);
	WREG32(PA_SC_AA_CONFIG, 0);
	WREG32(PA_SC_CLIPRECT_RULE, 0xffff);
	WREG32(PA_SC_LINE_STIPPLE, 0);
	WREG32(SPI_INPUT_Z, 0);
	WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
	WREG32(CB_COLOR7_FRAG, 0);

	/* clear render buffer base addresses */
	WREG32(CB_COLOR0_BASE, 0);
	WREG32(CB_COLOR1_BASE, 0);
	WREG32(CB_COLOR2_BASE, 0);
	WREG32(CB_COLOR3_BASE, 0);
	WREG32(CB_COLOR4_BASE, 0);
	WREG32(CB_COLOR5_BASE, 0);
	WREG32(CB_COLOR6_BASE, 0);
	WREG32(CB_COLOR7_BASE, 0);

	WREG32(TCP_CNTL, 0);

	hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
	WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);

	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);

	WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
			       NUM_CLIP_SEQ(3)));

}

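/* Probe VRAM: derive the memory bus width from the channel size and
 * channel count, read the VRAM size from CONFIG_MEMSIZE, and place the
 * VRAM/GTT apertures in the GPU address space.
 */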
int rv770_mc_init(struct radeon_device *rdev)
{
	u32 tmp;
	int chansize, numchan;

	/* Get VRAM information */
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32(MC_ARB_RAMCFG);
	if (tmp & CHANSIZE_OVERRIDE) {
		chansize = 16;
	} else if (tmp & CHANSIZE_MASK) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(MC_SHARED_CHMAP);
	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	}
	rdev->mc.vram_width = numchan * chansize;
	/* Could aperture size report 0 ? */
	rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
	rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
	/* Setup GPU memory space */
	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.visible_vram_size = rdev->mc.aper_size;
	r600_vram_gtt_location(rdev, &rdev->mc);
	radeon_update_bandwidth_info(rdev);

	return 0;
}

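/* Bring the GPU up: program the MC, enable AGP or the PCIE GART, init
 * the 3D engine, blitter, interrupts and CP, and load the CP microcode.
 * Called from both rv770_init() and rv770_resume().
 */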
static int rv770_startup(struct radeon_device *rdev)
{
	int r;

	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
		r = r600_init_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load firmware!\n");
			return r;
		}
	}

	rv770_mc_program(rdev);
	if (rdev->flags & RADEON_IS_AGP) {
		rv770_agp_enable(rdev);
	} else {
		r = rv770_pcie_gart_enable(rdev);
		if (r)
			return r;
	}
	rv770_gpu_init(rdev);
	r = r600_blit_init(rdev);
	if (r) {
		r600_blit_fini(rdev);
		rdev->asic->copy = NULL;
		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
	}
	/* pin copy shader into vram */
	if (rdev->r600_blit.shader_obj) {
		r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
		if (unlikely(r != 0))
			return r;
		r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
				&rdev->r600_blit.shader_gpu_addr);
		radeon_bo_unreserve(rdev->r600_blit.shader_obj);
		if (r) {
			DRM_ERROR("failed to pin blit object %d\n", r);
			return r;
		}
	}
	/* Enable IRQ */
	r = r600_irq_init(rdev);
	if (r) {
		DRM_ERROR("radeon: IH init failed (%d).\n", r);
		radeon_irq_kms_fini(rdev);
		return r;
	}
	r600_irq_set(rdev);

	r = radeon_ring_init(rdev, rdev->cp.ring_size);
	if (r)
		return r;
	r = rv770_cp_load_microcode(rdev);
	if (r)
		return r;
	r = r600_cp_resume(rdev);
	if (r)
		return r;
	/* write back buffers are not vital so don't worry about failure */
	r600_wb_enable(rdev);
	return 0;
}

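/* Resume: re-post the card through the ATOM BIOS tables, restart the
 * clocks and re-run the startup sequence, then sanity-check the ring
 * with an IB test and re-init audio.
 */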
int rv770_resume(struct radeon_device *rdev)
{
	int r;

	/* Do not reset the GPU before posting; on rv770 hw, unlike r500 hw,
	 * posting performs the tasks needed to bring the GPU back into good
	 * shape.
	 */
	/* post card */
	atom_asic_init(rdev->mode_info.atom_context);
	/* Initialize clocks */
	r = radeon_clocks_init(rdev);
	if (r) {
		return r;
	}

	r = rv770_startup(rdev);
	if (r) {
		DRM_ERROR("rv770 startup failed on resume\n");
		return r;
	}

	r = r600_ib_test(rdev);
	if (r) {
		DRM_ERROR("radeon: failed testing IB (%d).\n", r);
		return r;
	}

	r = r600_audio_init(rdev);
	if (r) {
		dev_err(rdev->dev, "radeon: audio init failed\n");
		return r;
	}

	return r;
}

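/* Suspend: quiesce audio, the CP, interrupts, writeback and the GART,
 * and unpin the blit shader object so its VRAM can be evicted.
 */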
int rv770_suspend(struct radeon_device *rdev)
{
	int r;

	r600_audio_fini(rdev);
	/* FIXME: we should wait for ring to be empty */
	r700_cp_stop(rdev);
	rdev->cp.ready = false;
	r600_irq_suspend(rdev);
	r600_wb_disable(rdev);
	rv770_pcie_gart_disable(rdev);
	/* unpin shader bo */
	if (rdev->r600_blit.shader_obj) {
		r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
		if (likely(r == 0)) {
			radeon_bo_unpin(rdev->r600_blit.shader_obj);
			radeon_bo_unreserve(rdev->r600_blit.shader_obj);
		}
	}
	return 0;
}

/* Plan is to move initialization into this function and use
 * helper functions so that radeon_device_init does pretty much
 * nothing more than calling asic-specific functions. This
 * should also allow us to remove a bunch of callback functions
 * like vram_info.
 */
int rv770_init(struct radeon_device *rdev)
{
	int r;

	r = radeon_dummy_page_init(rdev);
	if (r)
		return r;
	/* This doesn't do much */
	r = radeon_gem_init(rdev);
	if (r)
		return r;
	/* Read BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	/* Must be an ATOMBIOS */
	if (!rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
		return -EINVAL;
	}
	r = radeon_atombios_init(rdev);
	if (r)
		return r;
	/* Post card if necessary */
	if (!r600_card_posted(rdev)) {
		if (!rdev->bios) {
			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
			return -EINVAL;
		}
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
	/* Initialize scratch registers */
	r600_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	r = radeon_clocks_init(rdev);
	if (r)
		return r;
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	/* initialize AGP */
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r)
			radeon_agp_disable(rdev);
	}
	r = rv770_mc_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;

	r = radeon_irq_kms_init(rdev);
	if (r)
		return r;

	rdev->cp.ring_obj = NULL;
	r600_ring_init(rdev, 1024 * 1024);

	rdev->ih.ring_obj = NULL;
	r600_ih_ring_init(rdev, 64 * 1024);

	r = r600_pcie_gart_init(rdev);
	if (r)
		return r;

	rdev->accel_working = true;
	r = rv770_startup(rdev);
	if (r) {
		dev_err(rdev->dev, "disabling GPU acceleration\n");
		r700_cp_fini(rdev);
		r600_wb_fini(rdev);
		r600_irq_fini(rdev);
		radeon_irq_kms_fini(rdev);
		rv770_pcie_gart_fini(rdev);
		rdev->accel_working = false;
	}
	if (rdev->accel_working) {
		r = radeon_ib_pool_init(rdev);
		if (r) {
			dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
			rdev->accel_working = false;
		} else {
			r = r600_ib_test(rdev);
			if (r) {
				dev_err(rdev->dev, "IB test failed (%d).\n", r);
				rdev->accel_working = false;
			}
		}
	}

	r = r600_audio_init(rdev);
	if (r) {
		dev_err(rdev->dev, "radeon: audio init failed\n");
		return r;
	}

	return 0;
}

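/* Tear down everything set up by rv770_init(), roughly in reverse
 * order: acceleration blocks first, then GEM, fences, clocks, AGP, the
 * memory manager and the ATOM BIOS context.
 */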
void rv770_fini(struct radeon_device *rdev)
{
	r600_blit_fini(rdev);
	r700_cp_fini(rdev);
	r600_wb_fini(rdev);
	r600_irq_fini(rdev);
	radeon_irq_kms_fini(rdev);
	rv770_pcie_gart_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_clocks_fini(rdev);
	radeon_agp_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
	radeon_dummy_page_fini(rdev);
}