drivers/gpu/drm/radeon/rv515.c
1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <linux/seq_file.h>
29 #include "drmP.h"
30 #include "rv515r.h"
31 #include "radeon.h"
32 #include "radeon_share.h"
33
34 /* rv515 depends on: */
35 void r100_hdp_reset(struct radeon_device *rdev);
36 int r100_cp_reset(struct radeon_device *rdev);
37 int r100_rb2d_reset(struct radeon_device *rdev);
38 int r100_gui_wait_for_idle(struct radeon_device *rdev);
39 int r100_cp_init(struct radeon_device *rdev, unsigned ring_size);
40 int rv370_pcie_gart_enable(struct radeon_device *rdev);
41 void rv370_pcie_gart_disable(struct radeon_device *rdev);
42 void r420_pipes_init(struct radeon_device *rdev);
43 void rs600_mc_disable_clients(struct radeon_device *rdev);
44 void rs600_disable_vga(struct radeon_device *rdev);
45
46 /* This file gathers functions specific to:
47  * rv515
48  *
49  * Some of these functions might be used by newer ASICs.
50  */
51 int rv515_debugfs_pipes_info_init(struct radeon_device *rdev);
52 int rv515_debugfs_ga_info_init(struct radeon_device *rdev);
53 void rv515_gpu_init(struct radeon_device *rdev);
54 int rv515_mc_wait_for_idle(struct radeon_device *rdev);
55
56
57 /*
58  * MC
59  */
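/* Set up the GPU address space: place VRAM and GTT (AGP aperture when
 * available, otherwise the gart size from the module option) and program
 * MC_FB_LOCATION, MC_AGP_LOCATION and the HDP mapping to match.
 */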
60 int rv515_mc_init(struct radeon_device *rdev)
61 {
62         uint32_t tmp;
63         int r;
64
65         if (r100_debugfs_rbbm_init(rdev)) {
66                 DRM_ERROR("Failed to register debugfs file for RBBM !\n");
67         }
68         if (rv515_debugfs_pipes_info_init(rdev)) {
69                 DRM_ERROR("Failed to register debugfs file for pipes !\n");
70         }
71         if (rv515_debugfs_ga_info_init(rdev)) {
72                 DRM_ERROR("Failed to register debugfs file for GA !\n");
73         }
74
75         rv515_gpu_init(rdev);
76         rv370_pcie_gart_disable(rdev);
77
78         /* Setup GPU memory space */
79         rdev->mc.vram_location = 0xFFFFFFFFUL;
80         rdev->mc.gtt_location = 0xFFFFFFFFUL;
81         if (rdev->flags & RADEON_IS_AGP) {
82                 r = radeon_agp_init(rdev);
83                 if (r) {
84                         printk(KERN_WARNING "[drm] Disabling AGP\n");
85                         rdev->flags &= ~RADEON_IS_AGP;
86                         rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
87                 } else {
88                         rdev->mc.gtt_location = rdev->mc.agp_base;
89                 }
90         }
91         r = radeon_mc_setup(rdev);
92         if (r) {
93                 return r;
94         }
95
96         /* Program GPU memory space */
97         rs600_mc_disable_clients(rdev);
98         if (rv515_mc_wait_for_idle(rdev)) {
99                 printk(KERN_WARNING "Failed to wait MC idle while "
100                        "programming pipes. Bad things might happen.\n");
101         }
102         /* Write VRAM size in case we are limiting it */
103         WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
104         tmp = REG_SET(MC_FB_START, rdev->mc.vram_location >> 16);
105         WREG32(0x134, tmp);
106         tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
107         tmp = REG_SET(MC_FB_TOP, tmp >> 16);
108         tmp |= REG_SET(MC_FB_START, rdev->mc.vram_location >> 16);
109         WREG32_MC(MC_FB_LOCATION, tmp);
110         WREG32(HDP_FB_LOCATION, rdev->mc.vram_location >> 16);
111         WREG32(0x310, rdev->mc.vram_location);
112         if (rdev->flags & RADEON_IS_AGP) {
113                 tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
114                 tmp = REG_SET(MC_AGP_TOP, tmp >> 16);
115                 tmp |= REG_SET(MC_AGP_START, rdev->mc.gtt_location >> 16);
116                 WREG32_MC(MC_AGP_LOCATION, tmp);
117                 WREG32_MC(MC_AGP_BASE, rdev->mc.agp_base);
118                 WREG32_MC(MC_AGP_BASE_2, 0);
119         } else {
120                 WREG32_MC(MC_AGP_LOCATION, 0x0FFFFFFF);
121                 WREG32_MC(MC_AGP_BASE, 0);
122                 WREG32_MC(MC_AGP_BASE_2, 0);
123         }
124         return 0;
125 }
126
127 void rv515_mc_fini(struct radeon_device *rdev)
128 {
129         rv370_pcie_gart_disable(rdev);
130         radeon_gart_table_vram_free(rdev);
131         radeon_gart_fini(rdev);
132 }
133
134
135 /*
136  * Global GPU functions
137  */
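/* Prime the CP ring with the initial 3D engine state: idle
 * synchronization, pipe/backend setup, cache flushes and the default
 * multisample positions.
 */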
138 void rv515_ring_start(struct radeon_device *rdev)
139 {
140         int r;
141
142         r = radeon_ring_lock(rdev, 64);
143         if (r) {
144                 return;
145         }
146         radeon_ring_write(rdev, PACKET0(ISYNC_CNTL, 0));
147         radeon_ring_write(rdev,
148                           ISYNC_ANY2D_IDLE3D |
149                           ISYNC_ANY3D_IDLE2D |
150                           ISYNC_WAIT_IDLEGUI |
151                           ISYNC_CPSCRATCH_IDLEGUI);
152         radeon_ring_write(rdev, PACKET0(WAIT_UNTIL, 0));
153         radeon_ring_write(rdev, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
154         radeon_ring_write(rdev, PACKET0(0x170C, 0));
155         radeon_ring_write(rdev, 1 << 31);
156         radeon_ring_write(rdev, PACKET0(GB_SELECT, 0));
157         radeon_ring_write(rdev, 0);
158         radeon_ring_write(rdev, PACKET0(GB_ENABLE, 0));
159         radeon_ring_write(rdev, 0);
160         radeon_ring_write(rdev, PACKET0(0x42C8, 0));
161         radeon_ring_write(rdev, (1 << rdev->num_gb_pipes) - 1);
162         radeon_ring_write(rdev, PACKET0(VAP_INDEX_OFFSET, 0));
163         radeon_ring_write(rdev, 0);
164         radeon_ring_write(rdev, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
165         radeon_ring_write(rdev, RB3D_DC_FLUSH | RB3D_DC_FREE);
166         radeon_ring_write(rdev, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
167         radeon_ring_write(rdev, ZC_FLUSH | ZC_FREE);
168         radeon_ring_write(rdev, PACKET0(WAIT_UNTIL, 0));
169         radeon_ring_write(rdev, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
170         radeon_ring_write(rdev, PACKET0(GB_AA_CONFIG, 0));
171         radeon_ring_write(rdev, 0);
172         radeon_ring_write(rdev, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
173         radeon_ring_write(rdev, RB3D_DC_FLUSH | RB3D_DC_FREE);
174         radeon_ring_write(rdev, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
175         radeon_ring_write(rdev, ZC_FLUSH | ZC_FREE);
176         radeon_ring_write(rdev, PACKET0(GB_MSPOS0, 0));
177         radeon_ring_write(rdev,
178                           ((6 << MS_X0_SHIFT) |
179                            (6 << MS_Y0_SHIFT) |
180                            (6 << MS_X1_SHIFT) |
181                            (6 << MS_Y1_SHIFT) |
182                            (6 << MS_X2_SHIFT) |
183                            (6 << MS_Y2_SHIFT) |
184                            (6 << MSBD0_Y_SHIFT) |
185                            (6 << MSBD0_X_SHIFT)));
186         radeon_ring_write(rdev, PACKET0(GB_MSPOS1, 0));
187         radeon_ring_write(rdev,
188                           ((6 << MS_X3_SHIFT) |
189                            (6 << MS_Y3_SHIFT) |
190                            (6 << MS_X4_SHIFT) |
191                            (6 << MS_Y4_SHIFT) |
192                            (6 << MS_X5_SHIFT) |
193                            (6 << MS_Y5_SHIFT) |
194                            (6 << MSBD1_SHIFT)));
195         radeon_ring_write(rdev, PACKET0(GA_ENHANCE, 0));
196         radeon_ring_write(rdev, GA_DEADLOCK_CNTL | GA_FASTSYNC_CNTL);
197         radeon_ring_write(rdev, PACKET0(GA_POLY_MODE, 0));
198         radeon_ring_write(rdev, FRONT_PTYPE_TRIANGE | BACK_PTYPE_TRIANGE);
199         radeon_ring_write(rdev, PACKET0(GA_ROUND_MODE, 0));
200         radeon_ring_write(rdev, GEOMETRY_ROUND_NEAREST | COLOR_ROUND_NEAREST);
201         radeon_ring_write(rdev, PACKET0(0x20C8, 0));
202         radeon_ring_write(rdev, 0);
203         radeon_ring_unlock_commit(rdev);
204 }
205
206 void rv515_errata(struct radeon_device *rdev)
207 {
208         rdev->pll_errata = 0;
209 }
210
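/* Poll MC_STATUS until the memory controller reports idle; returns 0 on
 * idle, -1 if rdev->usec_timeout microseconds elapse first.
 */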
211 int rv515_mc_wait_for_idle(struct radeon_device *rdev)
212 {
213         unsigned i;
214         uint32_t tmp;
215
216         for (i = 0; i < rdev->usec_timeout; i++) {
217                 /* read MC_STATUS */
218                 tmp = RREG32_MC(MC_STATUS);
219                 if (tmp & MC_STATUS_IDLE) {
220                         return 0;
221                 }
222                 DRM_UDELAY(1);
223         }
224         return -1;
225 }
226
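/* Reset the HDP and 2D backend, disable VGA, initialize the pipes, then
 * derive a pipe mask from the current pipe select and write it to PLL
 * register 0x000D before waiting for the GUI and MC to go idle.
 */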
227 void rv515_gpu_init(struct radeon_device *rdev)
228 {
229         unsigned pipe_select_current, gb_pipe_select, tmp;
230
231         r100_hdp_reset(rdev);
232         r100_rb2d_reset(rdev);
233
234         if (r100_gui_wait_for_idle(rdev)) {
235                 printk(KERN_WARNING "Failed to wait GUI idle while "
236                        "resetting GPU. Bad things might happen.\n");
237         }
238
239         rs600_disable_vga(rdev);
240
241         r420_pipes_init(rdev);
242         gb_pipe_select = RREG32(0x402C);
243         tmp = RREG32(0x170C);
244         pipe_select_current = (tmp >> 2) & 3;
245         tmp = (1 << pipe_select_current) |
246               (((gb_pipe_select >> 8) & 0xF) << 4);
247         WREG32_PLL(0x000D, tmp);
248         if (r100_gui_wait_for_idle(rdev)) {
249                 printk(KERN_WARNING "Failed to wait GUI idle while "
250                        "resetting GPU. Bad things might happen.\n");
251         }
252         if (rv515_mc_wait_for_idle(rdev)) {
253                 printk(KERN_WARNING "Failed to wait MC idle while "
254                        "programming pipes. Bad things might happen.\n");
255         }
256 }
257
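/* Soft reset the GA/VAP blocks through RBBM_SOFT_RESET and retry until
 * RBBM_STATUS no longer reports them busy; the CP is re-initialized
 * afterwards if it was running when the reset started.
 */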
258 int rv515_ga_reset(struct radeon_device *rdev)
259 {
260         uint32_t tmp;
261         bool reinit_cp;
262         int i;
263
264         reinit_cp = rdev->cp.ready;
265         rdev->cp.ready = false;
266         for (i = 0; i < rdev->usec_timeout; i++) {
267                 WREG32(CP_CSQ_MODE, 0);
268                 WREG32(CP_CSQ_CNTL, 0);
269                 WREG32(RBBM_SOFT_RESET, 0x32005);
270                 (void)RREG32(RBBM_SOFT_RESET);
271                 udelay(200);
272                 WREG32(RBBM_SOFT_RESET, 0);
273                 /* Wait to prevent race in RBBM_STATUS */
274                 mdelay(1);
275                 tmp = RREG32(RBBM_STATUS);
276                 if (tmp & ((1 << 20) | (1 << 26))) {
277                         DRM_ERROR("VAP & CP still busy (RBBM_STATUS=0x%08X)\n", tmp);
278                         /* GA still busy, soft reset it */
279                         WREG32(0x429C, 0x200);
280                         WREG32(VAP_PVS_STATE_FLUSH_REG, 0);
281                         WREG32(0x43E0, 0);
282                         WREG32(0x43E4, 0);
283                         WREG32(0x24AC, 0);
284                 }
285                 /* Wait to prevent race in RBBM_STATUS */
286                 mdelay(1);
287                 tmp = RREG32(RBBM_STATUS);
288                 if (!(tmp & ((1 << 20) | (1 << 26)))) {
289                         break;
290                 }
291         }
292         for (i = 0; i < rdev->usec_timeout; i++) {
293                 tmp = RREG32(RBBM_STATUS);
294                 if (!(tmp & ((1 << 20) | (1 << 26)))) {
295                         DRM_INFO("GA reset succeeded (RBBM_STATUS=0x%08X)\n",
296                                  tmp);
297                         DRM_INFO("GA_IDLE=0x%08X\n", RREG32(0x425C));
298                         DRM_INFO("RB3D_RESET_STATUS=0x%08X\n", RREG32(0x46f0));
299                         DRM_INFO("ISYNC_CNTL=0x%08X\n", RREG32(0x1724));
300                         if (reinit_cp) {
301                                 return r100_cp_init(rdev, rdev->cp.ring_size);
302                         }
303                         return 0;
304                 }
305                 DRM_UDELAY(1);
306         }
307         tmp = RREG32(RBBM_STATUS);
308         DRM_ERROR("Failed to reset GA ! (RBBM_STATUS=0x%08X)\n", tmp);
309         return -1;
310 }
311
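/* Full engine reset: HDP first, then RB2D, GA and CP depending on which
 * busy bits RBBM_STATUS reports, with a final idle check on bit 31.
 */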
312 int rv515_gpu_reset(struct radeon_device *rdev)
313 {
314         uint32_t status;
315
316         /* reset order likely matters */
317         status = RREG32(RBBM_STATUS);
318         /* reset HDP */
319         r100_hdp_reset(rdev);
320         /* reset rb2d */
321         if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
322                 r100_rb2d_reset(rdev);
323         }
324         /* reset GA */
325         if (status & ((1 << 20) | (1 << 26))) {
326                 rv515_ga_reset(rdev);
327         }
328         /* reset CP */
329         status = RREG32(RBBM_STATUS);
330         if (status & (1 << 16)) {
331                 r100_cp_reset(rdev);
332         }
333         /* Check if GPU is idle */
334         status = RREG32(RBBM_STATUS);
335         if (status & (1 << 31)) {
336                 DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
337                 return -1;
338         }
339         DRM_INFO("GPU reset succeeded (RBBM_STATUS=0x%08X)\n", status);
340         return 0;
341 }
342
343
344 /*
345  * VRAM info
346  */
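/* Derive the VRAM bus width from the memory channel count reported in
 * MC_CNTL (one channel = 64 bits, two channels = 128 bits).
 */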
347 static void rv515_vram_get_type(struct radeon_device *rdev)
348 {
349         uint32_t tmp;
350
351         rdev->mc.vram_width = 128;
352         rdev->mc.vram_is_ddr = true;
353         tmp = RREG32_MC(RV515_MC_CNTL) & MEM_NUM_CHANNELS_MASK;
354         switch (tmp) {
355         case 0:
356                 rdev->mc.vram_width = 64;
357                 break;
358         case 1:
359                 rdev->mc.vram_width = 128;
360                 break;
361         default:
362                 rdev->mc.vram_width = 128;
363                 break;
364         }
365 }
366
367 void rv515_vram_info(struct radeon_device *rdev)
368 {
369         fixed20_12 a;
370
371         rv515_vram_get_type(rdev);
372
373         r100_vram_init_sizes(rdev);
374         /* FIXME: we should enforce default clock in case GPU is not in
375          * default setup
376          */
377         a.full = rfixed_const(100);
378         rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
379         rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
380 }
381
382
383 /*
384  * Indirect registers accessor
385  */
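/* MC registers are reached indirectly through the MC_IND_INDEX /
 * MC_IND_DATA pair; the write path sets an extra high bit in the index
 * (0xff0000 vs 0x7f0000), and the index is cleared again afterwards.
 */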
386 uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg)
387 {
388         uint32_t r;
389
390         WREG32(MC_IND_INDEX, 0x7f0000 | (reg & 0xffff));
391         r = RREG32(MC_IND_DATA);
392         WREG32(MC_IND_INDEX, 0);
393         return r;
394 }
395
396 void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
397 {
398         WREG32(MC_IND_INDEX, 0xff0000 | ((reg) & 0xffff));
399         WREG32(MC_IND_DATA, (v));
400         WREG32(MC_IND_INDEX, 0);
401 }
402
403 /*
404  * Debugfs info
405  */
406 #if defined(CONFIG_DEBUG_FS)
407 static int rv515_debugfs_pipes_info(struct seq_file *m, void *data)
408 {
409         struct drm_info_node *node = (struct drm_info_node *) m->private;
410         struct drm_device *dev = node->minor->dev;
411         struct radeon_device *rdev = dev->dev_private;
412         uint32_t tmp;
413
414         tmp = RREG32(GB_PIPE_SELECT);
415         seq_printf(m, "GB_PIPE_SELECT 0x%08x\n", tmp);
416         tmp = RREG32(SU_REG_DEST);
417         seq_printf(m, "SU_REG_DEST 0x%08x\n", tmp);
418         tmp = RREG32(GB_TILE_CONFIG);
419         seq_printf(m, "GB_TILE_CONFIG 0x%08x\n", tmp);
420         tmp = RREG32(DST_PIPE_CONFIG);
421         seq_printf(m, "DST_PIPE_CONFIG 0x%08x\n", tmp);
422         return 0;
423 }
424
425 static int rv515_debugfs_ga_info(struct seq_file *m, void *data)
426 {
427         struct drm_info_node *node = (struct drm_info_node *) m->private;
428         struct drm_device *dev = node->minor->dev;
429         struct radeon_device *rdev = dev->dev_private;
430         uint32_t tmp;
431
432         tmp = RREG32(0x2140);
433         seq_printf(m, "VAP_CNTL_STATUS 0x%08x\n", tmp);
434         radeon_gpu_reset(rdev);
435         tmp = RREG32(0x425C);
436         seq_printf(m, "GA_IDLE 0x%08x\n", tmp);
437         return 0;
438 }
439
440 static struct drm_info_list rv515_pipes_info_list[] = {
441         {"rv515_pipes_info", rv515_debugfs_pipes_info, 0, NULL},
442 };
443
444 static struct drm_info_list rv515_ga_info_list[] = {
445         {"rv515_ga_info", rv515_debugfs_ga_info, 0, NULL},
446 };
447 #endif
448
449 int rv515_debugfs_pipes_info_init(struct radeon_device *rdev)
450 {
451 #if defined(CONFIG_DEBUG_FS)
452         return radeon_debugfs_add_files(rdev, rv515_pipes_info_list, 1);
453 #else
454         return 0;
455 #endif
456 }
457
458 int rv515_debugfs_ga_info_init(struct radeon_device *rdev)
459 {
460 #if defined(CONFIG_DEBUG_FS)
461         return radeon_debugfs_add_files(rdev, rv515_ga_info_list, 1);
462 #else
463         return 0;
464 #endif
465 }
466
467
468 /*
469  * Asic initialization
470  */
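/* Register safe bitmap handed to the command stream checker via
 * rv515_init(); each set bit flags a register that user space command
 * streams are allowed to write directly (shared with the r300 CS path).
 */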
471 static const unsigned r500_reg_safe_bm[219] = {
472         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
473         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
474         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
475         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
476         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
477         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
478         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
479         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
480         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
481         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
482         0x17FF1FFF, 0xFFFFFFFC, 0xFFFFFFFF, 0xFF30FFBF,
483         0xFFFFFFF8, 0xC3E6FFFF, 0xFFFFF6DF, 0xFFFFFFFF,
484         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
485         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
486         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF03F,
487         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
488         0xFFFFFFFF, 0xFFFFEFCE, 0xF00EBFFF, 0x007C0000,
489         0xF0000038, 0xFF000009, 0xFFFFFFFF, 0xFFFFFFFF,
490         0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF,
491         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
492         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
493         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
494         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
495         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
496         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
497         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
498         0xFFFFF7FF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
499         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
500         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
501         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
502         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
503         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
504         0x1FFFFC78, 0xFFFFE000, 0xFFFFFFFE, 0xFFFFFFFF,
505         0x38CF8F50, 0xFFF88082, 0xFF0000FC, 0xFAE009FF,
506         0x0000FFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000,
507         0xFFFF8CFC, 0xFFFFC1FF, 0xFFFFFFFF, 0xFFFFFFFF,
508         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
509         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFF80FFFF,
510         0x00000000, 0x00000000, 0x00000000, 0x00000000,
511         0x0003FC01, 0x3FFFFCF8, 0xFF800B19, 0xFFDFFFFF,
512         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
513         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
514         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
515         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
516         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
517         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
518         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
519         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
520         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
521         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
522         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
523         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
524         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
525         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
526         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
527 };
528
529 int rv515_init(struct radeon_device *rdev)
530 {
531         rdev->config.r300.reg_safe_bm = r500_reg_safe_bm;
532         rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r500_reg_safe_bm);
533         return 0;
534 }
535
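/* Force the TV scaler configuration by loading what appears to be a
 * filter coefficient table through the 0x6578 (index) / 0x657C (data)
 * register pair, after clearing the surrounding scaler controls.
 */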
536 void atom_rv515_force_tv_scaler(struct radeon_device *rdev)
537 {
538
539         WREG32(0x659C, 0x0);
540         WREG32(0x6594, 0x705);
541         WREG32(0x65A4, 0x10001);
542         WREG32(0x65D8, 0x0);
543         WREG32(0x65B0, 0x0);
544         WREG32(0x65C0, 0x0);
545         WREG32(0x65D4, 0x0);
546         WREG32(0x6578, 0x0);
547         WREG32(0x657C, 0x841880A8);
548         WREG32(0x6578, 0x1);
549         WREG32(0x657C, 0x84208680);
550         WREG32(0x6578, 0x2);
551         WREG32(0x657C, 0xBFF880B0);
552         WREG32(0x6578, 0x100);
553         WREG32(0x657C, 0x83D88088);
554         WREG32(0x6578, 0x101);
555         WREG32(0x657C, 0x84608680);
556         WREG32(0x6578, 0x102);
557         WREG32(0x657C, 0xBFF080D0);
558         WREG32(0x6578, 0x200);
559         WREG32(0x657C, 0x83988068);
560         WREG32(0x6578, 0x201);
561         WREG32(0x657C, 0x84A08680);
562         WREG32(0x6578, 0x202);
563         WREG32(0x657C, 0xBFF080F8);
564         WREG32(0x6578, 0x300);
565         WREG32(0x657C, 0x83588058);
566         WREG32(0x6578, 0x301);
567         WREG32(0x657C, 0x84E08660);
568         WREG32(0x6578, 0x302);
569         WREG32(0x657C, 0xBFF88120);
570         WREG32(0x6578, 0x400);
571         WREG32(0x657C, 0x83188040);
572         WREG32(0x6578, 0x401);
573         WREG32(0x657C, 0x85008660);
574         WREG32(0x6578, 0x402);
575         WREG32(0x657C, 0xBFF88150);
576         WREG32(0x6578, 0x500);
577         WREG32(0x657C, 0x82D88030);
578         WREG32(0x6578, 0x501);
579         WREG32(0x657C, 0x85408640);
580         WREG32(0x6578, 0x502);
581         WREG32(0x657C, 0xBFF88180);
582         WREG32(0x6578, 0x600);
583         WREG32(0x657C, 0x82A08018);
584         WREG32(0x6578, 0x601);
585         WREG32(0x657C, 0x85808620);
586         WREG32(0x6578, 0x602);
587         WREG32(0x657C, 0xBFF081B8);
588         WREG32(0x6578, 0x700);
589         WREG32(0x657C, 0x82608010);
590         WREG32(0x6578, 0x701);
591         WREG32(0x657C, 0x85A08600);
592         WREG32(0x6578, 0x702);
593         WREG32(0x657C, 0x800081F0);
594         WREG32(0x6578, 0x800);
595         WREG32(0x657C, 0x8228BFF8);
596         WREG32(0x6578, 0x801);
597         WREG32(0x657C, 0x85E085E0);
598         WREG32(0x6578, 0x802);
599         WREG32(0x657C, 0xBFF88228);
600         WREG32(0x6578, 0x10000);
601         WREG32(0x657C, 0x82A8BF00);
602         WREG32(0x6578, 0x10001);
603         WREG32(0x657C, 0x82A08CC0);
604         WREG32(0x6578, 0x10002);
605         WREG32(0x657C, 0x8008BEF8);
606         WREG32(0x6578, 0x10100);
607         WREG32(0x657C, 0x81F0BF28);
608         WREG32(0x6578, 0x10101);
609         WREG32(0x657C, 0x83608CA0);
610         WREG32(0x6578, 0x10102);
611         WREG32(0x657C, 0x8018BED0);
612         WREG32(0x6578, 0x10200);
613         WREG32(0x657C, 0x8148BF38);
614         WREG32(0x6578, 0x10201);
615         WREG32(0x657C, 0x84408C80);
616         WREG32(0x6578, 0x10202);
617         WREG32(0x657C, 0x8008BEB8);
618         WREG32(0x6578, 0x10300);
619         WREG32(0x657C, 0x80B0BF78);
620         WREG32(0x6578, 0x10301);
621         WREG32(0x657C, 0x85008C20);
622         WREG32(0x6578, 0x10302);
623         WREG32(0x657C, 0x8020BEA0);
624         WREG32(0x6578, 0x10400);
625         WREG32(0x657C, 0x8028BF90);
626         WREG32(0x6578, 0x10401);
627         WREG32(0x657C, 0x85E08BC0);
628         WREG32(0x6578, 0x10402);
629         WREG32(0x657C, 0x8018BE90);
630         WREG32(0x6578, 0x10500);
631         WREG32(0x657C, 0xBFB8BFB0);
632         WREG32(0x6578, 0x10501);
633         WREG32(0x657C, 0x86C08B40);
634         WREG32(0x6578, 0x10502);
635         WREG32(0x657C, 0x8010BE90);
636         WREG32(0x6578, 0x10600);
637         WREG32(0x657C, 0xBF58BFC8);
638         WREG32(0x6578, 0x10601);
639         WREG32(0x657C, 0x87A08AA0);
640         WREG32(0x6578, 0x10602);
641         WREG32(0x657C, 0x8010BE98);
642         WREG32(0x6578, 0x10700);
643         WREG32(0x657C, 0xBF10BFF0);
644         WREG32(0x6578, 0x10701);
645         WREG32(0x657C, 0x886089E0);
646         WREG32(0x6578, 0x10702);
647         WREG32(0x657C, 0x8018BEB0);
648         WREG32(0x6578, 0x10800);
649         WREG32(0x657C, 0xBED8BFE8);
650         WREG32(0x6578, 0x10801);
651         WREG32(0x657C, 0x89408940);
652         WREG32(0x6578, 0x10802);
653         WREG32(0x657C, 0xBFE8BED8);
654         WREG32(0x6578, 0x20000);
655         WREG32(0x657C, 0x80008000);
656         WREG32(0x6578, 0x20001);
657         WREG32(0x657C, 0x90008000);
658         WREG32(0x6578, 0x20002);
659         WREG32(0x657C, 0x80008000);
660         WREG32(0x6578, 0x20003);
661         WREG32(0x657C, 0x80008000);
662         WREG32(0x6578, 0x20100);
663         WREG32(0x657C, 0x80108000);
664         WREG32(0x6578, 0x20101);
665         WREG32(0x657C, 0x8FE0BF70);
666         WREG32(0x6578, 0x20102);
667         WREG32(0x657C, 0xBFE880C0);
668         WREG32(0x6578, 0x20103);
669         WREG32(0x657C, 0x80008000);
670         WREG32(0x6578, 0x20200);
671         WREG32(0x657C, 0x8018BFF8);
672         WREG32(0x6578, 0x20201);
673         WREG32(0x657C, 0x8F80BF08);
674         WREG32(0x6578, 0x20202);
675         WREG32(0x657C, 0xBFD081A0);
676         WREG32(0x6578, 0x20203);
677         WREG32(0x657C, 0xBFF88000);
678         WREG32(0x6578, 0x20300);
679         WREG32(0x657C, 0x80188000);
680         WREG32(0x6578, 0x20301);
681         WREG32(0x657C, 0x8EE0BEC0);
682         WREG32(0x6578, 0x20302);
683         WREG32(0x657C, 0xBFB082A0);
684         WREG32(0x6578, 0x20303);
685         WREG32(0x657C, 0x80008000);
686         WREG32(0x6578, 0x20400);
687         WREG32(0x657C, 0x80188000);
688         WREG32(0x6578, 0x20401);
689         WREG32(0x657C, 0x8E00BEA0);
690         WREG32(0x6578, 0x20402);
691         WREG32(0x657C, 0xBF8883C0);
692         WREG32(0x6578, 0x20403);
693         WREG32(0x657C, 0x80008000);
694         WREG32(0x6578, 0x20500);
695         WREG32(0x657C, 0x80188000);
696         WREG32(0x6578, 0x20501);
697         WREG32(0x657C, 0x8D00BE90);
698         WREG32(0x6578, 0x20502);
699         WREG32(0x657C, 0xBF588500);
700         WREG32(0x6578, 0x20503);
701         WREG32(0x657C, 0x80008008);
702         WREG32(0x6578, 0x20600);
703         WREG32(0x657C, 0x80188000);
704         WREG32(0x6578, 0x20601);
705         WREG32(0x657C, 0x8BC0BE98);
706         WREG32(0x6578, 0x20602);
707         WREG32(0x657C, 0xBF308660);
708         WREG32(0x6578, 0x20603);
709         WREG32(0x657C, 0x80008008);
710         WREG32(0x6578, 0x20700);
711         WREG32(0x657C, 0x80108000);
712         WREG32(0x6578, 0x20701);
713         WREG32(0x657C, 0x8A80BEB0);
714         WREG32(0x6578, 0x20702);
715         WREG32(0x657C, 0xBF0087C0);
716         WREG32(0x6578, 0x20703);
717         WREG32(0x657C, 0x80008008);
718         WREG32(0x6578, 0x20800);
719         WREG32(0x657C, 0x80108000);
720         WREG32(0x6578, 0x20801);
721         WREG32(0x657C, 0x8920BED0);
722         WREG32(0x6578, 0x20802);
723         WREG32(0x657C, 0xBED08920);
724         WREG32(0x6578, 0x20803);
725         WREG32(0x657C, 0x80008010);
726         WREG32(0x6578, 0x30000);
727         WREG32(0x657C, 0x90008000);
728         WREG32(0x6578, 0x30001);
729         WREG32(0x657C, 0x80008000);
730         WREG32(0x6578, 0x30100);
731         WREG32(0x657C, 0x8FE0BF90);
732         WREG32(0x6578, 0x30101);
733         WREG32(0x657C, 0xBFF880A0);
734         WREG32(0x6578, 0x30200);
735         WREG32(0x657C, 0x8F60BF40);
736         WREG32(0x6578, 0x30201);
737         WREG32(0x657C, 0xBFE88180);
738         WREG32(0x6578, 0x30300);
739         WREG32(0x657C, 0x8EC0BF00);
740         WREG32(0x6578, 0x30301);
741         WREG32(0x657C, 0xBFC88280);
742         WREG32(0x6578, 0x30400);
743         WREG32(0x657C, 0x8DE0BEE0);
744         WREG32(0x6578, 0x30401);
745         WREG32(0x657C, 0xBFA083A0);
746         WREG32(0x6578, 0x30500);
747         WREG32(0x657C, 0x8CE0BED0);
748         WREG32(0x6578, 0x30501);
749         WREG32(0x657C, 0xBF7884E0);
750         WREG32(0x6578, 0x30600);
751         WREG32(0x657C, 0x8BA0BED8);
752         WREG32(0x6578, 0x30601);
753         WREG32(0x657C, 0xBF508640);
754         WREG32(0x6578, 0x30700);
755         WREG32(0x657C, 0x8A60BEE8);
756         WREG32(0x6578, 0x30701);
757         WREG32(0x657C, 0xBF2087A0);
758         WREG32(0x6578, 0x30800);
759         WREG32(0x657C, 0x8900BF00);
760         WREG32(0x6578, 0x30801);
761         WREG32(0x657C, 0xBF008900);
762 }
763
764 struct rv515_watermark {
765         u32        lb_request_fifo_depth;
766         fixed20_12 num_line_pair;
767         fixed20_12 estimated_width;
768         fixed20_12 worst_case_latency;
769         fixed20_12 consumption_rate;
770         fixed20_12 active_time;
771         fixed20_12 dbpp;
772         fixed20_12 priority_mark_max;
773         fixed20_12 priority_mark;
774         fixed20_12 sclk;
775 };
776
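/* Fill a struct rv515_watermark for one CRTC from its current mode:
 * line buffer request FIFO depth, consumption rate, line/active time,
 * worst case latency and the resulting priority marks.
 */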
777 void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
778                                   struct radeon_crtc *crtc,
779                                   struct rv515_watermark *wm)
780 {
781         struct drm_display_mode *mode = &crtc->base.mode;
782         fixed20_12 a, b, c;
783         fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width;
784         fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency;
785
786         if (!crtc->base.enabled) {
787                 /* FIXME: wouldn't it be better to set the priority mark to maximum? */
788                 wm->lb_request_fifo_depth = 4;
789                 return;
790         }
791
792         if (crtc->vsc.full > rfixed_const(2))
793                 wm->num_line_pair.full = rfixed_const(2);
794         else
795                 wm->num_line_pair.full = rfixed_const(1);
796
797         b.full = rfixed_const(mode->crtc_hdisplay);
798         c.full = rfixed_const(256);
799         a.full = rfixed_mul(wm->num_line_pair, b);
800         request_fifo_depth.full = rfixed_div(a, c);
801         if (a.full < rfixed_const(4)) {
802                 wm->lb_request_fifo_depth = 4;
803         } else {
804                 wm->lb_request_fifo_depth = rfixed_trunc(request_fifo_depth);
805         }
806
807         /* Determine consumption rate
808          *  pclk = pixel clock period(ns) = 1000 / (mode.clock / 1000)
809          *  vtaps = number of vertical taps,
810          *  vsc = vertical scaling ratio, defined as source/destination
811          *  hsc = horizontal scaling ratio, defined as source/destination
812          */
813         a.full = rfixed_const(mode->clock);
814         b.full = rfixed_const(1000);
815         a.full = rfixed_div(a, b);
816         pclk.full = rfixed_div(b, a);
817         if (crtc->rmx_type != RMX_OFF) {
818                 b.full = rfixed_const(2);
819                 if (crtc->vsc.full > b.full)
820                         b.full = crtc->vsc.full;
821                 b.full = rfixed_mul(b, crtc->hsc);
822                 c.full = rfixed_const(2);
823                 b.full = rfixed_div(b, c);
824                 consumption_time.full = rfixed_div(pclk, b);
825         } else {
826                 consumption_time.full = pclk.full;
827         }
828         a.full = rfixed_const(1);
829         wm->consumption_rate.full = rfixed_div(a, consumption_time);
830
831
832         /* Determine line time
833          *  LineTime = total time for one line of display
834          *  htotal = total number of horizontal pixels
835          *  pclk = pixel clock period(ns)
836          */
837         a.full = rfixed_const(crtc->base.mode.crtc_htotal);
838         line_time.full = rfixed_mul(a, pclk);
839
840         /* Determine active time
841          *  ActiveTime = time of active region of display within one line,
842          *  hactive = total number of horizontal active pixels
843          *  htotal = total number of horizontal pixels
844          */
845         a.full = rfixed_const(crtc->base.mode.crtc_htotal);
846         b.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
847         wm->active_time.full = rfixed_mul(line_time, b);
848         wm->active_time.full = rfixed_div(wm->active_time, a);
849
850         /* Determine chunk time
851          * ChunkTime = the time it takes the DCP to send one chunk of data
852          * to the LB which consists of pipeline delay and inter chunk gap
853          * sclk = system clock (MHz)
854          */
855         a.full = rfixed_const(600 * 1000);
856         chunk_time.full = rfixed_div(a, rdev->pm.sclk);
857         read_delay_latency.full = rfixed_const(1000);
858
859         /* Determine the worst case latency
860          * NumLinePair = Number of line pairs to request(1=2 lines, 2=4 lines)
861          * WorstCaseLatency = worst case time from urgent to when the MC starts
862          *                    to return data
863          * READ_DELAY_IDLE_MAX = constant of 1us
864          * ChunkTime = time it takes the DCP to send one chunk of data to the LB
865          *             which consists of pipeline delay and inter chunk gap
866          */
867         if (rfixed_trunc(wm->num_line_pair) > 1) {
868                 a.full = rfixed_const(3);
869                 wm->worst_case_latency.full = rfixed_mul(a, chunk_time);
870                 wm->worst_case_latency.full += read_delay_latency.full;
871         } else {
872                 wm->worst_case_latency.full = chunk_time.full + read_delay_latency.full;
873         }
874
875         /* Determine the tolerable latency
876          * TolerableLatency = Any given request has only 1 line time
877          *                    for the data to be returned
878          * LBRequestFifoDepth = Number of chunk requests the LB can
879          *                      put into the request FIFO for a display
880          *  LineTime = total time for one line of display
881          *  ChunkTime = the time it takes the DCP to send one chunk
882          *              of data to the LB which consists of
883          *  pipeline delay and inter chunk gap
884          */
885         if ((2+wm->lb_request_fifo_depth) >= rfixed_trunc(request_fifo_depth)) {
886                 tolerable_latency.full = line_time.full;
887         } else {
888                 tolerable_latency.full = rfixed_const(wm->lb_request_fifo_depth - 2);
889                 tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full;
890                 tolerable_latency.full = rfixed_mul(tolerable_latency, chunk_time);
891                 tolerable_latency.full = line_time.full - tolerable_latency.full;
892         }
893         /* We assume worst case 32bits (4 bytes) */
894         wm->dbpp.full = rfixed_const(2 * 16);
895
896         /* Determine the maximum priority mark
897          *  width = viewport width in pixels
898          */
899         a.full = rfixed_const(16);
900         wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
901         wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a);
902
903         /* Determine estimated width */
904         estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full;
905         estimated_width.full = rfixed_div(estimated_width, consumption_time);
906         if (rfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
907                 wm->priority_mark.full = rfixed_const(10);
908         } else {
909                 a.full = rfixed_const(16);
910                 wm->priority_mark.full = rfixed_div(estimated_width, a);
911                 wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full;
912         }
913 }
914
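/* Program LB_MAX_REQ_OUTSTANDING and the DxMODE_PRIORITY_A/B_CNT marks
 * from the per-CRTC watermarks, handling the one and two display cases
 * separately.
 */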
915 void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
916 {
917         struct drm_display_mode *mode0 = NULL;
918         struct drm_display_mode *mode1 = NULL;
919         struct rv515_watermark wm0;
920         struct rv515_watermark wm1;
921         u32 tmp;
922         fixed20_12 priority_mark02, priority_mark12, fill_rate;
923         fixed20_12 a, b;
924
925         if (rdev->mode_info.crtcs[0]->base.enabled)
926                 mode0 = &rdev->mode_info.crtcs[0]->base.mode;
927         if (rdev->mode_info.crtcs[1]->base.enabled)
928                 mode1 = &rdev->mode_info.crtcs[1]->base.mode;
929         rs690_line_buffer_adjust(rdev, mode0, mode1);
930
931         rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0);
932         rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1);
933
934         tmp = wm0.lb_request_fifo_depth;
935         tmp |= wm1.lb_request_fifo_depth << 16;
936         WREG32(LB_MAX_REQ_OUTSTANDING, tmp);
937
938         if (mode0 && mode1) {
939                 if (rfixed_trunc(wm0.dbpp) > 64)
940                         a.full = rfixed_div(wm0.dbpp, wm0.num_line_pair);
941                 else
942                         a.full = wm0.num_line_pair.full;
943                 if (rfixed_trunc(wm1.dbpp) > 64)
944                         b.full = rfixed_div(wm1.dbpp, wm1.num_line_pair);
945                 else
946                         b.full = wm1.num_line_pair.full;
947                 a.full += b.full;
948                 fill_rate.full = rfixed_div(wm0.sclk, a);
949                 if (wm0.consumption_rate.full > fill_rate.full) {
950                         b.full = wm0.consumption_rate.full - fill_rate.full;
951                         b.full = rfixed_mul(b, wm0.active_time);
952                         a.full = rfixed_const(16);
953                         b.full = rfixed_div(b, a);
954                         a.full = rfixed_mul(wm0.worst_case_latency,
955                                                 wm0.consumption_rate);
956                         priority_mark02.full = a.full + b.full;
957                 } else {
958                         a.full = rfixed_mul(wm0.worst_case_latency,
959                                                 wm0.consumption_rate);
960                         b.full = rfixed_const(16 * 1000);
961                         priority_mark02.full = rfixed_div(a, b);
962                 }
963                 if (wm1.consumption_rate.full > fill_rate.full) {
964                         b.full = wm1.consumption_rate.full - fill_rate.full;
965                         b.full = rfixed_mul(b, wm1.active_time);
966                         a.full = rfixed_const(16);
967                         b.full = rfixed_div(b, a);
968                         a.full = rfixed_mul(wm1.worst_case_latency,
969                                                 wm1.consumption_rate);
970                         priority_mark12.full = a.full + b.full;
971                 } else {
972                         a.full = rfixed_mul(wm1.worst_case_latency,
973                                                 wm1.consumption_rate);
974                         b.full = rfixed_const(16 * 1000);
975                         priority_mark12.full = rfixed_div(a, b);
976                 }
977                 if (wm0.priority_mark.full > priority_mark02.full)
978                         priority_mark02.full = wm0.priority_mark.full;
979                 if (rfixed_trunc(priority_mark02) < 0)
980                         priority_mark02.full = 0;
981                 if (wm0.priority_mark_max.full > priority_mark02.full)
982                         priority_mark02.full = wm0.priority_mark_max.full;
983                 if (wm1.priority_mark.full > priority_mark12.full)
984                         priority_mark12.full = wm1.priority_mark.full;
985                 if (rfixed_trunc(priority_mark12) < 0)
986                         priority_mark12.full = 0;
987                 if (wm1.priority_mark_max.full > priority_mark12.full)
988                         priority_mark12.full = wm1.priority_mark_max.full;
989                 WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02));
990                 WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02));
991                 WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12));
992                 WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12));
993         } else if (mode0) {
994                 if (rfixed_trunc(wm0.dbpp) > 64)
995                         a.full = rfixed_div(wm0.dbpp, wm0.num_line_pair);
996                 else
997                         a.full = wm0.num_line_pair.full;
998                 fill_rate.full = rfixed_div(wm0.sclk, a);
999                 if (wm0.consumption_rate.full > fill_rate.full) {
1000                         b.full = wm0.consumption_rate.full - fill_rate.full;
1001                         b.full = rfixed_mul(b, wm0.active_time);
1002                         a.full = rfixed_const(16);
1003                         b.full = rfixed_div(b, a);
1004                         a.full = rfixed_mul(wm0.worst_case_latency,
1005                                                 wm0.consumption_rate);
1006                         priority_mark02.full = a.full + b.full;
1007                 } else {
1008                         a.full = rfixed_mul(wm0.worst_case_latency,
1009                                                 wm0.consumption_rate);
1010                         b.full = rfixed_const(16);
1011                         priority_mark02.full = rfixed_div(a, b);
1012                 }
1013                 if (wm0.priority_mark.full > priority_mark02.full)
1014                         priority_mark02.full = wm0.priority_mark.full;
1015                 if (rfixed_trunc(priority_mark02) < 0)
1016                         priority_mark02.full = 0;
1017                 if (wm0.priority_mark_max.full > priority_mark02.full)
1018                         priority_mark02.full = wm0.priority_mark_max.full;
1019                 WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02));
1020                 WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02));
1021                 WREG32(D2MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF);
1022                 WREG32(D2MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF);
1023         } else {
1024                 if (rfixed_trunc(wm1.dbpp) > 64)
1025                         a.full = rfixed_div(wm1.dbpp, wm1.num_line_pair);
1026                 else
1027                         a.full = wm1.num_line_pair.full;
1028                 fill_rate.full = rfixed_div(wm1.sclk, a);
1029                 if (wm1.consumption_rate.full > fill_rate.full) {
1030                         b.full = wm1.consumption_rate.full - fill_rate.full;
1031                         b.full = rfixed_mul(b, wm1.active_time);
1032                         a.full = rfixed_const(16);
1033                         b.full = rfixed_div(b, a);
1034                         a.full = rfixed_mul(wm1.worst_case_latency,
1035                                                 wm1.consumption_rate);
1036                         priority_mark12.full = a.full + b.full;
1037                 } else {
1038                         a.full = rfixed_mul(wm1.worst_case_latency,
1039                                                 wm1.consumption_rate);
1040                         b.full = rfixed_const(16 * 1000);
1041                         priority_mark12.full = rfixed_div(a, b);
1042                 }
1043                 if (wm1.priority_mark.full > priority_mark12.full)
1044                         priority_mark12.full = wm1.priority_mark.full;
1045                 if (rfixed_trunc(priority_mark12) < 0)
1046                         priority_mark12.full = 0;
1047                 if (wm1.priority_mark_max.full > priority_mark12.full)
1048                         priority_mark12.full = wm1.priority_mark_max.full;
1049                 WREG32(D1MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF);
1050                 WREG32(D1MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF);
1051                 WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12));
1052                 WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12));
1053         }
1054 }
1055
1056 void rv515_bandwidth_update(struct radeon_device *rdev)
1057 {
1058         uint32_t tmp;
1059         struct drm_display_mode *mode0 = NULL;
1060         struct drm_display_mode *mode1 = NULL;
1061
1062         if (rdev->mode_info.crtcs[0]->base.enabled)
1063                 mode0 = &rdev->mode_info.crtcs[0]->base.mode;
1064         if (rdev->mode_info.crtcs[1]->base.enabled)
1065                 mode1 = &rdev->mode_info.crtcs[1]->base.mode;
1066         /*
1067          * Set display0/1 priority up in the memory controller for
1068          * these modes if the user specifies HIGH for the displaypriority
1069          * option.
1070          */
1071         if (rdev->disp_priority == 2) {
1072                 tmp = RREG32_MC(MC_MISC_LAT_TIMER);
1073                 tmp &= ~MC_DISP1R_INIT_LAT_MASK;
1074                 tmp &= ~MC_DISP0R_INIT_LAT_MASK;
1075                 if (mode1)
1076                         tmp |= (1 << MC_DISP1R_INIT_LAT_SHIFT);
1077                 if (mode0)
1078                         tmp |= (1 << MC_DISP0R_INIT_LAT_SHIFT);
1079                 WREG32_MC(MC_MISC_LAT_TIMER, tmp);
1080         }
1081         rv515_bandwidth_avivo_update(rdev);
1082 }