drm/radeon/kms: fix bandwidth computation on avivo hardware
[pandora-kernel.git] drivers/gpu/drm/radeon/rv515.c
1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <linux/seq_file.h>
29 #include "drmP.h"
30 #include "rv515r.h"
31 #include "radeon.h"
32 #include "radeon_share.h"
33
34 /* rv515 depends on : */
35 void r100_hdp_reset(struct radeon_device *rdev);
36 int r100_cp_reset(struct radeon_device *rdev);
37 int r100_rb2d_reset(struct radeon_device *rdev);
38 int r100_gui_wait_for_idle(struct radeon_device *rdev);
39 int r100_cp_init(struct radeon_device *rdev, unsigned ring_size);
40 int rv370_pcie_gart_enable(struct radeon_device *rdev);
41 void rv370_pcie_gart_disable(struct radeon_device *rdev);
42 void r420_pipes_init(struct radeon_device *rdev);
43 void rs600_mc_disable_clients(struct radeon_device *rdev);
44 void rs600_disable_vga(struct radeon_device *rdev);
45
46 /* This file gathers functions specific to:
47  * rv515
48  *
49  * Some of these functions might be used by newer ASICs.
50  */
51 int rv515_debugfs_pipes_info_init(struct radeon_device *rdev);
52 int rv515_debugfs_ga_info_init(struct radeon_device *rdev);
53 void rv515_gpu_init(struct radeon_device *rdev);
54 int rv515_mc_wait_for_idle(struct radeon_device *rdev);
55
56
57 /*
58  * MC
59  */
60 int rv515_mc_init(struct radeon_device *rdev)
61 {
62         uint32_t tmp;
63         int r;
64
65         if (r100_debugfs_rbbm_init(rdev)) {
66                 DRM_ERROR("Failed to register debugfs file for RBBM !\n");
67         }
68         if (rv515_debugfs_pipes_info_init(rdev)) {
69                 DRM_ERROR("Failed to register debugfs file for pipes !\n");
70         }
71         if (rv515_debugfs_ga_info_init(rdev)) {
72                 DRM_ERROR("Failed to register debugfs file for GA !\n");
73         }
74
75         rv515_gpu_init(rdev);
76         rv370_pcie_gart_disable(rdev);
77
78         /* Setup GPU memory space */
79         rdev->mc.vram_location = 0xFFFFFFFFUL;
80         rdev->mc.gtt_location = 0xFFFFFFFFUL;
81         if (rdev->flags & RADEON_IS_AGP) {
82                 r = radeon_agp_init(rdev);
83                 if (r) {
84                         printk(KERN_WARNING "[drm] Disabling AGP\n");
85                         rdev->flags &= ~RADEON_IS_AGP;
86                         rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
87                 } else {
88                         rdev->mc.gtt_location = rdev->mc.agp_base;
89                 }
90         }
91         r = radeon_mc_setup(rdev);
92         if (r) {
93                 return r;
94         }
95
96         /* Program GPU memory space */
97         rs600_mc_disable_clients(rdev);
98         if (rv515_mc_wait_for_idle(rdev)) {
99                 printk(KERN_WARNING "Failed to wait MC idle while "
100                        "programming pipes. Bad things might happen.\n");
101         }
102         /* Write VRAM size in case we are limiting it */
103         WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size);
104         tmp = REG_SET(MC_FB_START, rdev->mc.vram_location >> 16);
105         WREG32(0x134, tmp);
106         tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1;
107         tmp = REG_SET(MC_FB_TOP, tmp >> 16);
108         tmp |= REG_SET(MC_FB_START, rdev->mc.vram_location >> 16);
109         WREG32_MC(MC_FB_LOCATION, tmp);
110         WREG32(HDP_FB_LOCATION, rdev->mc.vram_location >> 16);
111         WREG32(0x310, rdev->mc.vram_location);
112         if (rdev->flags & RADEON_IS_AGP) {
113                 tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
114                 tmp = REG_SET(MC_AGP_TOP, tmp >> 16);
115                 tmp |= REG_SET(MC_AGP_START, rdev->mc.gtt_location >> 16);
116                 WREG32_MC(MC_AGP_LOCATION, tmp);
117                 WREG32_MC(MC_AGP_BASE, rdev->mc.agp_base);
118                 WREG32_MC(MC_AGP_BASE_2, 0);
119         } else {
120                 WREG32_MC(MC_AGP_LOCATION, 0x0FFFFFFF);
121                 WREG32_MC(MC_AGP_BASE, 0);
122                 WREG32_MC(MC_AGP_BASE_2, 0);
123         }
124         return 0;
125 }
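/* Worked example of the MC aperture packing above (illustrative numbers,
 * assuming the usual start-in-low-16-bits / top-in-high-16-bits layout of
 * MC_FB_LOCATION): with vram_location = 0 and vram_size = 256 MB,
 * MC_FB_START = 0x00000000 >> 16 = 0x0000 and
 * MC_FB_TOP   = (0x10000000 - 1) >> 16 = 0x0FFF,
 * so MC_FB_LOCATION is programmed to 0x0FFF0000 and HDP_FB_LOCATION to 0.
 */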
126
127 void rv515_mc_fini(struct radeon_device *rdev)
128 {
129         rv370_pcie_gart_disable(rdev);
130         radeon_gart_table_vram_free(rdev);
131         radeon_gart_fini(rdev);
132 }
133
134
135 /*
136  * Global GPU functions
137  */
138 void rv515_ring_start(struct radeon_device *rdev)
139 {
140         int r;
141
142         r = radeon_ring_lock(rdev, 64);
143         if (r) {
144                 return;
145         }
146         radeon_ring_write(rdev, PACKET0(ISYNC_CNTL, 0));
147         radeon_ring_write(rdev,
148                           ISYNC_ANY2D_IDLE3D |
149                           ISYNC_ANY3D_IDLE2D |
150                           ISYNC_WAIT_IDLEGUI |
151                           ISYNC_CPSCRATCH_IDLEGUI);
152         radeon_ring_write(rdev, PACKET0(WAIT_UNTIL, 0));
153         radeon_ring_write(rdev, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
154         radeon_ring_write(rdev, PACKET0(0x170C, 0));
155         radeon_ring_write(rdev, 1 << 31);
156         radeon_ring_write(rdev, PACKET0(GB_SELECT, 0));
157         radeon_ring_write(rdev, 0);
158         radeon_ring_write(rdev, PACKET0(GB_ENABLE, 0));
159         radeon_ring_write(rdev, 0);
160         radeon_ring_write(rdev, PACKET0(0x42C8, 0));
161         radeon_ring_write(rdev, (1 << rdev->num_gb_pipes) - 1);
162         radeon_ring_write(rdev, PACKET0(VAP_INDEX_OFFSET, 0));
163         radeon_ring_write(rdev, 0);
164         radeon_ring_write(rdev, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
165         radeon_ring_write(rdev, RB3D_DC_FLUSH | RB3D_DC_FREE);
166         radeon_ring_write(rdev, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
167         radeon_ring_write(rdev, ZC_FLUSH | ZC_FREE);
168         radeon_ring_write(rdev, PACKET0(WAIT_UNTIL, 0));
169         radeon_ring_write(rdev, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
170         radeon_ring_write(rdev, PACKET0(GB_AA_CONFIG, 0));
171         radeon_ring_write(rdev, 0);
172         radeon_ring_write(rdev, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
173         radeon_ring_write(rdev, RB3D_DC_FLUSH | RB3D_DC_FREE);
174         radeon_ring_write(rdev, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
175         radeon_ring_write(rdev, ZC_FLUSH | ZC_FREE);
176         radeon_ring_write(rdev, PACKET0(GB_MSPOS0, 0));
177         radeon_ring_write(rdev,
178                           ((6 << MS_X0_SHIFT) |
179                            (6 << MS_Y0_SHIFT) |
180                            (6 << MS_X1_SHIFT) |
181                            (6 << MS_Y1_SHIFT) |
182                            (6 << MS_X2_SHIFT) |
183                            (6 << MS_Y2_SHIFT) |
184                            (6 << MSBD0_Y_SHIFT) |
185                            (6 << MSBD0_X_SHIFT)));
186         radeon_ring_write(rdev, PACKET0(GB_MSPOS1, 0));
187         radeon_ring_write(rdev,
188                           ((6 << MS_X3_SHIFT) |
189                            (6 << MS_Y3_SHIFT) |
190                            (6 << MS_X4_SHIFT) |
191                            (6 << MS_Y4_SHIFT) |
192                            (6 << MS_X5_SHIFT) |
193                            (6 << MS_Y5_SHIFT) |
194                            (6 << MSBD1_SHIFT)));
195         radeon_ring_write(rdev, PACKET0(GA_ENHANCE, 0));
196         radeon_ring_write(rdev, GA_DEADLOCK_CNTL | GA_FASTSYNC_CNTL);
197         radeon_ring_write(rdev, PACKET0(GA_POLY_MODE, 0));
198         radeon_ring_write(rdev, FRONT_PTYPE_TRIANGE | BACK_PTYPE_TRIANGE);
199         radeon_ring_write(rdev, PACKET0(GA_ROUND_MODE, 0));
200         radeon_ring_write(rdev, GEOMETRY_ROUND_NEAREST | COLOR_ROUND_NEAREST);
201         radeon_ring_write(rdev, PACKET0(0x20C8, 0));
202         radeon_ring_write(rdev, 0);
203         radeon_ring_unlock_commit(rdev);
204 }
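/* Note on the ring writes above: in the usual radeon CP type-0 packet
 * encoding, PACKET0(reg, n) emits a header followed by n + 1 data dwords,
 * written to consecutive registers starting at reg.  Every PACKET0(..., 0)
 * in rv515_ring_start() is therefore paired with exactly one data dword on
 * the following radeon_ring_write() call.
 */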
205
206 void rv515_errata(struct radeon_device *rdev)
207 {
208         rdev->pll_errata = 0;
209 }
210
211 int rv515_mc_wait_for_idle(struct radeon_device *rdev)
212 {
213         unsigned i;
214         uint32_t tmp;
215
216         for (i = 0; i < rdev->usec_timeout; i++) {
217                 /* read MC_STATUS */
218                 tmp = RREG32_MC(MC_STATUS);
219                 if (tmp & MC_STATUS_IDLE) {
220                         return 0;
221                 }
222                 DRM_UDELAY(1);
223         }
224         return -1;
225 }
226
227 void rv515_gpu_init(struct radeon_device *rdev)
228 {
229         unsigned pipe_select_current, gb_pipe_select, tmp;
230
231         r100_hdp_reset(rdev);
232         r100_rb2d_reset(rdev);
233
234         if (r100_gui_wait_for_idle(rdev)) {
235                 printk(KERN_WARNING "Failed to wait GUI idle while "
236                        "resetting GPU. Bad things might happen.\n");
237         }
238
239         rs600_disable_vga(rdev);
240
241         r420_pipes_init(rdev);
242         gb_pipe_select = RREG32(0x402C);
243         tmp = RREG32(0x170C);
244         pipe_select_current = (tmp >> 2) & 3;
245         tmp = (1 << pipe_select_current) |
246               (((gb_pipe_select >> 8) & 0xF) << 4);
247         WREG32_PLL(0x000D, tmp);
248         if (r100_gui_wait_for_idle(rdev)) {
249                 printk(KERN_WARNING "Failed to wait GUI idle while "
250                        "resetting GPU. Bad things might happen.\n");
251         }
252         if (rv515_mc_wait_for_idle(rdev)) {
253                 printk(KERN_WARNING "Failed to wait MC idle while "
254                        "programming pipes. Bad things might happen.\n");
255         }
256 }
257
258 int rv515_ga_reset(struct radeon_device *rdev)
259 {
260         uint32_t tmp;
261         bool reinit_cp;
262         int i;
263
264         reinit_cp = rdev->cp.ready;
265         rdev->cp.ready = false;
266         for (i = 0; i < rdev->usec_timeout; i++) {
267                 WREG32(CP_CSQ_MODE, 0);
268                 WREG32(CP_CSQ_CNTL, 0);
269                 WREG32(RBBM_SOFT_RESET, 0x32005);
270                 (void)RREG32(RBBM_SOFT_RESET);
271                 udelay(200);
272                 WREG32(RBBM_SOFT_RESET, 0);
273                 /* Wait to prevent race in RBBM_STATUS */
274                 mdelay(1);
275                 tmp = RREG32(RBBM_STATUS);
276                 if (tmp & ((1 << 20) | (1 << 26))) {
277                         DRM_ERROR("VAP & CP still busy (RBBM_STATUS=0x%08X)\n", tmp);
278                         /* GA still busy, soft reset it */
279                         WREG32(0x429C, 0x200);
280                         WREG32(VAP_PVS_STATE_FLUSH_REG, 0);
281                         WREG32(0x43E0, 0);
282                         WREG32(0x43E4, 0);
283                         WREG32(0x24AC, 0);
284                 }
285                 /* Wait to prevent race in RBBM_STATUS */
286                 mdelay(1);
287                 tmp = RREG32(RBBM_STATUS);
288                 if (!(tmp & ((1 << 20) | (1 << 26)))) {
289                         break;
290                 }
291         }
292         for (i = 0; i < rdev->usec_timeout; i++) {
293                 tmp = RREG32(RBBM_STATUS);
294                 if (!(tmp & ((1 << 20) | (1 << 26)))) {
295                         DRM_INFO("GA reset succeeded (RBBM_STATUS=0x%08X)\n",
296                                  tmp);
297                         DRM_INFO("GA_IDLE=0x%08X\n", RREG32(0x425C));
298                         DRM_INFO("RB3D_RESET_STATUS=0x%08X\n", RREG32(0x46f0));
299                         DRM_INFO("ISYNC_CNTL=0x%08X\n", RREG32(0x1724));
300                         if (reinit_cp) {
301                                 return r100_cp_init(rdev, rdev->cp.ring_size);
302                         }
303                         return 0;
304                 }
305                 DRM_UDELAY(1);
306         }
307         tmp = RREG32(RBBM_STATUS);
308         DRM_ERROR("Failed to reset GA ! (RBBM_STATUS=0x%08X)\n", tmp);
309         return -1;
310 }
311
312 int rv515_gpu_reset(struct radeon_device *rdev)
313 {
314         uint32_t status;
315
316         /* reset order likely matters */
317         status = RREG32(RBBM_STATUS);
318         /* reset HDP */
319         r100_hdp_reset(rdev);
320         /* reset rb2d */
321         if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
322                 r100_rb2d_reset(rdev);
323         }
324         /* reset GA */
325         if (status & ((1 << 20) | (1 << 26))) {
326                 rv515_ga_reset(rdev);
327         }
328         /* reset CP */
329         status = RREG32(RBBM_STATUS);
330         if (status & (1 << 16)) {
331                 r100_cp_reset(rdev);
332         }
333         /* Check if GPU is idle */
334         status = RREG32(RBBM_STATUS);
335         if (status & (1 << 31)) {
336                 DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
337                 return -1;
338         }
339         DRM_INFO("GPU reset succeeded (RBBM_STATUS=0x%08X)\n", status);
340         return 0;
341 }
342
343
344 /*
345  * VRAM info
346  */
347 static void rv515_vram_get_type(struct radeon_device *rdev)
348 {
349         uint32_t tmp;
350
351         rdev->mc.vram_width = 128;
352         rdev->mc.vram_is_ddr = true;
353         tmp = RREG32_MC(RV515_MC_CNTL) & MEM_NUM_CHANNELS_MASK;
354         switch (tmp) {
355         case 0:
356                 rdev->mc.vram_width = 64;
357                 break;
358         case 1:
359                 rdev->mc.vram_width = 128;
360                 break;
361         default:
362                 rdev->mc.vram_width = 128;
363                 break;
364         }
365 }
366
367 void rv515_vram_info(struct radeon_device *rdev)
368 {
369         fixed20_12 a;
370
371         rv515_vram_get_type(rdev);
372         rdev->mc.vram_size = RREG32(CONFIG_MEMSIZE);
373
374         rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
375         rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
376         /* FIXME: we should enforce default clock in case GPU is not in
377          * default setup
378          */
379         a.full = rfixed_const(100);
380         rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
381         rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
382 }
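/* fixed20_12 is a 20.12 fixed-point format: rfixed_const(x) stores x << 12.
 * Worked example for the sclk computation above, assuming default_sclk is
 * reported in units of 10 kHz (as elsewhere in the radeon driver): with
 * default_sclk = 40000 (i.e. 400 MHz), sclk = 40000 / 100 = 400 in fixed
 * point, so rdev->pm.sclk ends up holding the engine clock in MHz.
 */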
383
384
385 /*
386  * Indirect registers accessor
387  */
388 uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg)
389 {
390         uint32_t r;
391
392         WREG32(MC_IND_INDEX, 0x7f0000 | (reg & 0xffff));
393         r = RREG32(MC_IND_DATA);
394         WREG32(MC_IND_INDEX, 0);
395         return r;
396 }
397
398 void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
399 {
400         WREG32(MC_IND_INDEX, 0xff0000 | ((reg) & 0xffff));
401         WREG32(MC_IND_DATA, (v));
402         WREG32(MC_IND_INDEX, 0);
403 }
404
405 uint32_t rv515_pcie_rreg(struct radeon_device *rdev, uint32_t reg)
406 {
407         uint32_t r;
408
409         WREG32(PCIE_INDEX, ((reg) & 0x7ff));
410         (void)RREG32(PCIE_INDEX);
411         r = RREG32(PCIE_DATA);
412         return r;
413 }
414
415 void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
416 {
417         WREG32(PCIE_INDEX, ((reg) & 0x7ff));
418         (void)RREG32(PCIE_INDEX);
419         WREG32(PCIE_DATA, (v));
420         (void)RREG32(PCIE_DATA);
421 }
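/* Minimal usage sketch for the indirect accessors above (the helper below is
 * hypothetical and not part of the driver): a read-modify-write of an MC
 * register goes through MC_IND_INDEX/MC_IND_DATA via rv515_mc_rreg() and
 * rv515_mc_wreg().
 *
 *	static void rv515_mc_rmw_sketch(struct radeon_device *rdev,
 *					uint32_t reg, uint32_t clr, uint32_t set)
 *	{
 *		uint32_t tmp = rv515_mc_rreg(rdev, reg);
 *
 *		tmp &= ~clr;
 *		tmp |= set;
 *		rv515_mc_wreg(rdev, reg, tmp);
 *	}
 */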
422
423
424 /*
425  * Debugfs info
426  */
427 #if defined(CONFIG_DEBUG_FS)
428 static int rv515_debugfs_pipes_info(struct seq_file *m, void *data)
429 {
430         struct drm_info_node *node = (struct drm_info_node *) m->private;
431         struct drm_device *dev = node->minor->dev;
432         struct radeon_device *rdev = dev->dev_private;
433         uint32_t tmp;
434
435         tmp = RREG32(GB_PIPE_SELECT);
436         seq_printf(m, "GB_PIPE_SELECT 0x%08x\n", tmp);
437         tmp = RREG32(SU_REG_DEST);
438         seq_printf(m, "SU_REG_DEST 0x%08x\n", tmp);
439         tmp = RREG32(GB_TILE_CONFIG);
440         seq_printf(m, "GB_TILE_CONFIG 0x%08x\n", tmp);
441         tmp = RREG32(DST_PIPE_CONFIG);
442         seq_printf(m, "DST_PIPE_CONFIG 0x%08x\n", tmp);
443         return 0;
444 }
445
446 static int rv515_debugfs_ga_info(struct seq_file *m, void *data)
447 {
448         struct drm_info_node *node = (struct drm_info_node *) m->private;
449         struct drm_device *dev = node->minor->dev;
450         struct radeon_device *rdev = dev->dev_private;
451         uint32_t tmp;
452
453         tmp = RREG32(0x2140);
454         seq_printf(m, "VAP_CNTL_STATUS 0x%08x\n", tmp);
455         radeon_gpu_reset(rdev);
456         tmp = RREG32(0x425C);
457         seq_printf(m, "GA_IDLE 0x%08x\n", tmp);
458         return 0;
459 }
460
461 static struct drm_info_list rv515_pipes_info_list[] = {
462         {"rv515_pipes_info", rv515_debugfs_pipes_info, 0, NULL},
463 };
464
465 static struct drm_info_list rv515_ga_info_list[] = {
466         {"rv515_ga_info", rv515_debugfs_ga_info, 0, NULL},
467 };
468 #endif
469
470 int rv515_debugfs_pipes_info_init(struct radeon_device *rdev)
471 {
472 #if defined(CONFIG_DEBUG_FS)
473         return radeon_debugfs_add_files(rdev, rv515_pipes_info_list, 1);
474 #else
475         return 0;
476 #endif
477 }
478
479 int rv515_debugfs_ga_info_init(struct radeon_device *rdev)
480 {
481 #if defined(CONFIG_DEBUG_FS)
482         return radeon_debugfs_add_files(rdev, rv515_ga_info_list, 1);
483 #else
484         return 0;
485 #endif
486 }
487
488
489 /*
490  * Asic initialization
491  */
492 static const unsigned r500_reg_safe_bm[219] = {
493         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
494         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
495         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
496         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
497         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
498         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
499         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
500         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
501         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
502         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
503         0x17FF1FFF, 0xFFFFFFFC, 0xFFFFFFFF, 0xFF30FFBF,
504         0xFFFFFFF8, 0xC3E6FFFF, 0xFFFFF6DF, 0xFFFFFFFF,
505         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
506         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
507         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF03F,
508         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
509         0xFFFFFFFF, 0xFFFFEFCE, 0xF00EBFFF, 0x007C0000,
510         0xF0000038, 0xFF000009, 0xFFFFFFFF, 0xFFFFFFFF,
511         0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF,
512         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
513         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
514         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
515         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
516         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
517         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
518         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
519         0xFFFFF7FF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
520         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
521         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
522         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
523         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
524         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
525         0x1FFFFC78, 0xFFFFE000, 0xFFFFFFFE, 0xFFFFFFFF,
526         0x38CF8F50, 0xFFF88082, 0xFF0000FC, 0xFAE009FF,
527         0x0000FFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000,
528         0xFFFF8CFC, 0xFFFFC1FF, 0xFFFFFFFF, 0xFFFFFFFF,
529         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
530         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFF80FFFF,
531         0x00000000, 0x00000000, 0x00000000, 0x00000000,
532         0x0003FC01, 0x3FFFFCF8, 0xFE800B19, 0xFFFFFFFF,
533         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
534         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
535         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
536         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
537         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
538         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
539         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
540         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
541         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
542         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
543         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
544         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
545         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
546         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
547         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
548 };
549
550 int rv515_init(struct radeon_device *rdev)
551 {
552         rdev->config.r300.reg_safe_bm = r500_reg_safe_bm;
553         rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r500_reg_safe_bm);
554         return 0;
555 }
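/* Sketch of how a bitmap like r500_reg_safe_bm is typically indexed by the
 * command-stream checker (shown as an assumption for illustration, not a
 * quote of the shared r100/r300 CS code): one bit per dword register,
 * selected by the register offset divided by four.
 *
 *	unsigned bit  = reg >> 2;
 *	unsigned word = bit >> 5;
 *	bool flagged  = r500_reg_safe_bm[word] & (1u << (bit & 31));
 *
 * 219 words * 32 bits span register offsets up to roughly 0x6D7C; the bit
 * decides whether a register written from a userspace command stream is
 * passed straight through or routed to dedicated validation.
 */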
556
557 void atom_rv515_force_tv_scaler(struct radeon_device *rdev)
558 {
559
560         WREG32(0x659C, 0x0);
561         WREG32(0x6594, 0x705);
562         WREG32(0x65A4, 0x10001);
563         WREG32(0x65D8, 0x0);
564         WREG32(0x65B0, 0x0);
565         WREG32(0x65C0, 0x0);
566         WREG32(0x65D4, 0x0);
567         WREG32(0x6578, 0x0);
568         WREG32(0x657C, 0x841880A8);
569         WREG32(0x6578, 0x1);
570         WREG32(0x657C, 0x84208680);
571         WREG32(0x6578, 0x2);
572         WREG32(0x657C, 0xBFF880B0);
573         WREG32(0x6578, 0x100);
574         WREG32(0x657C, 0x83D88088);
575         WREG32(0x6578, 0x101);
576         WREG32(0x657C, 0x84608680);
577         WREG32(0x6578, 0x102);
578         WREG32(0x657C, 0xBFF080D0);
579         WREG32(0x6578, 0x200);
580         WREG32(0x657C, 0x83988068);
581         WREG32(0x6578, 0x201);
582         WREG32(0x657C, 0x84A08680);
583         WREG32(0x6578, 0x202);
584         WREG32(0x657C, 0xBFF080F8);
585         WREG32(0x6578, 0x300);
586         WREG32(0x657C, 0x83588058);
587         WREG32(0x6578, 0x301);
588         WREG32(0x657C, 0x84E08660);
589         WREG32(0x6578, 0x302);
590         WREG32(0x657C, 0xBFF88120);
591         WREG32(0x6578, 0x400);
592         WREG32(0x657C, 0x83188040);
593         WREG32(0x6578, 0x401);
594         WREG32(0x657C, 0x85008660);
595         WREG32(0x6578, 0x402);
596         WREG32(0x657C, 0xBFF88150);
597         WREG32(0x6578, 0x500);
598         WREG32(0x657C, 0x82D88030);
599         WREG32(0x6578, 0x501);
600         WREG32(0x657C, 0x85408640);
601         WREG32(0x6578, 0x502);
602         WREG32(0x657C, 0xBFF88180);
603         WREG32(0x6578, 0x600);
604         WREG32(0x657C, 0x82A08018);
605         WREG32(0x6578, 0x601);
606         WREG32(0x657C, 0x85808620);
607         WREG32(0x6578, 0x602);
608         WREG32(0x657C, 0xBFF081B8);
609         WREG32(0x6578, 0x700);
610         WREG32(0x657C, 0x82608010);
611         WREG32(0x6578, 0x701);
612         WREG32(0x657C, 0x85A08600);
613         WREG32(0x6578, 0x702);
614         WREG32(0x657C, 0x800081F0);
615         WREG32(0x6578, 0x800);
616         WREG32(0x657C, 0x8228BFF8);
617         WREG32(0x6578, 0x801);
618         WREG32(0x657C, 0x85E085E0);
619         WREG32(0x6578, 0x802);
620         WREG32(0x657C, 0xBFF88228);
621         WREG32(0x6578, 0x10000);
622         WREG32(0x657C, 0x82A8BF00);
623         WREG32(0x6578, 0x10001);
624         WREG32(0x657C, 0x82A08CC0);
625         WREG32(0x6578, 0x10002);
626         WREG32(0x657C, 0x8008BEF8);
627         WREG32(0x6578, 0x10100);
628         WREG32(0x657C, 0x81F0BF28);
629         WREG32(0x6578, 0x10101);
630         WREG32(0x657C, 0x83608CA0);
631         WREG32(0x6578, 0x10102);
632         WREG32(0x657C, 0x8018BED0);
633         WREG32(0x6578, 0x10200);
634         WREG32(0x657C, 0x8148BF38);
635         WREG32(0x6578, 0x10201);
636         WREG32(0x657C, 0x84408C80);
637         WREG32(0x6578, 0x10202);
638         WREG32(0x657C, 0x8008BEB8);
639         WREG32(0x6578, 0x10300);
640         WREG32(0x657C, 0x80B0BF78);
641         WREG32(0x6578, 0x10301);
642         WREG32(0x657C, 0x85008C20);
643         WREG32(0x6578, 0x10302);
644         WREG32(0x657C, 0x8020BEA0);
645         WREG32(0x6578, 0x10400);
646         WREG32(0x657C, 0x8028BF90);
647         WREG32(0x6578, 0x10401);
648         WREG32(0x657C, 0x85E08BC0);
649         WREG32(0x6578, 0x10402);
650         WREG32(0x657C, 0x8018BE90);
651         WREG32(0x6578, 0x10500);
652         WREG32(0x657C, 0xBFB8BFB0);
653         WREG32(0x6578, 0x10501);
654         WREG32(0x657C, 0x86C08B40);
655         WREG32(0x6578, 0x10502);
656         WREG32(0x657C, 0x8010BE90);
657         WREG32(0x6578, 0x10600);
658         WREG32(0x657C, 0xBF58BFC8);
659         WREG32(0x6578, 0x10601);
660         WREG32(0x657C, 0x87A08AA0);
661         WREG32(0x6578, 0x10602);
662         WREG32(0x657C, 0x8010BE98);
663         WREG32(0x6578, 0x10700);
664         WREG32(0x657C, 0xBF10BFF0);
665         WREG32(0x6578, 0x10701);
666         WREG32(0x657C, 0x886089E0);
667         WREG32(0x6578, 0x10702);
668         WREG32(0x657C, 0x8018BEB0);
669         WREG32(0x6578, 0x10800);
670         WREG32(0x657C, 0xBED8BFE8);
671         WREG32(0x6578, 0x10801);
672         WREG32(0x657C, 0x89408940);
673         WREG32(0x6578, 0x10802);
674         WREG32(0x657C, 0xBFE8BED8);
675         WREG32(0x6578, 0x20000);
676         WREG32(0x657C, 0x80008000);
677         WREG32(0x6578, 0x20001);
678         WREG32(0x657C, 0x90008000);
679         WREG32(0x6578, 0x20002);
680         WREG32(0x657C, 0x80008000);
681         WREG32(0x6578, 0x20003);
682         WREG32(0x657C, 0x80008000);
683         WREG32(0x6578, 0x20100);
684         WREG32(0x657C, 0x80108000);
685         WREG32(0x6578, 0x20101);
686         WREG32(0x657C, 0x8FE0BF70);
687         WREG32(0x6578, 0x20102);
688         WREG32(0x657C, 0xBFE880C0);
689         WREG32(0x6578, 0x20103);
690         WREG32(0x657C, 0x80008000);
691         WREG32(0x6578, 0x20200);
692         WREG32(0x657C, 0x8018BFF8);
693         WREG32(0x6578, 0x20201);
694         WREG32(0x657C, 0x8F80BF08);
695         WREG32(0x6578, 0x20202);
696         WREG32(0x657C, 0xBFD081A0);
697         WREG32(0x6578, 0x20203);
698         WREG32(0x657C, 0xBFF88000);
699         WREG32(0x6578, 0x20300);
700         WREG32(0x657C, 0x80188000);
701         WREG32(0x6578, 0x20301);
702         WREG32(0x657C, 0x8EE0BEC0);
703         WREG32(0x6578, 0x20302);
704         WREG32(0x657C, 0xBFB082A0);
705         WREG32(0x6578, 0x20303);
706         WREG32(0x657C, 0x80008000);
707         WREG32(0x6578, 0x20400);
708         WREG32(0x657C, 0x80188000);
709         WREG32(0x6578, 0x20401);
710         WREG32(0x657C, 0x8E00BEA0);
711         WREG32(0x6578, 0x20402);
712         WREG32(0x657C, 0xBF8883C0);
713         WREG32(0x6578, 0x20403);
714         WREG32(0x657C, 0x80008000);
715         WREG32(0x6578, 0x20500);
716         WREG32(0x657C, 0x80188000);
717         WREG32(0x6578, 0x20501);
718         WREG32(0x657C, 0x8D00BE90);
719         WREG32(0x6578, 0x20502);
720         WREG32(0x657C, 0xBF588500);
721         WREG32(0x6578, 0x20503);
722         WREG32(0x657C, 0x80008008);
723         WREG32(0x6578, 0x20600);
724         WREG32(0x657C, 0x80188000);
725         WREG32(0x6578, 0x20601);
726         WREG32(0x657C, 0x8BC0BE98);
727         WREG32(0x6578, 0x20602);
728         WREG32(0x657C, 0xBF308660);
729         WREG32(0x6578, 0x20603);
730         WREG32(0x657C, 0x80008008);
731         WREG32(0x6578, 0x20700);
732         WREG32(0x657C, 0x80108000);
733         WREG32(0x6578, 0x20701);
734         WREG32(0x657C, 0x8A80BEB0);
735         WREG32(0x6578, 0x20702);
736         WREG32(0x657C, 0xBF0087C0);
737         WREG32(0x6578, 0x20703);
738         WREG32(0x657C, 0x80008008);
739         WREG32(0x6578, 0x20800);
740         WREG32(0x657C, 0x80108000);
741         WREG32(0x6578, 0x20801);
742         WREG32(0x657C, 0x8920BED0);
743         WREG32(0x6578, 0x20802);
744         WREG32(0x657C, 0xBED08920);
745         WREG32(0x6578, 0x20803);
746         WREG32(0x657C, 0x80008010);
747         WREG32(0x6578, 0x30000);
748         WREG32(0x657C, 0x90008000);
749         WREG32(0x6578, 0x30001);
750         WREG32(0x657C, 0x80008000);
751         WREG32(0x6578, 0x30100);
752         WREG32(0x657C, 0x8FE0BF90);
753         WREG32(0x6578, 0x30101);
754         WREG32(0x657C, 0xBFF880A0);
755         WREG32(0x6578, 0x30200);
756         WREG32(0x657C, 0x8F60BF40);
757         WREG32(0x6578, 0x30201);
758         WREG32(0x657C, 0xBFE88180);
759         WREG32(0x6578, 0x30300);
760         WREG32(0x657C, 0x8EC0BF00);
761         WREG32(0x6578, 0x30301);
762         WREG32(0x657C, 0xBFC88280);
763         WREG32(0x6578, 0x30400);
764         WREG32(0x657C, 0x8DE0BEE0);
765         WREG32(0x6578, 0x30401);
766         WREG32(0x657C, 0xBFA083A0);
767         WREG32(0x6578, 0x30500);
768         WREG32(0x657C, 0x8CE0BED0);
769         WREG32(0x6578, 0x30501);
770         WREG32(0x657C, 0xBF7884E0);
771         WREG32(0x6578, 0x30600);
772         WREG32(0x657C, 0x8BA0BED8);
773         WREG32(0x6578, 0x30601);
774         WREG32(0x657C, 0xBF508640);
775         WREG32(0x6578, 0x30700);
776         WREG32(0x657C, 0x8A60BEE8);
777         WREG32(0x6578, 0x30701);
778         WREG32(0x657C, 0xBF2087A0);
779         WREG32(0x6578, 0x30800);
780         WREG32(0x657C, 0x8900BF00);
781         WREG32(0x6578, 0x30801);
782         WREG32(0x657C, 0xBF008900);
783 }
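/* The long sequence above follows a single pattern: 0x6578 acts as an index
 * register and 0x657C as its data port, uploading what appear to be TV
 * scaler filter coefficients.  A hypothetical table-driven equivalent (names
 * and structure are illustrative only, not part of the driver):
 *
 *	struct coef { u32 index; u32 value; };
 *
 *	static void write_coefs(struct radeon_device *rdev,
 *				const struct coef *tbl, unsigned count)
 *	{
 *		unsigned i;
 *
 *		for (i = 0; i < count; i++) {
 *			WREG32(0x6578, tbl[i].index);
 *			WREG32(0x657C, tbl[i].value);
 *		}
 *	}
 */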
784
785 struct rv515_watermark {
786         u32        lb_request_fifo_depth;
787         fixed20_12 num_line_pair;
788         fixed20_12 estimated_width;
789         fixed20_12 worst_case_latency;
790         fixed20_12 consumption_rate;
791         fixed20_12 active_time;
792         fixed20_12 dbpp;
793         fixed20_12 priority_mark_max;
794         fixed20_12 priority_mark;
795         fixed20_12 sclk;
796 };
797
798 void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
799                                   struct radeon_crtc *crtc,
800                                   struct rv515_watermark *wm)
801 {
802         struct drm_display_mode *mode = &crtc->base.mode;
803         fixed20_12 a, b, c;
804         fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width;
805         fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency;
806
807         if (!crtc->base.enabled) {
808                 /* FIXME: wouldn't it be better to set the priority mark to maximum? */
809                 wm->lb_request_fifo_depth = 4;
810                 return;
811         }
812
813         if (crtc->vsc.full > rfixed_const(2))
814                 wm->num_line_pair.full = rfixed_const(2);
815         else
816                 wm->num_line_pair.full = rfixed_const(1);
817
818         b.full = rfixed_const(mode->crtc_hdisplay);
819         c.full = rfixed_const(256);
820         a.full = rfixed_mul(wm->num_line_pair, b);
821         request_fifo_depth.full = rfixed_div(a, c);
822         if (a.full < rfixed_const(4)) {
823                 wm->lb_request_fifo_depth = 4;
824         } else {
825                 wm->lb_request_fifo_depth = rfixed_trunc(request_fifo_depth);
826         }
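        /* Worked example (illustrative numbers): for a 1280-wide mode with
         * num_line_pair = 1, a = 1 * 1280 = 1280 and request_fifo_depth =
         * 1280 / 256 = 5, so lb_request_fifo_depth ends up as 5.
         */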
827
828         /* Determine consumption rate
829          *  pclk = pixel clock period(ns) = 1000 / (mode.clock / 1000)
830          *  vtaps = number of vertical taps,
831          *  vsc = vertical scaling ratio, defined as source/destination
832          *  hsc = horizontal scaling ratio, defined as source/destination
833          */
834         a.full = rfixed_const(mode->clock);
835         b.full = rfixed_const(1000);
836         a.full = rfixed_div(a, b);
837         pclk.full = rfixed_div(b, a);
838         if (crtc->rmx_type != RMX_OFF) {
839                 b.full = rfixed_const(2);
840                 if (crtc->vsc.full > b.full)
841                         b.full = crtc->vsc.full;
842                 b.full = rfixed_mul(b, crtc->hsc);
843                 c.full = rfixed_const(2);
844                 b.full = rfixed_div(b, c);
845                 consumption_time.full = rfixed_div(pclk, b);
846         } else {
847                 consumption_time.full = pclk.full;
848         }
849         a.full = rfixed_const(1);
850         wm->consumption_rate.full = rfixed_div(a, consumption_time);
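        /* Worked example: a 1280x1024@60 mode has mode->clock = 108000 (kHz),
         * so a = 108000 / 1000 = 108 and pclk = 1000 / 108 ~= 9.26 ns per
         * pixel.  With no scaling (RMX_OFF), consumption_time = pclk and
         * consumption_rate ~= 0.108 pixels per ns.
         */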
851
852
853         /* Determine line time
854          *  LineTime = total time for one line of display
855          *  htotal = total number of horizontal pixels
856          *  pclk = pixel clock period(ns)
857          */
858         a.full = rfixed_const(crtc->base.mode.crtc_htotal);
859         line_time.full = rfixed_mul(a, pclk);
860
861         /* Determine active time
862          *  ActiveTime = time of active region of display within one line,
863          *  hactive = total number of horizontal active pixels
864          *  htotal = total number of horizontal pixels
865          */
866         a.full = rfixed_const(crtc->base.mode.crtc_htotal);
867         b.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
868         wm->active_time.full = rfixed_mul(line_time, b);
869         wm->active_time.full = rfixed_div(wm->active_time, a);
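        /* Continuing the example: with crtc_htotal = 1688, line_time =
         * 1688 * 9.26 ~= 15629 ns, and active_time = line_time * 1280 / 1688
         * = 1280 * 9.26 ~= 11852 ns.
         */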
870
871         /* Determine chunk time
872          * ChunkTime = the time it takes the DCP to send one chunk of data
873          * to the LB which consists of pipeline delay and inter chunk gap
874          * sclk = system clock (MHz)
875          */
876         a.full = rfixed_const(600 * 1000);
877         chunk_time.full = rfixed_div(a, rdev->pm.sclk);
878         read_delay_latency.full = rfixed_const(1000);
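        /* Continuing the example, and assuming sclk ~= 400 (MHz) as computed
         * in rv515_vram_info(): chunk_time = 600000 / 400 = 1500 and
         * read_delay_latency = 1000 (both in ns).
         */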
879
880         /* Determine the worst case latency
881          * NumLinePair = Number of line pairs to request (1=2 lines, 2=4 lines)
882          * WorstCaseLatency = worst case time from urgent to when the MC starts
883          *                    to return data
884          * READ_DELAY_IDLE_MAX = constant of 1us
885          * ChunkTime = time it takes the DCP to send one chunk of data to the LB
886          *             which consists of pipeline delay and inter chunk gap
887          */
888         if (rfixed_trunc(wm->num_line_pair) > 1) {
889                 a.full = rfixed_const(3);
890                 wm->worst_case_latency.full = rfixed_mul(a, chunk_time);
891                 wm->worst_case_latency.full += read_delay_latency.full;
892         } else {
893                 wm->worst_case_latency.full = chunk_time.full + read_delay_latency.full;
894         }
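        /* Continuing the example: num_line_pair = 1, so worst_case_latency =
         * chunk_time + read_delay_latency = 1500 + 1000 = 2500 ns.
         */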
895
896         /* Determine the tolerable latency
897          * TolerableLatency = Any given request has only 1 line time
898          *                    for the data to be returned
899          * LBRequestFifoDepth = Number of chunk requests the LB can
900          *                      put into the request FIFO for a display
901          *  LineTime = total time for one line of display
902          *  ChunkTime = the time it takes the DCP to send one chunk
903          *              of data to the LB which consists of
904          *  pipeline delay and inter chunk gap
905          */
906         if ((2+wm->lb_request_fifo_depth) >= rfixed_trunc(request_fifo_depth)) {
907                 tolerable_latency.full = line_time.full;
908         } else {
909                 tolerable_latency.full = rfixed_const(wm->lb_request_fifo_depth - 2);
910                 tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full;
911                 tolerable_latency.full = rfixed_mul(tolerable_latency, chunk_time);
912                 tolerable_latency.full = line_time.full - tolerable_latency.full;
913         }
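        /* Continuing the example: lb_request_fifo_depth = 5 and
         * request_fifo_depth = 5, so 2 + 5 >= 5 and tolerable_latency simply
         * equals line_time ~= 15629 ns.
         */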
914         /* We assume a worst case of 32 bits (4 bytes) per pixel */
915         wm->dbpp.full = rfixed_const(2 * 16);
916
917         /* Determine the maximum priority mark
918          *  width = viewport width in pixels
919          */
920         a.full = rfixed_const(16);
921         wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
922         wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a);
923
924         /* Determine estimated width */
925         estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full;
926         estimated_width.full = rfixed_div(estimated_width, consumption_time);
927         if (rfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
928                 wm->priority_mark.full = rfixed_const(10);
929         } else {
930                 a.full = rfixed_const(16);
931                 wm->priority_mark.full = rfixed_div(estimated_width, a);
932                 wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full;
933         }
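        /* Continuing the example: priority_mark_max = 1280 / 16 = 80 and
         * estimated_width = (15629 - 2500) / 9.26 ~= 1417, which exceeds the
         * 1280-pixel display width, so priority_mark falls back to 10.
         */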
934 }
935
936 void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
937 {
938         struct drm_display_mode *mode0 = NULL;
939         struct drm_display_mode *mode1 = NULL;
940         struct rv515_watermark wm0;
941         struct rv515_watermark wm1;
942         u32 tmp;
943         fixed20_12 priority_mark02, priority_mark12, fill_rate;
944         fixed20_12 a, b;
945
946         if (rdev->mode_info.crtcs[0]->base.enabled)
947                 mode0 = &rdev->mode_info.crtcs[0]->base.mode;
948         if (rdev->mode_info.crtcs[1]->base.enabled)
949                 mode1 = &rdev->mode_info.crtcs[1]->base.mode;
950         rs690_line_buffer_adjust(rdev, mode0, mode1);
951
952         rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0);
953         rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1);
954
955         tmp = wm0.lb_request_fifo_depth;
956         tmp |= wm1.lb_request_fifo_depth << 16;
957         WREG32(LB_MAX_REQ_OUTSTANDING, tmp);
958
959         if (mode0 && mode1) {
960                 if (rfixed_trunc(wm0.dbpp) > 64)
961                         a.full = rfixed_div(wm0.dbpp, wm0.num_line_pair);
962                 else
963                         a.full = wm0.num_line_pair.full;
964                 if (rfixed_trunc(wm1.dbpp) > 64)
965                         b.full = rfixed_div(wm1.dbpp, wm1.num_line_pair);
966                 else
967                         b.full = wm1.num_line_pair.full;
968                 a.full += b.full;
969                 fill_rate.full = rfixed_div(wm0.sclk, a);
970                 if (wm0.consumption_rate.full > fill_rate.full) {
971                         b.full = wm0.consumption_rate.full - fill_rate.full;
972                         b.full = rfixed_mul(b, wm0.active_time);
973                         a.full = rfixed_const(16);
974                         b.full = rfixed_div(b, a);
975                         a.full = rfixed_mul(wm0.worst_case_latency,
976                                                 wm0.consumption_rate);
977                         priority_mark02.full = a.full + b.full;
978                 } else {
979                         a.full = rfixed_mul(wm0.worst_case_latency,
980                                                 wm0.consumption_rate);
981                         b.full = rfixed_const(16 * 1000);
982                         priority_mark02.full = rfixed_div(a, b);
983                 }
984                 if (wm1.consumption_rate.full > fill_rate.full) {
985                         b.full = wm1.consumption_rate.full - fill_rate.full;
986                         b.full = rfixed_mul(b, wm1.active_time);
987                         a.full = rfixed_const(16);
988                         b.full = rfixed_div(b, a);
989                         a.full = rfixed_mul(wm1.worst_case_latency,
990                                                 wm1.consumption_rate);
991                         priority_mark12.full = a.full + b.full;
992                 } else {
993                         a.full = rfixed_mul(wm1.worst_case_latency,
994                                                 wm1.consumption_rate);
995                         b.full = rfixed_const(16 * 1000);
996                         priority_mark12.full = rfixed_div(a, b);
997                 }
998                 if (wm0.priority_mark.full > priority_mark02.full)
999                         priority_mark02.full = wm0.priority_mark.full;
1000                 if (rfixed_trunc(priority_mark02) < 0)
1001                         priority_mark02.full = 0;
1002                 if (wm0.priority_mark_max.full > priority_mark02.full)
1003                         priority_mark02.full = wm0.priority_mark_max.full;
1004                 if (wm1.priority_mark.full > priority_mark12.full)
1005                         priority_mark12.full = wm1.priority_mark.full;
1006                 if (rfixed_trunc(priority_mark12) < 0)
1007                         priority_mark12.full = 0;
1008                 if (wm1.priority_mark_max.full > priority_mark12.full)
1009                         priority_mark12.full = wm1.priority_mark_max.full;
1010                 WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02));
1011                 WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02));
1012                 WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12));
1013                 WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12));
1014         } else if (mode0) {
1015                 if (rfixed_trunc(wm0.dbpp) > 64)
1016                         a.full = rfixed_div(wm0.dbpp, wm0.num_line_pair);
1017                 else
1018                         a.full = wm0.num_line_pair.full;
1019                 fill_rate.full = rfixed_div(wm0.sclk, a);
1020                 if (wm0.consumption_rate.full > fill_rate.full) {
1021                         b.full = wm0.consumption_rate.full - fill_rate.full;
1022                         b.full = rfixed_mul(b, wm0.active_time);
1023                         a.full = rfixed_const(16);
1024                         b.full = rfixed_div(b, a);
1025                         a.full = rfixed_mul(wm0.worst_case_latency,
1026                                                 wm0.consumption_rate);
1027                         priority_mark02.full = a.full + b.full;
1028                 } else {
1029                         a.full = rfixed_mul(wm0.worst_case_latency,
1030                                                 wm0.consumption_rate);
1031                         b.full = rfixed_const(16);
1032                         priority_mark02.full = rfixed_div(a, b);
1033                 }
1034                 if (wm0.priority_mark.full > priority_mark02.full)
1035                         priority_mark02.full = wm0.priority_mark.full;
1036                 if (rfixed_trunc(priority_mark02) < 0)
1037                         priority_mark02.full = 0;
1038                 if (wm0.priority_mark_max.full > priority_mark02.full)
1039                         priority_mark02.full = wm0.priority_mark_max.full;
1040                 WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02));
1041                 WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02));
1042                 WREG32(D2MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF);
1043                 WREG32(D2MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF);
1044         } else {
1045                 if (rfixed_trunc(wm1.dbpp) > 64)
1046                         a.full = rfixed_div(wm1.dbpp, wm1.num_line_pair);
1047                 else
1048                         a.full = wm1.num_line_pair.full;
1049                 fill_rate.full = rfixed_div(wm1.sclk, a);
1050                 if (wm1.consumption_rate.full > fill_rate.full) {
1051                         b.full = wm1.consumption_rate.full - fill_rate.full;
1052                         b.full = rfixed_mul(b, wm1.active_time);
1053                         a.full = rfixed_const(16);
1054                         b.full = rfixed_div(b, a);
1055                         a.full = rfixed_mul(wm1.worst_case_latency,
1056                                                 wm1.consumption_rate);
1057                         priority_mark12.full = a.full + b.full;
1058                 } else {
1059                         a.full = rfixed_mul(wm1.worst_case_latency,
1060                                                 wm1.consumption_rate);
1061                         b.full = rfixed_const(16 * 1000);
1062                         priority_mark12.full = rfixed_div(a, b);
1063                 }
1064                 if (wm1.priority_mark.full > priority_mark12.full)
1065                         priority_mark12.full = wm1.priority_mark.full;
1066                 if (rfixed_trunc(priority_mark12) < 0)
1067                         priority_mark12.full = 0;
1068                 if (wm1.priority_mark_max.full > priority_mark12.full)
1069                         priority_mark12.full = wm1.priority_mark_max.full;
1070                 WREG32(D1MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF);
1071                 WREG32(D1MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF);
1072                 WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12));
1073                 WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12));
1074         }
1075 }
1076
1077 void rv515_bandwidth_update(struct radeon_device *rdev)
1078 {
1079         uint32_t tmp;
1080         struct drm_display_mode *mode0 = NULL;
1081         struct drm_display_mode *mode1 = NULL;
1082
1083         if (rdev->mode_info.crtcs[0]->base.enabled)
1084                 mode0 = &rdev->mode_info.crtcs[0]->base.mode;
1085         if (rdev->mode_info.crtcs[1]->base.enabled)
1086                 mode1 = &rdev->mode_info.crtcs[1]->base.mode;
1087         /*
1088          * Set display0/1 priority up in the memory controller for
1089          * the active modes if the user specifies HIGH for the
1090          * displaypriority option.
1091          */
1092         if (rdev->disp_priority == 2) {
1093                 tmp = RREG32_MC(MC_MISC_LAT_TIMER);
1094                 tmp &= ~MC_DISP1R_INIT_LAT_MASK;
1095                 tmp &= ~MC_DISP0R_INIT_LAT_MASK;
1096                 if (mode1)
1097                         tmp |= (1 << MC_DISP1R_INIT_LAT_SHIFT);
1098                 if (mode0)
1099                         tmp |= (1 << MC_DISP0R_INIT_LAT_SHIFT);
1100                 WREG32_MC(MC_MISC_LAT_TIMER, tmp);
1101         }
1102         rv515_bandwidth_avivo_update(rdev);
1103 }