2 * Copyright 2010 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
22 * Authors: Alex Deucher
24 #include <linux/firmware.h>
25 #include <linux/platform_device.h>
26 #include <linux/slab.h>
29 #include "radeon_asic.h"
30 #include "radeon_drm.h"
31 #include "evergreend.h"
34 #include "evergreen_reg.h"
35 #include "evergreen_blit_shaders.h"
37 #define EVERGREEN_PFP_UCODE_SIZE 1120
38 #define EVERGREEN_PM4_UCODE_SIZE 1376
40 static void evergreen_gpu_init(struct radeon_device *rdev);
41 void evergreen_fini(struct radeon_device *rdev);
42 static void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
44 void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc)
46 /* enable the pflip int */
47 radeon_irq_kms_pflip_irq_get(rdev, crtc);
50 void evergreen_post_page_flip(struct radeon_device *rdev, int crtc)
52 /* disable the pflip int */
53 radeon_irq_kms_pflip_irq_put(rdev, crtc);
56 u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
58 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
59 u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset);
61 /* Lock the graphics update lock */
62 tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
63 WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
65 /* update the scanout addresses */
66 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
67 upper_32_bits(crtc_base));
68 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
71 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
72 upper_32_bits(crtc_base));
73 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
76 /* Wait for update_pending to go high. */
77 while (!(RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING));
78 DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
80 /* Unlock the lock, so double-buffering can take place inside vblank */
81 tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
82 WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
84 /* Return current update_pending status: */
85 return RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING;
88 /* get temperature in millidegrees */
89 int evergreen_get_temp(struct radeon_device *rdev)
91 u32 temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >>
97 else if (temp & 0x200)
99 else if (temp & 0x100) {
100 actual_temp = temp & 0x1ff;
101 actual_temp |= ~0x1ff;
103 actual_temp = temp & 0xff;
105 return (actual_temp * 1000) / 2;
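/* Worked example of the sign handling above (illustrative raw value,
 * not from any datasheet): ASIC_T = 0x1f5 has bit 0x100 set, so it is
 * sign-extended from 9 bits to 0x1f5 | ~0x1ff = -11.  The counter is
 * in half degrees C, so the function reports (-11 * 1000) / 2 = -5500
 * millidegrees.
 */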
108 int sumo_get_temp(struct radeon_device *rdev)
110 u32 temp = RREG32(CG_THERMAL_STATUS) & 0xff;
111 int actual_temp = temp - 49;
113 return actual_temp * 1000;
116 void evergreen_pm_misc(struct radeon_device *rdev)
118 int req_ps_idx = rdev->pm.requested_power_state_index;
119 int req_cm_idx = rdev->pm.requested_clock_mode_index;
120 struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
121 struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
123 if (voltage->type == VOLTAGE_SW) {
124 if (voltage->voltage && (voltage->voltage != rdev->pm.current_vddc)) {
125 radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
126 rdev->pm.current_vddc = voltage->voltage;
127 DRM_DEBUG("Setting: vddc: %d\n", voltage->voltage);
129 if (voltage->vddci && (voltage->vddci != rdev->pm.current_vddci)) {
130 radeon_atom_set_voltage(rdev, voltage->vddci, SET_VOLTAGE_TYPE_ASIC_VDDCI);
131 rdev->pm.current_vddci = voltage->vddci;
132 DRM_DEBUG("Setting: vddci: %d\n", voltage->vddci);
137 void evergreen_pm_prepare(struct radeon_device *rdev)
139 struct drm_device *ddev = rdev->ddev;
140 struct drm_crtc *crtc;
141 struct radeon_crtc *radeon_crtc;
144 /* disable any active CRTCs */
145 list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
146 radeon_crtc = to_radeon_crtc(crtc);
147 if (radeon_crtc->enabled) {
148 tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset);
149 tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
150 WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
155 void evergreen_pm_finish(struct radeon_device *rdev)
157 struct drm_device *ddev = rdev->ddev;
158 struct drm_crtc *crtc;
159 struct radeon_crtc *radeon_crtc;
162 /* enable any active CRTCs */
163 list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
164 radeon_crtc = to_radeon_crtc(crtc);
165 if (radeon_crtc->enabled) {
166 tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset);
167 tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
168 WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
173 bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
175 bool connected = false;
179 if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
183 if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
187 if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
191 if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
195 if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
199 if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
209 void evergreen_hpd_set_polarity(struct radeon_device *rdev,
210 enum radeon_hpd_id hpd)
213 bool connected = evergreen_hpd_sense(rdev, hpd);
217 tmp = RREG32(DC_HPD1_INT_CONTROL);
219 tmp &= ~DC_HPDx_INT_POLARITY;
221 tmp |= DC_HPDx_INT_POLARITY;
222 WREG32(DC_HPD1_INT_CONTROL, tmp);
225 tmp = RREG32(DC_HPD2_INT_CONTROL);
227 tmp &= ~DC_HPDx_INT_POLARITY;
229 tmp |= DC_HPDx_INT_POLARITY;
230 WREG32(DC_HPD2_INT_CONTROL, tmp);
233 tmp = RREG32(DC_HPD3_INT_CONTROL);
235 tmp &= ~DC_HPDx_INT_POLARITY;
237 tmp |= DC_HPDx_INT_POLARITY;
238 WREG32(DC_HPD3_INT_CONTROL, tmp);
241 tmp = RREG32(DC_HPD4_INT_CONTROL);
243 tmp &= ~DC_HPDx_INT_POLARITY;
245 tmp |= DC_HPDx_INT_POLARITY;
246 WREG32(DC_HPD4_INT_CONTROL, tmp);
249 tmp = RREG32(DC_HPD5_INT_CONTROL);
251 tmp &= ~DC_HPDx_INT_POLARITY;
253 tmp |= DC_HPDx_INT_POLARITY;
254 WREG32(DC_HPD5_INT_CONTROL, tmp);
257 tmp = RREG32(DC_HPD6_INT_CONTROL);
259 tmp &= ~DC_HPDx_INT_POLARITY;
261 tmp |= DC_HPDx_INT_POLARITY;
262 WREG32(DC_HPD6_INT_CONTROL, tmp);
269 void evergreen_hpd_init(struct radeon_device *rdev)
271 struct drm_device *dev = rdev->ddev;
272 struct drm_connector *connector;
273 u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) |
274 DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN;
276 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
277 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
278 switch (radeon_connector->hpd.hpd) {
280 WREG32(DC_HPD1_CONTROL, tmp);
281 rdev->irq.hpd[0] = true;
284 WREG32(DC_HPD2_CONTROL, tmp);
285 rdev->irq.hpd[1] = true;
288 WREG32(DC_HPD3_CONTROL, tmp);
289 rdev->irq.hpd[2] = true;
292 WREG32(DC_HPD4_CONTROL, tmp);
293 rdev->irq.hpd[3] = true;
296 WREG32(DC_HPD5_CONTROL, tmp);
297 rdev->irq.hpd[4] = true;
300 WREG32(DC_HPD6_CONTROL, tmp);
301 rdev->irq.hpd[5] = true;
307 if (rdev->irq.installed)
308 evergreen_irq_set(rdev);
311 void evergreen_hpd_fini(struct radeon_device *rdev)
313 struct drm_device *dev = rdev->ddev;
314 struct drm_connector *connector;
316 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
317 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
318 switch (radeon_connector->hpd.hpd) {
320 WREG32(DC_HPD1_CONTROL, 0);
321 rdev->irq.hpd[0] = false;
324 WREG32(DC_HPD2_CONTROL, 0);
325 rdev->irq.hpd[1] = false;
328 WREG32(DC_HPD3_CONTROL, 0);
329 rdev->irq.hpd[2] = false;
332 WREG32(DC_HPD4_CONTROL, 0);
333 rdev->irq.hpd[3] = false;
336 WREG32(DC_HPD5_CONTROL, 0);
337 rdev->irq.hpd[4] = false;
340 WREG32(DC_HPD6_CONTROL, 0);
341 rdev->irq.hpd[5] = false;
349 /* watermark setup */
351 static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev,
352 struct radeon_crtc *radeon_crtc,
353 struct drm_display_mode *mode,
354 struct drm_display_mode *other_mode)
359 * There are 3 line buffers, each one shared by 2 display controllers.
360 * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
361 * the display controllers. The partitioning is done via one of four
362 * preset allocations specified in bits 2:0:
363 * first display controller
364 * 0 - first half of lb (3840 * 2)
365 * 1 - first 3/4 of lb (5760 * 2)
366 * 2 - whole lb (7680 * 2), other crtc must be disabled
367 * 3 - first 1/4 of lb (1920 * 2)
368 * second display controller
369 * 4 - second half of lb (3840 * 2)
370 * 5 - second 3/4 of lb (5760 * 2)
371 * 6 - whole lb (7680 * 2), other crtc must be disabled
372 * 7 - last 1/4 of lb (1920 * 2)
374 /* this can get tricky if we have two large displays on a paired group
375 * of crtcs. Ideally for multiple large displays we'd assign them to
376 * non-linked crtcs for maximum line buffer allocation.
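/* Illustrative reading of the presets above: a crtc pair with one
 * large display and one disabled crtc can give the active crtc the
 * whole line buffer (preset 2 or 6, 7680 * 2 entries), while a pair
 * driving two displays splits it in half (presets 0 and 4, 3840 * 2
 * entries each).
 */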
378 if (radeon_crtc->base.enabled && mode) {
386 /* second controller of the pair uses second half of the lb */
387 if (radeon_crtc->crtc_id % 2)
389 WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, tmp);
391 if (radeon_crtc->base.enabled && mode) {
396 if (ASIC_IS_DCE5(rdev))
402 if (ASIC_IS_DCE5(rdev))
408 if (ASIC_IS_DCE5(rdev))
414 if (ASIC_IS_DCE5(rdev))
421 /* controller not enabled, so no lb used */
425 static u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev)
427 u32 tmp = RREG32(MC_SHARED_CHMAP);
429 switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
442 struct evergreen_wm_params {
443 u32 dram_channels; /* number of dram channels */
444 u32 yclk; /* bandwidth per dram data pin in kHz */
445 u32 sclk; /* engine clock in kHz */
446 u32 disp_clk; /* display clock in kHz */
447 u32 src_width; /* viewport width */
448 u32 active_time; /* active display time in ns */
449 u32 blank_time; /* blank time in ns */
450 bool interlaced; /* mode is interlaced */
451 fixed20_12 vsc; /* vertical scale ratio */
452 u32 num_heads; /* number of active crtcs */
453 u32 bytes_per_pixel; /* bytes per pixel display + overlay */
454 u32 lb_size; /* line buffer allocated to pipe */
455 u32 vtaps; /* vertical scaler taps */
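/* Illustrative fill of this struct (made-up values, not from a real
 * board): a single unscaled 1920-wide head at 32bpp would use
 * src_width = 1920, bytes_per_pixel = 4, vsc = 1.0, vtaps = 1,
 * num_heads = 1, with active_time/blank_time taken from the mode
 * timings and yclk/sclk/disp_clk from the current clocks.
 */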
458 static u32 evergreen_dram_bandwidth(struct evergreen_wm_params *wm)
460 /* Calculate DRAM Bandwidth and the part allocated to display. */
461 fixed20_12 dram_efficiency; /* 0.7 */
462 fixed20_12 yclk, dram_channels, bandwidth;
465 a.full = dfixed_const(1000);
466 yclk.full = dfixed_const(wm->yclk);
467 yclk.full = dfixed_div(yclk, a);
468 dram_channels.full = dfixed_const(wm->dram_channels * 4);
469 a.full = dfixed_const(10);
470 dram_efficiency.full = dfixed_const(7);
471 dram_efficiency.full = dfixed_div(dram_efficiency, a);
472 bandwidth.full = dfixed_mul(dram_channels, yclk);
473 bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
475 return dfixed_trunc(bandwidth);
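/* Numeric sketch of the fixed-point math above (illustrative clocks):
 * yclk = 900000 kHz and 2 dram channels give
 * (900000 / 1000) * (2 * 4) * 0.7 = 5040 MB/s after the assumed 0.7
 * dram efficiency factor.
 */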
478 static u32 evergreen_dram_bandwidth_for_display(struct evergreen_wm_params *wm)
480 /* Calculate DRAM Bandwidth and the part allocated to display. */
481 fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
482 fixed20_12 yclk, dram_channels, bandwidth;
485 a.full = dfixed_const(1000);
486 yclk.full = dfixed_const(wm->yclk);
487 yclk.full = dfixed_div(yclk, a);
488 dram_channels.full = dfixed_const(wm->dram_channels * 4);
489 a.full = dfixed_const(10);
490 disp_dram_allocation.full = dfixed_const(3); /* XXX worst-case value 0.3 */
491 disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
492 bandwidth.full = dfixed_mul(dram_channels, yclk);
493 bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
495 return dfixed_trunc(bandwidth);
498 static u32 evergreen_data_return_bandwidth(struct evergreen_wm_params *wm)
500 /* Calculate the display Data return Bandwidth */
501 fixed20_12 return_efficiency; /* 0.8 */
502 fixed20_12 sclk, bandwidth;
505 a.full = dfixed_const(1000);
506 sclk.full = dfixed_const(wm->sclk);
507 sclk.full = dfixed_div(sclk, a);
508 a.full = dfixed_const(10);
509 return_efficiency.full = dfixed_const(8);
510 return_efficiency.full = dfixed_div(return_efficiency, a);
511 a.full = dfixed_const(32);
512 bandwidth.full = dfixed_mul(a, sclk);
513 bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
515 return dfixed_trunc(bandwidth);
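/* Numeric sketch (illustrative): sclk = 700000 kHz models the return
 * path as 32 bytes per engine clock at 0.8 efficiency, i.e.
 * (700000 / 1000) * 32 * 0.8 = 17920 MB/s.
 */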
518 static u32 evergreen_dmif_request_bandwidth(struct evergreen_wm_params *wm)
520 /* Calculate the DMIF Request Bandwidth */
521 fixed20_12 disp_clk_request_efficiency; /* 0.8 */
522 fixed20_12 disp_clk, bandwidth;
525 a.full = dfixed_const(1000);
526 disp_clk.full = dfixed_const(wm->disp_clk);
527 disp_clk.full = dfixed_div(disp_clk, a);
528 a.full = dfixed_const(10);
529 disp_clk_request_efficiency.full = dfixed_const(8);
530 disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
531 a.full = dfixed_const(32);
532 bandwidth.full = dfixed_mul(a, disp_clk);
533 bandwidth.full = dfixed_mul(bandwidth, disp_clk_request_efficiency);
535 return dfixed_trunc(bandwidth);
538 static u32 evergreen_available_bandwidth(struct evergreen_wm_params *wm)
540 /* Calculate the available bandwidth. Display can use this temporarily but not on average. */
541 u32 dram_bandwidth = evergreen_dram_bandwidth(wm);
542 u32 data_return_bandwidth = evergreen_data_return_bandwidth(wm);
543 u32 dmif_req_bandwidth = evergreen_dmif_request_bandwidth(wm);
545 return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
548 static u32 evergreen_average_bandwidth(struct evergreen_wm_params *wm)
550 /* Calculate the display mode Average Bandwidth
551 * DisplayMode should contain the source and destination dimensions, timing, etc.
555 fixed20_12 line_time;
556 fixed20_12 src_width;
557 fixed20_12 bandwidth;
560 a.full = dfixed_const(1000);
561 line_time.full = dfixed_const(wm->active_time + wm->blank_time);
562 line_time.full = dfixed_div(line_time, a);
563 bpp.full = dfixed_const(wm->bytes_per_pixel);
564 src_width.full = dfixed_const(wm->src_width);
565 bandwidth.full = dfixed_mul(src_width, bpp);
566 bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
567 bandwidth.full = dfixed_div(bandwidth, line_time);
569 return dfixed_trunc(bandwidth);
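/* Numeric sketch (illustrative): a 1920-wide source at 4 bytes per
 * pixel, vsc = 1.0 and a ~14.8 us line time averages about
 * (1920 * 4) / 14.8 ~= 519 MB/s.
 */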
572 static u32 evergreen_latency_watermark(struct evergreen_wm_params *wm)
574 /* First calculate the latency in ns */
575 u32 mc_latency = 2000; /* 2000 ns. */
576 u32 available_bandwidth = evergreen_available_bandwidth(wm);
577 u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
578 u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
579 u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
580 u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
581 (wm->num_heads * cursor_line_pair_return_time);
582 u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
583 u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
586 if (wm->num_heads == 0)
589 a.full = dfixed_const(2);
590 b.full = dfixed_const(1);
591 if ((wm->vsc.full > a.full) ||
592 ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
594 ((wm->vsc.full >= a.full) && wm->interlaced))
595 max_src_lines_per_dst_line = 4;
597 max_src_lines_per_dst_line = 2;
599 a.full = dfixed_const(available_bandwidth);
600 b.full = dfixed_const(wm->num_heads);
601 a.full = dfixed_div(a, b);
603 b.full = dfixed_const(1000);
604 c.full = dfixed_const(wm->disp_clk);
605 b.full = dfixed_div(c, b);
606 c.full = dfixed_const(wm->bytes_per_pixel);
607 b.full = dfixed_mul(b, c);
609 lb_fill_bw = min(dfixed_trunc(a), dfixed_trunc(b));
611 a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
612 b.full = dfixed_const(1000);
613 c.full = dfixed_const(lb_fill_bw);
614 b.full = dfixed_div(c, b);
615 a.full = dfixed_div(a, b);
616 line_fill_time = dfixed_trunc(a);
618 if (line_fill_time < wm->active_time)
621 return latency + (line_fill_time - wm->active_time);
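/* Rough worked example (illustrative numbers): with 5040 MB/s of
 * available bandwidth, one head, and a 148500 kHz display clock,
 * worst_chunk_return_time ~= 812 ns, cursor_line_pair_return_time
 * ~= 101 ns and dc_latency ~= 269 ns, so the base latency is about
 * 2000 + (2 * 812 + 101) + 269 ~= 4000 ns before any line fill
 * correction is added.
 */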
625 static bool evergreen_average_bandwidth_vs_dram_bandwidth_for_display(struct evergreen_wm_params *wm)
627 if (evergreen_average_bandwidth(wm) <=
628 (evergreen_dram_bandwidth_for_display(wm) / wm->num_heads))
634 static bool evergreen_average_bandwidth_vs_available_bandwidth(struct evergreen_wm_params *wm)
636 if (evergreen_average_bandwidth(wm) <=
637 (evergreen_available_bandwidth(wm) / wm->num_heads))
643 static bool evergreen_check_latency_hiding(struct evergreen_wm_params *wm)
645 u32 lb_partitions = wm->lb_size / wm->src_width;
646 u32 line_time = wm->active_time + wm->blank_time;
647 u32 latency_tolerant_lines;
651 a.full = dfixed_const(1);
652 if (wm->vsc.full > a.full)
653 latency_tolerant_lines = 1;
655 if (lb_partitions <= (wm->vtaps + 1))
656 latency_tolerant_lines = 1;
658 latency_tolerant_lines = 2;
661 latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
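/* Illustrative check: an unscaled 1920-wide surface with a whole-lb
 * allocation of 7680 gives lb_partitions = 4, hence 2 latency tolerant
 * lines; with a ~14.8 us line time and ~1.9 us of blanking that is
 * roughly 31.5 us of latency hiding, well above a ~4 us watermark.
 */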
663 if (evergreen_latency_watermark(wm) <= latency_hiding)
669 static void evergreen_program_watermarks(struct radeon_device *rdev,
670 struct radeon_crtc *radeon_crtc,
671 u32 lb_size, u32 num_heads)
673 struct drm_display_mode *mode = &radeon_crtc->base.mode;
674 struct evergreen_wm_params wm;
677 u32 latency_watermark_a = 0, latency_watermark_b = 0;
678 u32 priority_a_mark = 0, priority_b_mark = 0;
679 u32 priority_a_cnt = PRIORITY_OFF;
680 u32 priority_b_cnt = PRIORITY_OFF;
681 u32 pipe_offset = radeon_crtc->crtc_id * 16;
682 u32 tmp, arb_control3;
685 if (radeon_crtc->base.enabled && num_heads && mode) {
686 pixel_period = 1000000 / (u32)mode->clock;
687 line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
691 wm.yclk = rdev->pm.current_mclk * 10;
692 wm.sclk = rdev->pm.current_sclk * 10;
693 wm.disp_clk = mode->clock;
694 wm.src_width = mode->crtc_hdisplay;
695 wm.active_time = mode->crtc_hdisplay * pixel_period;
696 wm.blank_time = line_time - wm.active_time;
697 wm.interlaced = false;
698 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
699 wm.interlaced = true;
700 wm.vsc = radeon_crtc->vsc;
702 if (radeon_crtc->rmx_type != RMX_OFF)
704 wm.bytes_per_pixel = 4; /* XXX: get this from fb config */
705 wm.lb_size = lb_size;
706 wm.dram_channels = evergreen_get_number_of_dram_channels(rdev);
707 wm.num_heads = num_heads;
709 /* set for high clocks */
710 latency_watermark_a = min(evergreen_latency_watermark(&wm), (u32)65535);
711 /* set for low clocks */
712 /* wm.yclk = low clk; wm.sclk = low clk */
713 latency_watermark_b = min(evergreen_latency_watermark(&wm), (u32)65535);
715 /* possibly force display priority to high */
716 /* should really do this at mode validation time... */
717 if (!evergreen_average_bandwidth_vs_dram_bandwidth_for_display(&wm) ||
718 !evergreen_average_bandwidth_vs_available_bandwidth(&wm) ||
719 !evergreen_check_latency_hiding(&wm) ||
720 (rdev->disp_priority == 2)) {
721 DRM_INFO("force priority to high\n");
722 priority_a_cnt |= PRIORITY_ALWAYS_ON;
723 priority_b_cnt |= PRIORITY_ALWAYS_ON;
726 a.full = dfixed_const(1000);
727 b.full = dfixed_const(mode->clock);
728 b.full = dfixed_div(b, a);
729 c.full = dfixed_const(latency_watermark_a);
730 c.full = dfixed_mul(c, b);
731 c.full = dfixed_mul(c, radeon_crtc->hsc);
732 c.full = dfixed_div(c, a);
733 a.full = dfixed_const(16);
734 c.full = dfixed_div(c, a);
735 priority_a_mark = dfixed_trunc(c);
736 priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;
738 a.full = dfixed_const(1000);
739 b.full = dfixed_const(mode->clock);
740 b.full = dfixed_div(b, a);
741 c.full = dfixed_const(latency_watermark_b);
742 c.full = dfixed_mul(c, b);
743 c.full = dfixed_mul(c, radeon_crtc->hsc);
744 c.full = dfixed_div(c, a);
745 a.full = dfixed_const(16);
746 c.full = dfixed_div(c, a);
747 priority_b_mark = dfixed_trunc(c);
748 priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
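/* Illustrative priority mark: the two blocks above convert a watermark
 * in ns into units of 16 pixels.  A 4000 ns watermark on a 148500 kHz
 * mode with hsc = 1.0 gives 4000 * 148.5 / 1000 / 16 ~= 37, which is
 * what ends up in the PRIORITY_MARK_MASK field of PRIORITY_x_CNT.
 */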
752 arb_control3 = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset);
754 tmp &= ~LATENCY_WATERMARK_MASK(3);
755 tmp |= LATENCY_WATERMARK_MASK(1);
756 WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp);
757 WREG32(PIPE0_LATENCY_CONTROL + pipe_offset,
758 (LATENCY_LOW_WATERMARK(latency_watermark_a) |
759 LATENCY_HIGH_WATERMARK(line_time)));
761 tmp = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset);
762 tmp &= ~LATENCY_WATERMARK_MASK(3);
763 tmp |= LATENCY_WATERMARK_MASK(2);
764 WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp);
765 WREG32(PIPE0_LATENCY_CONTROL + pipe_offset,
766 (LATENCY_LOW_WATERMARK(latency_watermark_b) |
767 LATENCY_HIGH_WATERMARK(line_time)));
768 /* restore original selection */
769 WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, arb_control3);
771 /* write the priority marks */
772 WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt);
773 WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt);
777 void evergreen_bandwidth_update(struct radeon_device *rdev)
779 struct drm_display_mode *mode0 = NULL;
780 struct drm_display_mode *mode1 = NULL;
781 u32 num_heads = 0, lb_size;
784 radeon_update_display_priority(rdev);
786 for (i = 0; i < rdev->num_crtc; i++) {
787 if (rdev->mode_info.crtcs[i]->base.enabled)
790 for (i = 0; i < rdev->num_crtc; i += 2) {
791 mode0 = &rdev->mode_info.crtcs[i]->base.mode;
792 mode1 = &rdev->mode_info.crtcs[i+1]->base.mode;
793 lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i], mode0, mode1);
794 evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i], lb_size, num_heads);
795 lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i+1], mode1, mode0);
796 evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i+1], lb_size, num_heads);
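/* Note on the pairing above: crtcs are walked two at a time because
 * each pair shares one line buffer, so each crtc is programmed with
 * the lb share it was just given and its partner's mode (crtc 0
 * against crtc 1, then crtc 1 against crtc 0, and so on).
 */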
800 int evergreen_mc_wait_for_idle(struct radeon_device *rdev)
805 for (i = 0; i < rdev->usec_timeout; i++) {
807 tmp = RREG32(SRBM_STATUS) & 0x1F00;
818 void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev)
823 WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
825 WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
826 for (i = 0; i < rdev->usec_timeout; i++) {
828 tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
829 tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
831 printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
841 int evergreen_pcie_gart_enable(struct radeon_device *rdev)
846 if (rdev->gart.table.vram.robj == NULL) {
847 dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
850 r = radeon_gart_table_vram_pin(rdev);
853 radeon_gart_restore(rdev);
855 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
856 ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
857 EFFECTIVE_L2_QUEUE_SIZE(7));
858 WREG32(VM_L2_CNTL2, 0);
859 WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
860 /* Setup TLB control */
861 tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
862 SYSTEM_ACCESS_MODE_NOT_IN_SYS |
863 SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
864 EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
865 WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
866 WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
867 WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
868 WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
869 WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
870 WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
871 WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
872 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
873 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
874 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
875 WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
876 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
877 WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
878 (u32)(rdev->dummy_page.addr >> 12));
879 WREG32(VM_CONTEXT1_CNTL, 0);
881 evergreen_pcie_gart_tlb_flush(rdev);
882 rdev->gart.ready = true;
886 void evergreen_pcie_gart_disable(struct radeon_device *rdev)
891 /* Disable all tables */
892 WREG32(VM_CONTEXT0_CNTL, 0);
893 WREG32(VM_CONTEXT1_CNTL, 0);
896 WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
897 EFFECTIVE_L2_QUEUE_SIZE(7));
898 WREG32(VM_L2_CNTL2, 0);
899 WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
900 /* Setup TLB control */
901 tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
902 WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
903 WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
904 WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
905 WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
906 WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
907 WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
908 WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
909 if (rdev->gart.table.vram.robj) {
910 r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
911 if (likely(r == 0)) {
912 radeon_bo_kunmap(rdev->gart.table.vram.robj);
913 radeon_bo_unpin(rdev->gart.table.vram.robj);
914 radeon_bo_unreserve(rdev->gart.table.vram.robj);
919 void evergreen_pcie_gart_fini(struct radeon_device *rdev)
921 evergreen_pcie_gart_disable(rdev);
922 radeon_gart_table_vram_free(rdev);
923 radeon_gart_fini(rdev);
927 void evergreen_agp_enable(struct radeon_device *rdev)
932 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
933 ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
934 EFFECTIVE_L2_QUEUE_SIZE(7));
935 WREG32(VM_L2_CNTL2, 0);
936 WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
937 /* Setup TLB control */
938 tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
939 SYSTEM_ACCESS_MODE_NOT_IN_SYS |
940 SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
941 EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
942 WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
943 WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
944 WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
945 WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
946 WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
947 WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
948 WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
949 WREG32(VM_CONTEXT0_CNTL, 0);
950 WREG32(VM_CONTEXT1_CNTL, 0);
953 void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
955 save->vga_control[0] = RREG32(D1VGA_CONTROL);
956 save->vga_control[1] = RREG32(D2VGA_CONTROL);
957 save->vga_control[2] = RREG32(EVERGREEN_D3VGA_CONTROL);
958 save->vga_control[3] = RREG32(EVERGREEN_D4VGA_CONTROL);
959 save->vga_control[4] = RREG32(EVERGREEN_D5VGA_CONTROL);
960 save->vga_control[5] = RREG32(EVERGREEN_D6VGA_CONTROL);
961 save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
962 save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
963 save->crtc_control[0] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET);
964 save->crtc_control[1] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
965 if (!(rdev->flags & RADEON_IS_IGP)) {
966 save->crtc_control[2] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET);
967 save->crtc_control[3] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
968 save->crtc_control[4] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET);
969 save->crtc_control[5] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
973 WREG32(VGA_RENDER_CONTROL, 0);
974 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
975 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
976 if (!(rdev->flags & RADEON_IS_IGP)) {
977 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
978 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
979 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
980 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
982 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
983 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
984 if (!(rdev->flags & RADEON_IS_IGP)) {
985 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
986 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
987 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
988 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
990 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
991 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
992 if (!(rdev->flags & RADEON_IS_IGP)) {
993 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
994 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
995 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
996 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
999 WREG32(D1VGA_CONTROL, 0);
1000 WREG32(D2VGA_CONTROL, 0);
1001 WREG32(EVERGREEN_D3VGA_CONTROL, 0);
1002 WREG32(EVERGREEN_D4VGA_CONTROL, 0);
1003 WREG32(EVERGREEN_D5VGA_CONTROL, 0);
1004 WREG32(EVERGREEN_D6VGA_CONTROL, 0);
1007 void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
1009 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET,
1010 upper_32_bits(rdev->mc.vram_start));
1011 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET,
1012 upper_32_bits(rdev->mc.vram_start));
1013 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET,
1014 (u32)rdev->mc.vram_start);
1015 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET,
1016 (u32)rdev->mc.vram_start);
1018 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET,
1019 upper_32_bits(rdev->mc.vram_start));
1020 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET,
1021 upper_32_bits(rdev->mc.vram_start));
1022 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
1023 (u32)rdev->mc.vram_start);
1024 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
1025 (u32)rdev->mc.vram_start);
1027 if (!(rdev->flags & RADEON_IS_IGP)) {
1028 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
1029 upper_32_bits(rdev->mc.vram_start));
1030 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
1031 upper_32_bits(rdev->mc.vram_start));
1032 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
1033 (u32)rdev->mc.vram_start);
1034 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
1035 (u32)rdev->mc.vram_start);
1037 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
1038 upper_32_bits(rdev->mc.vram_start));
1039 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
1040 upper_32_bits(rdev->mc.vram_start));
1041 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
1042 (u32)rdev->mc.vram_start);
1043 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
1044 (u32)rdev->mc.vram_start);
1046 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
1047 upper_32_bits(rdev->mc.vram_start));
1048 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
1049 upper_32_bits(rdev->mc.vram_start));
1050 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
1051 (u32)rdev->mc.vram_start);
1052 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
1053 (u32)rdev->mc.vram_start);
1055 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
1056 upper_32_bits(rdev->mc.vram_start));
1057 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
1058 upper_32_bits(rdev->mc.vram_start));
1059 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
1060 (u32)rdev->mc.vram_start);
1061 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
1062 (u32)rdev->mc.vram_start);
1065 WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
1066 WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
1067 /* Unlock host access */
1068 WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
1070 /* Restore video state */
1071 WREG32(D1VGA_CONTROL, save->vga_control[0]);
1072 WREG32(D2VGA_CONTROL, save->vga_control[1]);
1073 WREG32(EVERGREEN_D3VGA_CONTROL, save->vga_control[2]);
1074 WREG32(EVERGREEN_D4VGA_CONTROL, save->vga_control[3]);
1075 WREG32(EVERGREEN_D5VGA_CONTROL, save->vga_control[4]);
1076 WREG32(EVERGREEN_D6VGA_CONTROL, save->vga_control[5]);
1077 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
1078 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
1079 if (!(rdev->flags & RADEON_IS_IGP)) {
1080 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
1081 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
1082 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
1083 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
1085 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, save->crtc_control[0]);
1086 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, save->crtc_control[1]);
1087 if (!(rdev->flags & RADEON_IS_IGP)) {
1088 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, save->crtc_control[2]);
1089 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, save->crtc_control[3]);
1090 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, save->crtc_control[4]);
1091 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, save->crtc_control[5]);
1093 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
1094 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
1095 if (!(rdev->flags & RADEON_IS_IGP)) {
1096 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
1097 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
1098 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
1099 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
1101 WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
1104 void evergreen_mc_program(struct radeon_device *rdev)
1106 struct evergreen_mc_save save;
1110 /* Initialize HDP */
1111 for (i = 0, j = 0; i < 32; i++, j += 0x18) {
1112 WREG32((0x2c14 + j), 0x00000000);
1113 WREG32((0x2c18 + j), 0x00000000);
1114 WREG32((0x2c1c + j), 0x00000000);
1115 WREG32((0x2c20 + j), 0x00000000);
1116 WREG32((0x2c24 + j), 0x00000000);
1118 WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
1120 evergreen_mc_stop(rdev, &save);
1121 if (evergreen_mc_wait_for_idle(rdev)) {
1122 dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
1124 /* Lock out access through the VGA aperture */
1125 WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
1126 /* Update configuration */
1127 if (rdev->flags & RADEON_IS_AGP) {
1128 if (rdev->mc.vram_start < rdev->mc.gtt_start) {
1129 /* VRAM before AGP */
1130 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
1131 rdev->mc.vram_start >> 12);
1132 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
1133 rdev->mc.gtt_end >> 12);
1135 /* VRAM after AGP */
1136 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
1137 rdev->mc.gtt_start >> 12);
1138 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
1139 rdev->mc.vram_end >> 12);
1142 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
1143 rdev->mc.vram_start >> 12);
1144 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
1145 rdev->mc.vram_end >> 12);
1147 WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
1148 if (rdev->flags & RADEON_IS_IGP) {
1149 tmp = RREG32(MC_FUS_VM_FB_OFFSET) & 0x000FFFFF;
1150 tmp |= ((rdev->mc.vram_end >> 20) & 0xF) << 24;
1151 tmp |= ((rdev->mc.vram_start >> 20) & 0xF) << 20;
1152 WREG32(MC_FUS_VM_FB_OFFSET, tmp);
1154 tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
1155 tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
1156 WREG32(MC_VM_FB_LOCATION, tmp);
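/* Illustrative MC_VM_FB_LOCATION packing: a 1 GB framebuffer at offset
 * 0 has vram_start >> 24 = 0x0000 and vram_end >> 24 = 0x003f
 * (vram_end = 0x3fffffff), so tmp above would be 0x003f0000.
 */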
1157 WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
1158 WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
1159 WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
1160 if (rdev->flags & RADEON_IS_AGP) {
1161 WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
1162 WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
1163 WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
1165 WREG32(MC_VM_AGP_BASE, 0);
1166 WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
1167 WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
1169 if (evergreen_mc_wait_for_idle(rdev)) {
1170 dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
1172 evergreen_mc_resume(rdev, &save);
1173 /* we need to own VRAM, so turn off the VGA renderer here
1174 * to stop it from overwriting our objects */
1175 rv515_vga_render_disable(rdev);
1181 void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
1183 /* set to DX10/11 mode */
1184 radeon_ring_write(rdev, PACKET3(PACKET3_MODE_CONTROL, 0));
1185 radeon_ring_write(rdev, 1);
1186 /* FIXME: implement */
1187 radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
1188 radeon_ring_write(rdev,
1192 (ib->gpu_addr & 0xFFFFFFFC));
1193 radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
1194 radeon_ring_write(rdev, ib->length_dw);
1198 static int evergreen_cp_load_microcode(struct radeon_device *rdev)
1200 const __be32 *fw_data;
1203 if (!rdev->me_fw || !rdev->pfp_fw)
1211 RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
1213 fw_data = (const __be32 *)rdev->pfp_fw->data;
1214 WREG32(CP_PFP_UCODE_ADDR, 0);
1215 for (i = 0; i < EVERGREEN_PFP_UCODE_SIZE; i++)
1216 WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
1217 WREG32(CP_PFP_UCODE_ADDR, 0);
1219 fw_data = (const __be32 *)rdev->me_fw->data;
1220 WREG32(CP_ME_RAM_WADDR, 0);
1221 for (i = 0; i < EVERGREEN_PM4_UCODE_SIZE; i++)
1222 WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
1224 WREG32(CP_PFP_UCODE_ADDR, 0);
1225 WREG32(CP_ME_RAM_WADDR, 0);
1226 WREG32(CP_ME_RAM_RADDR, 0);
1230 static int evergreen_cp_start(struct radeon_device *rdev)
1235 r = radeon_ring_lock(rdev, 7);
1237 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
1240 radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
1241 radeon_ring_write(rdev, 0x1);
1242 radeon_ring_write(rdev, 0x0);
1243 radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
1244 radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
1245 radeon_ring_write(rdev, 0);
1246 radeon_ring_write(rdev, 0);
1247 radeon_ring_unlock_commit(rdev);
1250 WREG32(CP_ME_CNTL, cp_me);
1252 r = radeon_ring_lock(rdev, evergreen_default_size + 19);
1254 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
1258 /* setup clear context state */
1259 radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
1260 radeon_ring_write(rdev, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
1262 for (i = 0; i < evergreen_default_size; i++)
1263 radeon_ring_write(rdev, evergreen_default_state[i]);
1265 radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
1266 radeon_ring_write(rdev, PACKET3_PREAMBLE_END_CLEAR_STATE);
1268 /* set clear context state */
1269 radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0));
1270 radeon_ring_write(rdev, 0);
1272 /* SQ_VTX_BASE_VTX_LOC */
1273 radeon_ring_write(rdev, 0xc0026f00);
1274 radeon_ring_write(rdev, 0x00000000);
1275 radeon_ring_write(rdev, 0x00000000);
1276 radeon_ring_write(rdev, 0x00000000);
1279 radeon_ring_write(rdev, 0xc0036f00);
1280 radeon_ring_write(rdev, 0x00000bc4);
1281 radeon_ring_write(rdev, 0xffffffff);
1282 radeon_ring_write(rdev, 0xffffffff);
1283 radeon_ring_write(rdev, 0xffffffff);
1285 radeon_ring_write(rdev, 0xc0026900);
1286 radeon_ring_write(rdev, 0x00000316);
1287 radeon_ring_write(rdev, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
1288 radeon_ring_write(rdev, 0x00000010); /* */
1290 radeon_ring_unlock_commit(rdev);
1295 int evergreen_cp_resume(struct radeon_device *rdev)
1301 /* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
1302 WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
1307 RREG32(GRBM_SOFT_RESET);
1309 WREG32(GRBM_SOFT_RESET, 0);
1310 RREG32(GRBM_SOFT_RESET);
1312 /* Set ring buffer size */
1313 rb_bufsz = drm_order(rdev->cp.ring_size / 8);
1314 tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
1316 tmp |= BUF_SWAP_32BIT;
1318 WREG32(CP_RB_CNTL, tmp);
1319 WREG32(CP_SEM_WAIT_TIMER, 0x4);
1321 /* Set the write pointer delay */
1322 WREG32(CP_RB_WPTR_DELAY, 0);
1324 /* Initialize the ring buffer's read and write pointers */
1325 WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
1326 WREG32(CP_RB_RPTR_WR, 0);
1327 WREG32(CP_RB_WPTR, 0);
1329 /* set the wb address whether it's enabled or not */
1330 WREG32(CP_RB_RPTR_ADDR,
1334 ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
1335 WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
1336 WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
1338 if (rdev->wb.enabled)
1339 WREG32(SCRATCH_UMSK, 0xff);
1341 tmp |= RB_NO_UPDATE;
1342 WREG32(SCRATCH_UMSK, 0);
1346 WREG32(CP_RB_CNTL, tmp);
1348 WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
1349 WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
1351 rdev->cp.rptr = RREG32(CP_RB_RPTR);
1352 rdev->cp.wptr = RREG32(CP_RB_WPTR);
1354 evergreen_cp_start(rdev);
1355 rdev->cp.ready = true;
1356 r = radeon_ring_test(rdev);
1358 rdev->cp.ready = false;
1367 static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
1370 u32 backend_disable_mask)
1372 u32 backend_map = 0;
1373 u32 enabled_backends_mask = 0;
1374 u32 enabled_backends_count = 0;
1376 u32 swizzle_pipe[EVERGREEN_MAX_PIPES];
1377 u32 cur_backend = 0;
1379 bool force_no_swizzle;
1381 if (num_tile_pipes > EVERGREEN_MAX_PIPES)
1382 num_tile_pipes = EVERGREEN_MAX_PIPES;
1383 if (num_tile_pipes < 1)
1385 if (num_backends > EVERGREEN_MAX_BACKENDS)
1386 num_backends = EVERGREEN_MAX_BACKENDS;
1387 if (num_backends < 1)
1390 for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
1391 if (((backend_disable_mask >> i) & 1) == 0) {
1392 enabled_backends_mask |= (1 << i);
1393 ++enabled_backends_count;
1395 if (enabled_backends_count == num_backends)
1399 if (enabled_backends_count == 0) {
1400 enabled_backends_mask = 1;
1401 enabled_backends_count = 1;
1404 if (enabled_backends_count != num_backends)
1405 num_backends = enabled_backends_count;
1407 memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * EVERGREEN_MAX_PIPES);
1408 switch (rdev->family) {
1414 force_no_swizzle = false;
1421 force_no_swizzle = true;
1424 if (force_no_swizzle) {
1425 bool last_backend_enabled = false;
1427 force_no_swizzle = false;
1428 for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
1429 if (((enabled_backends_mask >> i) & 1) == 1) {
1430 if (last_backend_enabled)
1431 force_no_swizzle = true;
1432 last_backend_enabled = true;
1434 last_backend_enabled = false;
1438 switch (num_tile_pipes) {
1443 DRM_ERROR("odd number of pipes!\n");
1446 swizzle_pipe[0] = 0;
1447 swizzle_pipe[1] = 1;
1450 if (force_no_swizzle) {
1451 swizzle_pipe[0] = 0;
1452 swizzle_pipe[1] = 1;
1453 swizzle_pipe[2] = 2;
1454 swizzle_pipe[3] = 3;
1456 swizzle_pipe[0] = 0;
1457 swizzle_pipe[1] = 2;
1458 swizzle_pipe[2] = 1;
1459 swizzle_pipe[3] = 3;
1463 if (force_no_swizzle) {
1464 swizzle_pipe[0] = 0;
1465 swizzle_pipe[1] = 1;
1466 swizzle_pipe[2] = 2;
1467 swizzle_pipe[3] = 3;
1468 swizzle_pipe[4] = 4;
1469 swizzle_pipe[5] = 5;
1471 swizzle_pipe[0] = 0;
1472 swizzle_pipe[1] = 2;
1473 swizzle_pipe[2] = 4;
1474 swizzle_pipe[3] = 1;
1475 swizzle_pipe[4] = 3;
1476 swizzle_pipe[5] = 5;
1480 if (force_no_swizzle) {
1481 swizzle_pipe[0] = 0;
1482 swizzle_pipe[1] = 1;
1483 swizzle_pipe[2] = 2;
1484 swizzle_pipe[3] = 3;
1485 swizzle_pipe[4] = 4;
1486 swizzle_pipe[5] = 5;
1487 swizzle_pipe[6] = 6;
1488 swizzle_pipe[7] = 7;
1490 swizzle_pipe[0] = 0;
1491 swizzle_pipe[1] = 2;
1492 swizzle_pipe[2] = 4;
1493 swizzle_pipe[3] = 6;
1494 swizzle_pipe[4] = 1;
1495 swizzle_pipe[5] = 3;
1496 swizzle_pipe[6] = 5;
1497 swizzle_pipe[7] = 7;
1502 for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
1503 while (((1 << cur_backend) & enabled_backends_mask) == 0)
1504 cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;
1506 backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)));
1508 cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;
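/* Illustrative result of the loop above: with 4 tile pipes, no
 * swizzle and backends 0-3 enabled, pipe n is given backend n in its
 * 4-bit field, i.e. backend_map = 0x3210.
 */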
1514 static void evergreen_program_channel_remap(struct radeon_device *rdev)
1516 u32 tcp_chan_steer_lo, tcp_chan_steer_hi, mc_shared_chremap, tmp;
1518 tmp = RREG32(MC_SHARED_CHMAP);
1519 switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
1525 /* default mapping */
1526 mc_shared_chremap = 0x00fac688;
1530 switch (rdev->family) {
1534 tcp_chan_steer_lo = 0x54763210;
1535 tcp_chan_steer_hi = 0x0000ba98;
1544 tcp_chan_steer_lo = 0x76543210;
1545 tcp_chan_steer_hi = 0x0000ba98;
1549 WREG32(TCP_CHAN_STEER_LO, tcp_chan_steer_lo);
1550 WREG32(TCP_CHAN_STEER_HI, tcp_chan_steer_hi);
1551 WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
1554 static void evergreen_gpu_init(struct radeon_device *rdev)
1556 u32 cc_rb_backend_disable = 0;
1557 u32 cc_gc_shader_pipe_config;
1558 u32 gb_addr_config = 0;
1559 u32 mc_shared_chmap, mc_arb_ramcfg;
1565 u32 sq_lds_resource_mgmt;
1566 u32 sq_gpr_resource_mgmt_1;
1567 u32 sq_gpr_resource_mgmt_2;
1568 u32 sq_gpr_resource_mgmt_3;
1569 u32 sq_thread_resource_mgmt;
1570 u32 sq_thread_resource_mgmt_2;
1571 u32 sq_stack_resource_mgmt_1;
1572 u32 sq_stack_resource_mgmt_2;
1573 u32 sq_stack_resource_mgmt_3;
1574 u32 vgt_cache_invalidation;
1575 u32 hdp_host_path_cntl;
1576 int i, j, num_shader_engines, ps_thread_count;
1578 switch (rdev->family) {
1581 rdev->config.evergreen.num_ses = 2;
1582 rdev->config.evergreen.max_pipes = 4;
1583 rdev->config.evergreen.max_tile_pipes = 8;
1584 rdev->config.evergreen.max_simds = 10;
1585 rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
1586 rdev->config.evergreen.max_gprs = 256;
1587 rdev->config.evergreen.max_threads = 248;
1588 rdev->config.evergreen.max_gs_threads = 32;
1589 rdev->config.evergreen.max_stack_entries = 512;
1590 rdev->config.evergreen.sx_num_of_sets = 4;
1591 rdev->config.evergreen.sx_max_export_size = 256;
1592 rdev->config.evergreen.sx_max_export_pos_size = 64;
1593 rdev->config.evergreen.sx_max_export_smx_size = 192;
1594 rdev->config.evergreen.max_hw_contexts = 8;
1595 rdev->config.evergreen.sq_num_cf_insts = 2;
1597 rdev->config.evergreen.sc_prim_fifo_size = 0x100;
1598 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1599 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1602 rdev->config.evergreen.num_ses = 1;
1603 rdev->config.evergreen.max_pipes = 4;
1604 rdev->config.evergreen.max_tile_pipes = 4;
1605 rdev->config.evergreen.max_simds = 10;
1606 rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
1607 rdev->config.evergreen.max_gprs = 256;
1608 rdev->config.evergreen.max_threads = 248;
1609 rdev->config.evergreen.max_gs_threads = 32;
1610 rdev->config.evergreen.max_stack_entries = 512;
1611 rdev->config.evergreen.sx_num_of_sets = 4;
1612 rdev->config.evergreen.sx_max_export_size = 256;
1613 rdev->config.evergreen.sx_max_export_pos_size = 64;
1614 rdev->config.evergreen.sx_max_export_smx_size = 192;
1615 rdev->config.evergreen.max_hw_contexts = 8;
1616 rdev->config.evergreen.sq_num_cf_insts = 2;
1618 rdev->config.evergreen.sc_prim_fifo_size = 0x100;
1619 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1620 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1623 rdev->config.evergreen.num_ses = 1;
1624 rdev->config.evergreen.max_pipes = 4;
1625 rdev->config.evergreen.max_tile_pipes = 4;
1626 rdev->config.evergreen.max_simds = 5;
1627 rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
1628 rdev->config.evergreen.max_gprs = 256;
1629 rdev->config.evergreen.max_threads = 248;
1630 rdev->config.evergreen.max_gs_threads = 32;
1631 rdev->config.evergreen.max_stack_entries = 256;
1632 rdev->config.evergreen.sx_num_of_sets = 4;
1633 rdev->config.evergreen.sx_max_export_size = 256;
1634 rdev->config.evergreen.sx_max_export_pos_size = 64;
1635 rdev->config.evergreen.sx_max_export_smx_size = 192;
1636 rdev->config.evergreen.max_hw_contexts = 8;
1637 rdev->config.evergreen.sq_num_cf_insts = 2;
1639 rdev->config.evergreen.sc_prim_fifo_size = 0x100;
1640 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1641 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1645 rdev->config.evergreen.num_ses = 1;
1646 rdev->config.evergreen.max_pipes = 2;
1647 rdev->config.evergreen.max_tile_pipes = 2;
1648 rdev->config.evergreen.max_simds = 2;
1649 rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
1650 rdev->config.evergreen.max_gprs = 256;
1651 rdev->config.evergreen.max_threads = 192;
1652 rdev->config.evergreen.max_gs_threads = 16;
1653 rdev->config.evergreen.max_stack_entries = 256;
1654 rdev->config.evergreen.sx_num_of_sets = 4;
1655 rdev->config.evergreen.sx_max_export_size = 128;
1656 rdev->config.evergreen.sx_max_export_pos_size = 32;
1657 rdev->config.evergreen.sx_max_export_smx_size = 96;
1658 rdev->config.evergreen.max_hw_contexts = 4;
1659 rdev->config.evergreen.sq_num_cf_insts = 1;
1661 rdev->config.evergreen.sc_prim_fifo_size = 0x40;
1662 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1663 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1666 rdev->config.evergreen.num_ses = 1;
1667 rdev->config.evergreen.max_pipes = 2;
1668 rdev->config.evergreen.max_tile_pipes = 2;
1669 rdev->config.evergreen.max_simds = 2;
1670 rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
1671 rdev->config.evergreen.max_gprs = 256;
1672 rdev->config.evergreen.max_threads = 192;
1673 rdev->config.evergreen.max_gs_threads = 16;
1674 rdev->config.evergreen.max_stack_entries = 256;
1675 rdev->config.evergreen.sx_num_of_sets = 4;
1676 rdev->config.evergreen.sx_max_export_size = 128;
1677 rdev->config.evergreen.sx_max_export_pos_size = 32;
1678 rdev->config.evergreen.sx_max_export_smx_size = 96;
1679 rdev->config.evergreen.max_hw_contexts = 4;
1680 rdev->config.evergreen.sq_num_cf_insts = 1;
1682 rdev->config.evergreen.sc_prim_fifo_size = 0x40;
1683 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1684 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1687 rdev->config.evergreen.num_ses = 2;
1688 rdev->config.evergreen.max_pipes = 4;
1689 rdev->config.evergreen.max_tile_pipes = 8;
1690 rdev->config.evergreen.max_simds = 7;
1691 rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
1692 rdev->config.evergreen.max_gprs = 256;
1693 rdev->config.evergreen.max_threads = 248;
1694 rdev->config.evergreen.max_gs_threads = 32;
1695 rdev->config.evergreen.max_stack_entries = 512;
1696 rdev->config.evergreen.sx_num_of_sets = 4;
1697 rdev->config.evergreen.sx_max_export_size = 256;
1698 rdev->config.evergreen.sx_max_export_pos_size = 64;
1699 rdev->config.evergreen.sx_max_export_smx_size = 192;
1700 rdev->config.evergreen.max_hw_contexts = 8;
1701 rdev->config.evergreen.sq_num_cf_insts = 2;
1703 rdev->config.evergreen.sc_prim_fifo_size = 0x100;
1704 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1705 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1708 rdev->config.evergreen.num_ses = 1;
1709 rdev->config.evergreen.max_pipes = 4;
1710 rdev->config.evergreen.max_tile_pipes = 4;
1711 rdev->config.evergreen.max_simds = 6;
1712 rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
1713 rdev->config.evergreen.max_gprs = 256;
1714 rdev->config.evergreen.max_threads = 248;
1715 rdev->config.evergreen.max_gs_threads = 32;
1716 rdev->config.evergreen.max_stack_entries = 256;
1717 rdev->config.evergreen.sx_num_of_sets = 4;
1718 rdev->config.evergreen.sx_max_export_size = 256;
1719 rdev->config.evergreen.sx_max_export_pos_size = 64;
1720 rdev->config.evergreen.sx_max_export_smx_size = 192;
1721 rdev->config.evergreen.max_hw_contexts = 8;
1722 rdev->config.evergreen.sq_num_cf_insts = 2;
1724 rdev->config.evergreen.sc_prim_fifo_size = 0x100;
1725 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1726 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1729 rdev->config.evergreen.num_ses = 1;
1730 rdev->config.evergreen.max_pipes = 4;
1731 rdev->config.evergreen.max_tile_pipes = 2;
1732 rdev->config.evergreen.max_simds = 2;
1733 rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
1734 rdev->config.evergreen.max_gprs = 256;
1735 rdev->config.evergreen.max_threads = 192;
1736 rdev->config.evergreen.max_gs_threads = 16;
1737 rdev->config.evergreen.max_stack_entries = 256;
1738 rdev->config.evergreen.sx_num_of_sets = 4;
1739 rdev->config.evergreen.sx_max_export_size = 128;
1740 rdev->config.evergreen.sx_max_export_pos_size = 32;
1741 rdev->config.evergreen.sx_max_export_smx_size = 96;
1742 rdev->config.evergreen.max_hw_contexts = 4;
1743 rdev->config.evergreen.sq_num_cf_insts = 1;
1745 rdev->config.evergreen.sc_prim_fifo_size = 0x40;
1746 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1747 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1751 /* Initialize HDP */
1752 for (i = 0, j = 0; i < 32; i++, j += 0x18) {
1753 WREG32((0x2c14 + j), 0x00000000);
1754 WREG32((0x2c18 + j), 0x00000000);
1755 WREG32((0x2c1c + j), 0x00000000);
1756 WREG32((0x2c20 + j), 0x00000000);
1757 WREG32((0x2c24 + j), 0x00000000);
1760 WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
1762 cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & ~2;
1764 cc_gc_shader_pipe_config |=
1765 INACTIVE_QD_PIPES((EVERGREEN_MAX_PIPES_MASK << rdev->config.evergreen.max_pipes)
1766 & EVERGREEN_MAX_PIPES_MASK);
1767 cc_gc_shader_pipe_config |=
1768 INACTIVE_SIMDS((EVERGREEN_MAX_SIMDS_MASK << rdev->config.evergreen.max_simds)
1769 & EVERGREEN_MAX_SIMDS_MASK);
1771 cc_rb_backend_disable =
1772 BACKEND_DISABLE((EVERGREEN_MAX_BACKENDS_MASK << rdev->config.evergreen.max_backends)
1773 & EVERGREEN_MAX_BACKENDS_MASK);
1776 mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
1777 mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
1779 switch (rdev->config.evergreen.max_tile_pipes) {
1782 gb_addr_config |= NUM_PIPES(0);
1785 gb_addr_config |= NUM_PIPES(1);
1788 gb_addr_config |= NUM_PIPES(2);
1791 gb_addr_config |= NUM_PIPES(3);
1795 gb_addr_config |= PIPE_INTERLEAVE_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
1796 gb_addr_config |= BANK_INTERLEAVE_SIZE(0);
1797 gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.evergreen.num_ses - 1);
1798 gb_addr_config |= SHADER_ENGINE_TILE_SIZE(1);
1799 gb_addr_config |= NUM_GPUS(0); /* Hemlock? */
1800 gb_addr_config |= MULTI_GPU_TILE_SIZE(2);
1802 if (((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) > 2)
1803 gb_addr_config |= ROW_SIZE(2);
1805 gb_addr_config |= ROW_SIZE((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT);
1807 if (rdev->ddev->pdev->device == 0x689e) {
1810 u8 efuse_box_bit_131_124;
1812 WREG32(RCU_IND_INDEX, 0x204);
1813 efuse_straps_4 = RREG32(RCU_IND_DATA);
1814 WREG32(RCU_IND_INDEX, 0x203);
1815 efuse_straps_3 = RREG32(RCU_IND_DATA);
1816 efuse_box_bit_131_124 = (u8)(((efuse_straps_4 & 0xf) << 4) | ((efuse_straps_3 & 0xf0000000) >> 28));
1818 switch(efuse_box_bit_131_124) {
1820 gb_backend_map = 0x76543210;
1823 gb_backend_map = 0x77553311;
1826 gb_backend_map = 0x77553300;
1829 gb_backend_map = 0x77552211;
1832 gb_backend_map = 0x77443300;
1835 gb_backend_map = 0x66552211;
1838 gb_backend_map = 0x77552200;
1841 gb_backend_map = 0x66442200;
1844 gb_backend_map = 0x66553311;
1847 DRM_ERROR("bad backend map, using default\n");
1849 evergreen_get_tile_pipe_to_backend_map(rdev,
1850 rdev->config.evergreen.max_tile_pipes,
1851 rdev->config.evergreen.max_backends,
1852 ((EVERGREEN_MAX_BACKENDS_MASK <<
1853 rdev->config.evergreen.max_backends) &
1854 EVERGREEN_MAX_BACKENDS_MASK));
1857 } else if (rdev->ddev->pdev->device == 0x68b9) {
1859 u8 efuse_box_bit_127_124;
1861 WREG32(RCU_IND_INDEX, 0x203);
1862 efuse_straps_3 = RREG32(RCU_IND_DATA);
1863 efuse_box_bit_127_124 = (u8)((efuse_straps_3 & 0xF0000000) >> 28);
1865 switch(efuse_box_bit_127_124) {
1867 gb_backend_map = 0x00003210;
1873 gb_backend_map = 0x00003311;
1876 DRM_ERROR("bad backend map, using default\n");
1878 evergreen_get_tile_pipe_to_backend_map(rdev,
1879 rdev->config.evergreen.max_tile_pipes,
1880 rdev->config.evergreen.max_backends,
1881 ((EVERGREEN_MAX_BACKENDS_MASK <<
1882 rdev->config.evergreen.max_backends) &
1883 EVERGREEN_MAX_BACKENDS_MASK));
1887 switch (rdev->family) {
1891 gb_backend_map = 0x66442200;
1894 gb_backend_map = 0x00006420;
1898 evergreen_get_tile_pipe_to_backend_map(rdev,
1899 rdev->config.evergreen.max_tile_pipes,
1900 rdev->config.evergreen.max_backends,
1901 ((EVERGREEN_MAX_BACKENDS_MASK <<
1902 rdev->config.evergreen.max_backends) &
1903 EVERGREEN_MAX_BACKENDS_MASK));
1907 /* Set up the tiling info dword. gb_addr_config is not adequate since it
1908 * does not contain bank info, so create a custom tiling dword.
1909 * bits 3:0 num_pipes
1910 * bits 7:4 num_banks
1911 * bits 11:8 group_size
1912 * bits 15:12 row_size
1914 rdev->config.evergreen.tile_config = 0;
1915 switch (rdev->config.evergreen.max_tile_pipes) {
1918 rdev->config.evergreen.tile_config |= (0 << 0);
1921 rdev->config.evergreen.tile_config |= (1 << 0);
1924 rdev->config.evergreen.tile_config |= (2 << 0);
1927 rdev->config.evergreen.tile_config |= (3 << 0);
1930 rdev->config.evergreen.tile_config |=
1931 ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
1932 rdev->config.evergreen.tile_config |=
1933 ((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT) << 8;
1934 rdev->config.evergreen.tile_config |=
1935 ((gb_addr_config & 0x30000000) >> 28) << 12;
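/* Worked example (numbers assumed purely for illustration): with 8 tile pipes
 * (field value 3), NOOFBANK reporting 2 and BURSTLENGTH reporting 1, the
 * packing above would yield tile_config = (3 << 0) | (2 << 4) | (1 << 8) |
 * (row_field << 12), matching the bit layout documented in the comment. */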
1937 WREG32(GB_BACKEND_MAP, gb_backend_map);
1938 WREG32(GB_ADDR_CONFIG, gb_addr_config);
1939 WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
1940 WREG32(HDP_ADDR_CONFIG, gb_addr_config);
1942 evergreen_program_channel_remap(rdev);
1944 num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1;
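/* NUM_SHADER_ENGINES(3) is used here only as a field mask; the field lives at
 * bits 13:12 of GB_ADDR_CONFIG, hence the >> 12, and the + 1 converts the
 * zero-based encoding back into a count. */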
1945 grbm_gfx_index = INSTANCE_BROADCAST_WRITES;
1947 for (i = 0; i < rdev->config.evergreen.num_ses; i++) {
1948 u32 rb = cc_rb_backend_disable | (0xf0 << 16);
1949 u32 sp = cc_gc_shader_pipe_config;
1950 u32 gfx = grbm_gfx_index | SE_INDEX(i);
1952 if (i == num_shader_engines) {
1953 rb |= BACKEND_DISABLE(EVERGREEN_MAX_BACKENDS_MASK);
1954 sp |= INACTIVE_SIMDS(EVERGREEN_MAX_SIMDS_MASK);
1957 WREG32(GRBM_GFX_INDEX, gfx);
1958 WREG32(RLC_GFX_INDEX, gfx);
1960 WREG32(CC_RB_BACKEND_DISABLE, rb);
1961 WREG32(CC_SYS_RB_BACKEND_DISABLE, rb);
1962 WREG32(GC_USER_RB_BACKEND_DISABLE, rb);
1963 WREG32(CC_GC_SHADER_PIPE_CONFIG, sp);
1966 grbm_gfx_index |= SE_BROADCAST_WRITES;
1967 WREG32(GRBM_GFX_INDEX, grbm_gfx_index);
1968 WREG32(RLC_GFX_INDEX, grbm_gfx_index);
1970 WREG32(CGTS_SYS_TCC_DISABLE, 0);
1971 WREG32(CGTS_TCC_DISABLE, 0);
1972 WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
1973 WREG32(CGTS_USER_TCC_DISABLE, 0);
1975 /* set HW defaults for 3D engine */
1976 WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
1977 ROQ_IB2_START(0x2b)));
1979 WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30));
1981 WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO |
1986 sx_debug_1 = RREG32(SX_DEBUG_1);
1987 sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
1988 WREG32(SX_DEBUG_1, sx_debug_1);
1991 smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
1992 smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
1993 smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets);
1994 WREG32(SMX_DC_CTL0, smx_dc_ctl0);
1996 WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) |
1997 POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) |
1998 SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1)));
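/* Example arithmetic: a part configured with sx_max_export_smx_size = 96 (one
 * of the family cases above) gets SMX_BUFFER_SIZE((96 / 4) - 1) = 23; the
 * color and position fields are derived the same way from their limits. */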
2000 WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.evergreen.sc_prim_fifo_size) |
2001 SC_HIZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_hiz_tile_fifo_size) |
2002 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_earlyz_tile_fifo_size)));
2004 WREG32(VGT_NUM_INSTANCES, 1);
2005 WREG32(SPI_CONFIG_CNTL, 0);
2006 WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
2007 WREG32(CP_PERFMON_CNTL, 0);
2009 WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.evergreen.sq_num_cf_insts) |
2010 FETCH_FIFO_HIWATER(0x4) |
2011 DONE_FIFO_HIWATER(0xe0) |
2012 ALU_UPDATE_FIFO_HIWATER(0x8)));
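/* With sq_num_cf_insts = 1 (again one of the family cases above), the cache
 * FIFO is sized as 16 * 1 = 16 entries here. */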
2014 sq_config = RREG32(SQ_CONFIG);
2015 sq_config &= ~(PS_PRIO(3) |
2019 sq_config |= (VC_ENABLE |
2026 switch (rdev->family) {
2030 /* no vertex cache */
2031 sq_config &= ~VC_ENABLE;
2037 sq_lds_resource_mgmt = RREG32(SQ_LDS_RESOURCE_MGMT);
2039 sq_gpr_resource_mgmt_1 = NUM_PS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2))* 12 / 32);
2040 sq_gpr_resource_mgmt_1 |= NUM_VS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 6 / 32);
2041 sq_gpr_resource_mgmt_1 |= NUM_CLAUSE_TEMP_GPRS(4);
2042 sq_gpr_resource_mgmt_2 = NUM_GS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
2043 sq_gpr_resource_mgmt_2 |= NUM_ES_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
2044 sq_gpr_resource_mgmt_3 = NUM_HS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
2045 sq_gpr_resource_mgmt_3 |= NUM_LS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
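/* Rough sketch of the split, assuming max_gprs = 256 (typical of the larger
 * Evergreen parts): the (4 * 2) term reserves room for the clause-temporary
 * GPRs, leaving 248 to divide as 12/32 for PS (93), 6/32 for VS (46), 4/32
 * each for GS/ES (31) and 3/32 each for HS/LS (23). */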
2047 switch (rdev->family) {
2050 ps_thread_count = 96;
2053 ps_thread_count = 128;
2057 sq_thread_resource_mgmt = NUM_PS_THREADS(ps_thread_count);
2058 sq_thread_resource_mgmt |= NUM_VS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
2059 sq_thread_resource_mgmt |= NUM_GS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
2060 sq_thread_resource_mgmt |= NUM_ES_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
2061 sq_thread_resource_mgmt_2 = NUM_HS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
2062 sq_thread_resource_mgmt_2 |= NUM_LS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
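/* Worked example, assuming max_threads = 248 with the 128-thread PS case
 * above: the remaining 120 threads are split six ways and rounded down to a
 * multiple of 8, i.e. 16 threads each for VS/GS/ES/HS/LS. */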
2064 sq_stack_resource_mgmt_1 = NUM_PS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
2065 sq_stack_resource_mgmt_1 |= NUM_VS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
2066 sq_stack_resource_mgmt_2 = NUM_GS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
2067 sq_stack_resource_mgmt_2 |= NUM_ES_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
2068 sq_stack_resource_mgmt_3 = NUM_HS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
2069 sq_stack_resource_mgmt_3 |= NUM_LS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
2071 WREG32(SQ_CONFIG, sq_config);
2072 WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
2073 WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
2074 WREG32(SQ_GPR_RESOURCE_MGMT_3, sq_gpr_resource_mgmt_3);
2075 WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
2076 WREG32(SQ_THREAD_RESOURCE_MGMT_2, sq_thread_resource_mgmt_2);
2077 WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
2078 WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
2079 WREG32(SQ_STACK_RESOURCE_MGMT_3, sq_stack_resource_mgmt_3);
2080 WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, 0);
2081 WREG32(SQ_LDS_RESOURCE_MGMT, sq_lds_resource_mgmt);
2083 WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
2084 FORCE_EOV_MAX_REZ_CNT(255)));
2086 switch (rdev->family) {
2090 vgt_cache_invalidation = CACHE_INVALIDATION(TC_ONLY);
2093 vgt_cache_invalidation = CACHE_INVALIDATION(VC_AND_TC);
2096 vgt_cache_invalidation |= AUTO_INVLD_EN(ES_AND_GS_AUTO);
2097 WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation);
2099 WREG32(VGT_GS_VERTEX_REUSE, 16);
2100 WREG32(PA_SU_LINE_STIPPLE_VALUE, 0);
2101 WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
2103 WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, 14);
2104 WREG32(VGT_OUT_DEALLOC_CNTL, 16);
2106 WREG32(CB_PERF_CTR0_SEL_0, 0);
2107 WREG32(CB_PERF_CTR0_SEL_1, 0);
2108 WREG32(CB_PERF_CTR1_SEL_0, 0);
2109 WREG32(CB_PERF_CTR1_SEL_1, 0);
2110 WREG32(CB_PERF_CTR2_SEL_0, 0);
2111 WREG32(CB_PERF_CTR2_SEL_1, 0);
2112 WREG32(CB_PERF_CTR3_SEL_0, 0);
2113 WREG32(CB_PERF_CTR3_SEL_1, 0);
2115 /* clear render buffer base addresses */
2116 WREG32(CB_COLOR0_BASE, 0);
2117 WREG32(CB_COLOR1_BASE, 0);
2118 WREG32(CB_COLOR2_BASE, 0);
2119 WREG32(CB_COLOR3_BASE, 0);
2120 WREG32(CB_COLOR4_BASE, 0);
2121 WREG32(CB_COLOR5_BASE, 0);
2122 WREG32(CB_COLOR6_BASE, 0);
2123 WREG32(CB_COLOR7_BASE, 0);
2124 WREG32(CB_COLOR8_BASE, 0);
2125 WREG32(CB_COLOR9_BASE, 0);
2126 WREG32(CB_COLOR10_BASE, 0);
2127 WREG32(CB_COLOR11_BASE, 0);
2129 /* set the shader const cache sizes to 0 */
2130 for (i = SQ_ALU_CONST_BUFFER_SIZE_PS_0; i < 0x28200; i += 4)
2132 for (i = SQ_ALU_CONST_BUFFER_SIZE_HS_0; i < 0x29000; i += 4)
2135 hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
2136 WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
2138 WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
2144 int evergreen_mc_init(struct radeon_device *rdev)
2147 int chansize, numchan;
2149 /* Get VRAM information */
2150 rdev->mc.vram_is_ddr = true;
2151 tmp = RREG32(MC_ARB_RAMCFG);
2152 if (tmp & CHANSIZE_OVERRIDE) {
2154 } else if (tmp & CHANSIZE_MASK) {
2159 tmp = RREG32(MC_SHARED_CHMAP);
2160 switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
2175 rdev->mc.vram_width = numchan * chansize;
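/* e.g. (hypothetical numbers) 8 populated channels of 32-bit DRAM would
 * report an effective VRAM width of 256 bits here. */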
2176 /* Could the aperture size report 0? */
2177 rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
2178 rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
2179 /* Setup GPU memory space */
2180 if (rdev->flags & RADEON_IS_IGP) {
2181 /* size in bytes on fusion */
2182 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
2183 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
2185 /* size in MB on evergreen */
2186 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
2187 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
2189 rdev->mc.visible_vram_size = rdev->mc.aper_size;
2190 r700_vram_gtt_location(rdev, &rdev->mc);
2191 radeon_update_bandwidth_info(rdev);
2196 bool evergreen_gpu_is_lockup(struct radeon_device *rdev)
2200 u32 grbm_status_se0, grbm_status_se1;
2201 struct r100_gpu_lockup *lockup = &rdev->config.evergreen.lockup;
2204 srbm_status = RREG32(SRBM_STATUS);
2205 grbm_status = RREG32(GRBM_STATUS);
2206 grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
2207 grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
2208 if (!(grbm_status & GUI_ACTIVE)) {
2209 r100_gpu_lockup_update(lockup, &rdev->cp);
2212 /* force CP activities */
2213 r = radeon_ring_lock(rdev, 2);
2216 radeon_ring_write(rdev, 0x80000000);
2217 radeon_ring_write(rdev, 0x80000000);
2218 radeon_ring_unlock_commit(rdev);
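/* The two 0x80000000 dwords are type-2 (filler/NOP) packets; they only give
 * the CP something to fetch so rptr advances if the engine is still alive. */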
2220 rdev->cp.rptr = RREG32(CP_RB_RPTR);
2221 return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
2224 static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
2226 struct evergreen_mc_save save;
2229 if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
2232 dev_info(rdev->dev, "GPU softreset \n");
2233 dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
2234 RREG32(GRBM_STATUS));
2235 dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
2236 RREG32(GRBM_STATUS_SE0));
2237 dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
2238 RREG32(GRBM_STATUS_SE1));
2239 dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
2240 RREG32(SRBM_STATUS));
2241 evergreen_mc_stop(rdev, &save);
2242 if (evergreen_mc_wait_for_idle(rdev)) {
2243 dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
2245 /* Disable CP parsing/prefetching */
2246 WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
2248 /* reset all the gfx blocks */
2249 grbm_reset = (SOFT_RESET_CP |
2262 dev_info(rdev->dev, " GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
2263 WREG32(GRBM_SOFT_RESET, grbm_reset);
2264 (void)RREG32(GRBM_SOFT_RESET);
2266 WREG32(GRBM_SOFT_RESET, 0);
2267 (void)RREG32(GRBM_SOFT_RESET);
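/* The (void) readbacks post each GRBM_SOFT_RESET write over the bus before
 * the next step, ensuring the reset pulse actually reaches the blocks. */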
2268 /* Wait a little for things to settle down */
2270 dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
2271 RREG32(GRBM_STATUS));
2272 dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
2273 RREG32(GRBM_STATUS_SE0));
2274 dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
2275 RREG32(GRBM_STATUS_SE1));
2276 dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
2277 RREG32(SRBM_STATUS));
2278 evergreen_mc_resume(rdev, &save);
2282 int evergreen_asic_reset(struct radeon_device *rdev)
2284 return evergreen_gpu_soft_reset(rdev);
2289 u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc)
2293 return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC0_REGISTER_OFFSET);
2295 return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC1_REGISTER_OFFSET);
2297 return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC2_REGISTER_OFFSET);
2299 return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC3_REGISTER_OFFSET);
2301 return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC4_REGISTER_OFFSET);
2303 return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC5_REGISTER_OFFSET);
2309 void evergreen_disable_interrupt_state(struct radeon_device *rdev)
2313 WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
2314 WREG32(GRBM_INT_CNTL, 0);
2315 WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
2316 WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
2317 if (!(rdev->flags & RADEON_IS_IGP)) {
2318 WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
2319 WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
2320 WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
2321 WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
2324 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
2325 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
2326 if (!(rdev->flags & RADEON_IS_IGP)) {
2327 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
2328 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
2329 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
2330 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
2333 WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
2334 WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
2336 tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2337 WREG32(DC_HPD1_INT_CONTROL, tmp);
2338 tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2339 WREG32(DC_HPD2_INT_CONTROL, tmp);
2340 tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2341 WREG32(DC_HPD3_INT_CONTROL, tmp);
2342 tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2343 WREG32(DC_HPD4_INT_CONTROL, tmp);
2344 tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2345 WREG32(DC_HPD5_INT_CONTROL, tmp);
2346 tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2347 WREG32(DC_HPD6_INT_CONTROL, tmp);
2351 int evergreen_irq_set(struct radeon_device *rdev)
2353 u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
2354 u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
2355 u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
2356 u32 grbm_int_cntl = 0;
2357 u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
2359 if (!rdev->irq.installed) {
2360 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
2363 /* don't enable anything if the ih is disabled */
2364 if (!rdev->ih.enabled) {
2365 r600_disable_interrupts(rdev);
2366 /* force the active interrupt state to all disabled */
2367 evergreen_disable_interrupt_state(rdev);
2371 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
2372 hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
2373 hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
2374 hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
2375 hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
2376 hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
2378 if (rdev->irq.sw_int) {
2379 DRM_DEBUG("evergreen_irq_set: sw int\n");
2380 cp_int_cntl |= RB_INT_ENABLE;
2381 cp_int_cntl |= TIME_STAMP_INT_ENABLE;
2383 if (rdev->irq.crtc_vblank_int[0] ||
2384 rdev->irq.pflip[0]) {
2385 DRM_DEBUG("evergreen_irq_set: vblank 0\n");
2386 crtc1 |= VBLANK_INT_MASK;
2388 if (rdev->irq.crtc_vblank_int[1] ||
2389 rdev->irq.pflip[1]) {
2390 DRM_DEBUG("evergreen_irq_set: vblank 1\n");
2391 crtc2 |= VBLANK_INT_MASK;
2393 if (rdev->irq.crtc_vblank_int[2] ||
2394 rdev->irq.pflip[2]) {
2395 DRM_DEBUG("evergreen_irq_set: vblank 2\n");
2396 crtc3 |= VBLANK_INT_MASK;
2398 if (rdev->irq.crtc_vblank_int[3] ||
2399 rdev->irq.pflip[3]) {
2400 DRM_DEBUG("evergreen_irq_set: vblank 3\n");
2401 crtc4 |= VBLANK_INT_MASK;
2403 if (rdev->irq.crtc_vblank_int[4] ||
2404 rdev->irq.pflip[4]) {
2405 DRM_DEBUG("evergreen_irq_set: vblank 4\n");
2406 crtc5 |= VBLANK_INT_MASK;
2408 if (rdev->irq.crtc_vblank_int[5] ||
2409 rdev->irq.pflip[5]) {
2410 DRM_DEBUG("evergreen_irq_set: vblank 5\n");
2411 crtc6 |= VBLANK_INT_MASK;
2413 if (rdev->irq.hpd[0]) {
2414 DRM_DEBUG("evergreen_irq_set: hpd 1\n");
2415 hpd1 |= DC_HPDx_INT_EN;
2417 if (rdev->irq.hpd[1]) {
2418 DRM_DEBUG("evergreen_irq_set: hpd 2\n");
2419 hpd2 |= DC_HPDx_INT_EN;
2421 if (rdev->irq.hpd[2]) {
2422 DRM_DEBUG("evergreen_irq_set: hpd 3\n");
2423 hpd3 |= DC_HPDx_INT_EN;
2425 if (rdev->irq.hpd[3]) {
2426 DRM_DEBUG("evergreen_irq_set: hpd 4\n");
2427 hpd4 |= DC_HPDx_INT_EN;
2429 if (rdev->irq.hpd[4]) {
2430 DRM_DEBUG("evergreen_irq_set: hpd 5\n");
2431 hpd5 |= DC_HPDx_INT_EN;
2433 if (rdev->irq.hpd[5]) {
2434 DRM_DEBUG("evergreen_irq_set: hpd 6\n");
2435 hpd6 |= DC_HPDx_INT_EN;
2437 if (rdev->irq.gui_idle) {
2438 DRM_DEBUG("gui idle\n");
2439 grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
2442 WREG32(CP_INT_CNTL, cp_int_cntl);
2443 WREG32(GRBM_INT_CNTL, grbm_int_cntl);
2445 WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
2446 WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
2447 if (!(rdev->flags & RADEON_IS_IGP)) {
2448 WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
2449 WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
2450 WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
2451 WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
2454 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1);
2455 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2);
2456 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3);
2457 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4);
2458 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5);
2459 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6);
2461 WREG32(DC_HPD1_INT_CONTROL, hpd1);
2462 WREG32(DC_HPD2_INT_CONTROL, hpd2);
2463 WREG32(DC_HPD3_INT_CONTROL, hpd3);
2464 WREG32(DC_HPD4_INT_CONTROL, hpd4);
2465 WREG32(DC_HPD5_INT_CONTROL, hpd5);
2466 WREG32(DC_HPD6_INT_CONTROL, hpd6);
2471 static inline void evergreen_irq_ack(struct radeon_device *rdev)
2475 rdev->irq.stat_regs.evergreen.disp_int = RREG32(DISP_INTERRUPT_STATUS);
2476 rdev->irq.stat_regs.evergreen.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
2477 rdev->irq.stat_regs.evergreen.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
2478 rdev->irq.stat_regs.evergreen.disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
2479 rdev->irq.stat_regs.evergreen.disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
2480 rdev->irq.stat_regs.evergreen.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
2481 rdev->irq.stat_regs.evergreen.d1grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET);
2482 rdev->irq.stat_regs.evergreen.d2grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET);
2483 rdev->irq.stat_regs.evergreen.d3grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET);
2484 rdev->irq.stat_regs.evergreen.d4grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET);
2485 rdev->irq.stat_regs.evergreen.d5grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET);
2486 rdev->irq.stat_regs.evergreen.d6grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);
2488 if (rdev->irq.stat_regs.evergreen.d1grph_int & GRPH_PFLIP_INT_OCCURRED)
2489 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
2490 if (rdev->irq.stat_regs.evergreen.d2grph_int & GRPH_PFLIP_INT_OCCURRED)
2491 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
2492 if (rdev->irq.stat_regs.evergreen.d3grph_int & GRPH_PFLIP_INT_OCCURRED)
2493 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
2494 if (rdev->irq.stat_regs.evergreen.d4grph_int & GRPH_PFLIP_INT_OCCURRED)
2495 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
2496 if (rdev->irq.stat_regs.evergreen.d5grph_int & GRPH_PFLIP_INT_OCCURRED)
2497 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
2498 if (rdev->irq.stat_regs.evergreen.d6grph_int & GRPH_PFLIP_INT_OCCURRED)
2499 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
2501 if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT)
2502 WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
2503 if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT)
2504 WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK);
2506 if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT)
2507 WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK);
2508 if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT)
2509 WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);
2511 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
2512 WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
2513 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
2514 WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);
2516 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
2517 WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
2518 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
2519 WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);
2521 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
2522 WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
2523 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
2524 WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
2526 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
2527 WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
2528 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
2529 WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
2531 if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
2532 tmp = RREG32(DC_HPD1_INT_CONTROL);
2533 tmp |= DC_HPDx_INT_ACK;
2534 WREG32(DC_HPD1_INT_CONTROL, tmp);
2536 if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
2537 tmp = RREG32(DC_HPD2_INT_CONTROL);
2538 tmp |= DC_HPDx_INT_ACK;
2539 WREG32(DC_HPD2_INT_CONTROL, tmp);
2541 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
2542 tmp = RREG32(DC_HPD3_INT_CONTROL);
2543 tmp |= DC_HPDx_INT_ACK;
2544 WREG32(DC_HPD3_INT_CONTROL, tmp);
2546 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
2547 tmp = RREG32(DC_HPD4_INT_CONTROL);
2548 tmp |= DC_HPDx_INT_ACK;
2549 WREG32(DC_HPD4_INT_CONTROL, tmp);
2551 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
2552 tmp = RREG32(DC_HPD5_INT_CONTROL);
2553 tmp |= DC_HPDx_INT_ACK;
2554 WREG32(DC_HPD5_INT_CONTROL, tmp);
2556 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
2557 tmp = RREG32(DC_HPD6_INT_CONTROL);
2558 tmp |= DC_HPDx_INT_ACK;
2559 WREG32(DC_HPD6_INT_CONTROL, tmp);
2563 void evergreen_irq_disable(struct radeon_device *rdev)
2565 r600_disable_interrupts(rdev);
2566 /* Wait and acknowledge irq */
2568 evergreen_irq_ack(rdev);
2569 evergreen_disable_interrupt_state(rdev);
2572 void evergreen_irq_suspend(struct radeon_device *rdev)
2574 evergreen_irq_disable(rdev);
2575 r600_rlc_stop(rdev);
2578 static inline u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
2582 if (rdev->wb.enabled)
2583 wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
2585 wptr = RREG32(IH_RB_WPTR);
2587 if (wptr & RB_OVERFLOW) {
2588 /* When a ring buffer overflow happens, start parsing interrupts
2589 * from the last vector that was not overwritten (wptr + 16). Hopefully
2590 * this should allow us to catch up.
2592 dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
2593 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
2594 rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
2595 tmp = RREG32(IH_RB_CNTL);
2596 tmp |= IH_WPTR_OVERFLOW_CLEAR;
2597 WREG32(IH_RB_CNTL, tmp);
2599 return (wptr & rdev->ih.ptr_mask);
2602 int evergreen_irq_process(struct radeon_device *rdev)
2604 u32 wptr = evergreen_get_ih_wptr(rdev);
2605 u32 rptr = rdev->ih.rptr;
2606 u32 src_id, src_data;
2608 unsigned long flags;
2609 bool queue_hotplug = false;
2611 DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
2612 if (!rdev->ih.enabled)
2615 spin_lock_irqsave(&rdev->ih.lock, flags);
2618 spin_unlock_irqrestore(&rdev->ih.lock, flags);
2621 if (rdev->shutdown) {
2622 spin_unlock_irqrestore(&rdev->ih.lock, flags);
2627 /* display interrupts */
2628 evergreen_irq_ack(rdev);
2630 rdev->ih.wptr = wptr;
2631 while (rptr != wptr) {
2632 /* wptr/rptr are in bytes! */
2633 ring_index = rptr / 4;
2634 src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
2635 src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
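/* Each IH ring entry is 16 bytes (4 dwords): dword 0 carries the source id in
 * its low 8 bits and dword 1 the source-specific data in its low 28 bits,
 * which is why the read pointer advances in 16-byte steps. */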
2638 case 1: /* D1 vblank/vline */
2640 case 0: /* D1 vblank */
2641 if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) {
2642 if (rdev->irq.crtc_vblank_int[0]) {
2643 drm_handle_vblank(rdev->ddev, 0);
2644 rdev->pm.vblank_sync = true;
2645 wake_up(&rdev->irq.vblank_queue);
2647 if (rdev->irq.pflip[0])
2648 radeon_crtc_handle_flip(rdev, 0);
2649 rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
2650 DRM_DEBUG("IH: D1 vblank\n");
2653 case 1: /* D1 vline */
2654 if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) {
2655 rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
2656 DRM_DEBUG("IH: D1 vline\n");
2660 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
2664 case 2: /* D2 vblank/vline */
2666 case 0: /* D2 vblank */
2667 if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
2668 if (rdev->irq.crtc_vblank_int[1]) {
2669 drm_handle_vblank(rdev->ddev, 1);
2670 rdev->pm.vblank_sync = true;
2671 wake_up(&rdev->irq.vblank_queue);
2673 if (rdev->irq.pflip[1])
2674 radeon_crtc_handle_flip(rdev, 1);
2675 rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
2676 DRM_DEBUG("IH: D2 vblank\n");
2679 case 1: /* D2 vline */
2680 if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
2681 rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
2682 DRM_DEBUG("IH: D2 vline\n");
2686 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
2690 case 3: /* D3 vblank/vline */
2692 case 0: /* D3 vblank */
2693 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
2694 if (rdev->irq.crtc_vblank_int[2]) {
2695 drm_handle_vblank(rdev->ddev, 2);
2696 rdev->pm.vblank_sync = true;
2697 wake_up(&rdev->irq.vblank_queue);
2699 if (rdev->irq.pflip[2])
2700 radeon_crtc_handle_flip(rdev, 2);
2701 rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
2702 DRM_DEBUG("IH: D3 vblank\n");
2705 case 1: /* D3 vline */
2706 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
2707 rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
2708 DRM_DEBUG("IH: D3 vline\n");
2712 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
2716 case 4: /* D4 vblank/vline */
2718 case 0: /* D4 vblank */
2719 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
2720 if (rdev->irq.crtc_vblank_int[3]) {
2721 drm_handle_vblank(rdev->ddev, 3);
2722 rdev->pm.vblank_sync = true;
2723 wake_up(&rdev->irq.vblank_queue);
2725 if (rdev->irq.pflip[3])
2726 radeon_crtc_handle_flip(rdev, 3);
2727 rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
2728 DRM_DEBUG("IH: D4 vblank\n");
2731 case 1: /* D4 vline */
2732 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
2733 rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
2734 DRM_DEBUG("IH: D4 vline\n");
2738 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
2742 case 5: /* D5 vblank/vline */
2744 case 0: /* D5 vblank */
2745 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
2746 if (rdev->irq.crtc_vblank_int[4]) {
2747 drm_handle_vblank(rdev->ddev, 4);
2748 rdev->pm.vblank_sync = true;
2749 wake_up(&rdev->irq.vblank_queue);
2751 if (rdev->irq.pflip[4])
2752 radeon_crtc_handle_flip(rdev, 4);
2753 rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
2754 DRM_DEBUG("IH: D5 vblank\n");
2757 case 1: /* D5 vline */
2758 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
2759 rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
2760 DRM_DEBUG("IH: D5 vline\n");
2764 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
2768 case 6: /* D6 vblank/vline */
2770 case 0: /* D6 vblank */
2771 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
2772 if (rdev->irq.crtc_vblank_int[5]) {
2773 drm_handle_vblank(rdev->ddev, 5);
2774 rdev->pm.vblank_sync = true;
2775 wake_up(&rdev->irq.vblank_queue);
2777 if (rdev->irq.pflip[5])
2778 radeon_crtc_handle_flip(rdev, 5);
2779 rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
2780 DRM_DEBUG("IH: D6 vblank\n");
2783 case 1: /* D6 vline */
2784 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
2785 rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
2786 DRM_DEBUG("IH: D6 vline\n");
2790 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
2794 case 42: /* HPD hotplug */
2797 if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
2798 rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
2799 queue_hotplug = true;
2800 DRM_DEBUG("IH: HPD1\n");
2804 if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
2805 rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
2806 queue_hotplug = true;
2807 DRM_DEBUG("IH: HPD2\n");
2811 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
2812 rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
2813 queue_hotplug = true;
2814 DRM_DEBUG("IH: HPD3\n");
2818 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
2819 rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
2820 queue_hotplug = true;
2821 DRM_DEBUG("IH: HPD4\n");
2825 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
2826 rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
2827 queue_hotplug = true;
2828 DRM_DEBUG("IH: HPD5\n");
2832 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
2833 rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
2834 queue_hotplug = true;
2835 DRM_DEBUG("IH: HPD6\n");
2839 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
2843 case 176: /* CP_INT in ring buffer */
2844 case 177: /* CP_INT in IB1 */
2845 case 178: /* CP_INT in IB2 */
2846 DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
2847 radeon_fence_process(rdev);
2849 case 181: /* CP EOP event */
2850 DRM_DEBUG("IH: CP EOP\n");
2851 radeon_fence_process(rdev);
2853 case 233: /* GUI IDLE */
2854 DRM_DEBUG("IH: CP EOP\n");
2855 rdev->pm.gui_idle = true;
2856 wake_up(&rdev->irq.idle_queue);
2859 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
2863 /* wptr/rptr are in bytes! */
2865 rptr &= rdev->ih.ptr_mask;
2867 /* make sure wptr hasn't changed while processing */
2868 wptr = evergreen_get_ih_wptr(rdev);
2869 if (wptr != rdev->ih.wptr)
2872 schedule_work(&rdev->hotplug_work);
2873 rdev->ih.rptr = rptr;
2874 WREG32(IH_RB_RPTR, rdev->ih.rptr);
2875 spin_unlock_irqrestore(&rdev->ih.lock, flags);
2879 static int evergreen_startup(struct radeon_device *rdev)
2883 /* enable pcie gen2 link */
2884 if (!ASIC_IS_DCE5(rdev))
2885 evergreen_pcie_gen2_enable(rdev);
2887 if (ASIC_IS_DCE5(rdev)) {
2888 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
2889 r = ni_init_microcode(rdev);
2891 DRM_ERROR("Failed to load firmware!\n");
2895 r = ni_mc_load_microcode(rdev);
2897 DRM_ERROR("Failed to load MC firmware!\n");
2901 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
2902 r = r600_init_microcode(rdev);
2904 DRM_ERROR("Failed to load firmware!\n");
2910 evergreen_mc_program(rdev);
2911 if (rdev->flags & RADEON_IS_AGP) {
2912 evergreen_agp_enable(rdev);
2914 r = evergreen_pcie_gart_enable(rdev);
2918 evergreen_gpu_init(rdev);
2920 r = evergreen_blit_init(rdev);
2922 evergreen_blit_fini(rdev);
2923 rdev->asic->copy = NULL;
2924 dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
2926 /* XXX: ontario has problems blitting to gart at the moment */
2927 if (rdev->family == CHIP_PALM) {
2928 rdev->asic->copy = NULL;
2929 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
2932 /* allocate wb buffer */
2933 r = radeon_wb_init(rdev);
2938 r = r600_irq_init(rdev);
2940 DRM_ERROR("radeon: IH init failed (%d).\n", r);
2941 radeon_irq_kms_fini(rdev);
2944 evergreen_irq_set(rdev);
2946 r = radeon_ring_init(rdev, rdev->cp.ring_size);
2949 r = evergreen_cp_load_microcode(rdev);
2952 r = evergreen_cp_resume(rdev);
2959 int evergreen_resume(struct radeon_device *rdev)
2963 /* reset the asic, the gfx blocks are often in a bad state
2964 * after the driver is unloaded or after a resume
2966 if (radeon_asic_reset(rdev))
2967 dev_warn(rdev->dev, "GPU reset failed !\n");
2968 /* Do not reset the GPU before posting; on rv770 hardware, unlike r500,
2969 * posting performs the tasks needed to bring the GPU back into good
2973 atom_asic_init(rdev->mode_info.atom_context);
2975 r = evergreen_startup(rdev);
2977 DRM_ERROR("evergreen startup failed on resume\n");
2981 r = r600_ib_test(rdev);
2983 DRM_ERROR("radeon: failed testing IB (%d).\n", r);
2991 int evergreen_suspend(struct radeon_device *rdev)
2995 /* FIXME: we should wait for ring to be empty */
2997 rdev->cp.ready = false;
2998 evergreen_irq_suspend(rdev);
2999 radeon_wb_disable(rdev);
3000 evergreen_pcie_gart_disable(rdev);
3002 /* unpin shaders bo */
3003 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
3004 if (likely(r == 0)) {
3005 radeon_bo_unpin(rdev->r600_blit.shader_obj);
3006 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
3012 int evergreen_copy_blit(struct radeon_device *rdev,
3013 uint64_t src_offset, uint64_t dst_offset,
3014 unsigned num_pages, struct radeon_fence *fence)
3018 mutex_lock(&rdev->r600_blit.mutex);
3019 rdev->r600_blit.vb_ib = NULL;
3020 r = evergreen_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
3022 if (rdev->r600_blit.vb_ib)
3023 radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
3024 mutex_unlock(&rdev->r600_blit.mutex);
3027 evergreen_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
3028 evergreen_blit_done_copy(rdev, fence);
3029 mutex_unlock(&rdev->r600_blit.mutex);
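/* Hypothetical usage (illustration only, addresses assumed): a single-page
 * copy would look like
 *	r = evergreen_copy_blit(rdev, src_gpu_addr, dst_gpu_addr, 1, fence);
 * with the fence signalling once the blit has been flushed through the CP. */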
3033 /* The plan is to move initialization into this function and to use
3034 * helper functions so that radeon_device_init does little more
3035 * than call ASIC-specific functions. This should also allow us
3036 * to remove a bunch of callback functions
3039 int evergreen_init(struct radeon_device *rdev)
3043 /* This doesn't do much */
3044 r = radeon_gem_init(rdev);
3048 if (!radeon_get_bios(rdev)) {
3049 if (ASIC_IS_AVIVO(rdev))
3052 /* Must be an ATOMBIOS */
3053 if (!rdev->is_atom_bios) {
3054 dev_err(rdev->dev, "Expecting atombios for evergreen GPU\n");
3057 r = radeon_atombios_init(rdev);
3060 /* reset the asic, the gfx blocks are often in a bad state
3061 * after the driver is unloaded or after a resume
3063 if (radeon_asic_reset(rdev))
3064 dev_warn(rdev->dev, "GPU reset failed !\n");
3065 /* Post card if necessary */
3066 if (!radeon_card_posted(rdev)) {
3068 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
3071 DRM_INFO("GPU not posted. posting now...\n");
3072 atom_asic_init(rdev->mode_info.atom_context);
3074 /* Initialize scratch registers */
3075 r600_scratch_init(rdev);
3076 /* Initialize surface registers */
3077 radeon_surface_init(rdev);
3078 /* Initialize clocks */
3079 radeon_get_clock_info(rdev->ddev);
3081 r = radeon_fence_driver_init(rdev);
3084 /* initialize AGP */
3085 if (rdev->flags & RADEON_IS_AGP) {
3086 r = radeon_agp_init(rdev);
3088 radeon_agp_disable(rdev);
3090 /* initialize memory controller */
3091 r = evergreen_mc_init(rdev);
3094 /* Memory manager */
3095 r = radeon_bo_init(rdev);
3099 r = radeon_irq_kms_init(rdev);
3103 rdev->cp.ring_obj = NULL;
3104 r600_ring_init(rdev, 1024 * 1024);
3106 rdev->ih.ring_obj = NULL;
3107 r600_ih_ring_init(rdev, 64 * 1024);
3109 r = r600_pcie_gart_init(rdev);
3113 rdev->accel_working = true;
3114 r = evergreen_startup(rdev);
3116 dev_err(rdev->dev, "disabling GPU acceleration\n");
3118 r600_irq_fini(rdev);
3119 radeon_wb_fini(rdev);
3120 radeon_irq_kms_fini(rdev);
3121 evergreen_pcie_gart_fini(rdev);
3122 rdev->accel_working = false;
3124 if (rdev->accel_working) {
3125 r = radeon_ib_pool_init(rdev);
3127 DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
3128 rdev->accel_working = false;
3130 r = r600_ib_test(rdev);
3132 DRM_ERROR("radeon: failed testing IB (%d).\n", r);
3133 rdev->accel_working = false;
3139 void evergreen_fini(struct radeon_device *rdev)
3141 evergreen_blit_fini(rdev);
3143 r600_irq_fini(rdev);
3144 radeon_wb_fini(rdev);
3145 radeon_irq_kms_fini(rdev);
3146 evergreen_pcie_gart_fini(rdev);
3147 radeon_gem_fini(rdev);
3148 radeon_fence_driver_fini(rdev);
3149 radeon_agp_fini(rdev);
3150 radeon_bo_fini(rdev);
3151 radeon_atombios_fini(rdev);
3156 static void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
3158 u32 link_width_cntl, speed_cntl;
3160 if (radeon_pcie_gen2 == 0)
3163 if (rdev->flags & RADEON_IS_IGP)
3166 if (!(rdev->flags & RADEON_IS_PCIE))
3169 /* x2 cards have a special sequence */
3170 if (ASIC_IS_X2(rdev))
3173 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
3174 if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) ||
3175 (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
3177 link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
3178 link_width_cntl &= ~LC_UPCONFIGURE_DIS;
3179 WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
3181 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
3182 speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
3183 WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
3185 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
3186 speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT;
3187 WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
3189 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
3190 speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
3191 WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
3193 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
3194 speed_cntl |= LC_GEN2_EN_STRAP;
3195 WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
3198 link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
3199 /* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
3201 link_width_cntl |= LC_UPCONFIGURE_DIS;
3203 link_width_cntl &= ~LC_UPCONFIGURE_DIS;
3204 WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);