2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28 #include <linux/slab.h>
29 #include <linux/seq_file.h>
30 #include <linux/firmware.h>
31 #include <linux/platform_device.h>
32 #include "drmP.h"
33 #include "radeon_drm.h"
34 #include "radeon.h"
35 #include "radeon_asic.h"
36 #include "radeon_mode.h"
37 #include "r600d.h"
38 #include "atom.h"
39 #include "avivod.h"
41 #define PFP_UCODE_SIZE 576
42 #define PM4_UCODE_SIZE 1792
43 #define RLC_UCODE_SIZE 768
44 #define R700_PFP_UCODE_SIZE 848
45 #define R700_PM4_UCODE_SIZE 1360
46 #define R700_RLC_UCODE_SIZE 1024
47 #define EVERGREEN_PFP_UCODE_SIZE 1120
48 #define EVERGREEN_PM4_UCODE_SIZE 1376
49 #define EVERGREEN_RLC_UCODE_SIZE 768
50 #define CAYMAN_RLC_UCODE_SIZE 1024
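/* The microcode sizes above are in 32-bit dwords: the PFP (prefetch parser)
 * and ME/PM4 images feed the command processor, while the RLC image handles
 * interrupt routing and clock-gating control. */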
53 MODULE_FIRMWARE("radeon/R600_pfp.bin");
54 MODULE_FIRMWARE("radeon/R600_me.bin");
55 MODULE_FIRMWARE("radeon/RV610_pfp.bin");
56 MODULE_FIRMWARE("radeon/RV610_me.bin");
57 MODULE_FIRMWARE("radeon/RV630_pfp.bin");
58 MODULE_FIRMWARE("radeon/RV630_me.bin");
59 MODULE_FIRMWARE("radeon/RV620_pfp.bin");
60 MODULE_FIRMWARE("radeon/RV620_me.bin");
61 MODULE_FIRMWARE("radeon/RV635_pfp.bin");
62 MODULE_FIRMWARE("radeon/RV635_me.bin");
63 MODULE_FIRMWARE("radeon/RV670_pfp.bin");
64 MODULE_FIRMWARE("radeon/RV670_me.bin");
65 MODULE_FIRMWARE("radeon/RS780_pfp.bin");
66 MODULE_FIRMWARE("radeon/RS780_me.bin");
67 MODULE_FIRMWARE("radeon/RV770_pfp.bin");
68 MODULE_FIRMWARE("radeon/RV770_me.bin");
69 MODULE_FIRMWARE("radeon/RV730_pfp.bin");
70 MODULE_FIRMWARE("radeon/RV730_me.bin");
71 MODULE_FIRMWARE("radeon/RV710_pfp.bin");
72 MODULE_FIRMWARE("radeon/RV710_me.bin");
73 MODULE_FIRMWARE("radeon/R600_rlc.bin");
74 MODULE_FIRMWARE("radeon/R700_rlc.bin");
75 MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
76 MODULE_FIRMWARE("radeon/CEDAR_me.bin");
77 MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
78 MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
79 MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
80 MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
81 MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
82 MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
83 MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
84 MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
85 MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
86 MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
87 MODULE_FIRMWARE("radeon/PALM_pfp.bin");
88 MODULE_FIRMWARE("radeon/PALM_me.bin");
89 MODULE_FIRMWARE("radeon/SUMO_rlc.bin");
91 int r600_debugfs_mc_info_init(struct radeon_device *rdev);
93 /* r600,rv610,rv630,rv620,rv635,rv670 */
94 int r600_mc_wait_for_idle(struct radeon_device *rdev);
95 void r600_gpu_init(struct radeon_device *rdev);
96 void r600_fini(struct radeon_device *rdev);
97 void r600_irq_disable(struct radeon_device *rdev);
98 static void r600_pcie_gen2_enable(struct radeon_device *rdev);
100 /* get temperature in millidegrees */
101 int rv6xx_get_temp(struct radeon_device *rdev)
102 {
103 u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >>
104 ASIC_T_SHIFT;
105 int actual_temp = temp & 0xff;
107 /* the raw reading is a signed 9-bit value in degrees C */
108 if (temp & 0x100)
109 actual_temp -= 256;
110 return actual_temp * 1000;
111 }
113 void r600_pm_get_dynpm_state(struct radeon_device *rdev)
114 {
115 int i;
117 rdev->pm.dynpm_can_upclock = true;
118 rdev->pm.dynpm_can_downclock = true;
120 /* power state array is low to high, default is first */
121 if ((rdev->flags & RADEON_IS_IGP) || (rdev->family == CHIP_R600)) {
122 int min_power_state_index = 0;
124 if (rdev->pm.num_power_states > 2)
125 min_power_state_index = 1;
127 switch (rdev->pm.dynpm_planned_action) {
128 case DYNPM_ACTION_MINIMUM:
129 rdev->pm.requested_power_state_index = min_power_state_index;
130 rdev->pm.requested_clock_mode_index = 0;
131 rdev->pm.dynpm_can_downclock = false;
132 break;
133 case DYNPM_ACTION_DOWNCLOCK:
134 if (rdev->pm.current_power_state_index == min_power_state_index) {
135 rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
136 rdev->pm.dynpm_can_downclock = false;
137 } else {
138 if (rdev->pm.active_crtc_count > 1) {
139 for (i = 0; i < rdev->pm.num_power_states; i++) {
140 if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
141 continue;
142 else if (i >= rdev->pm.current_power_state_index) {
143 rdev->pm.requested_power_state_index =
144 rdev->pm.current_power_state_index;
145 break;
146 } else {
147 rdev->pm.requested_power_state_index = i;
148 break;
149 }
150 }
151 } else {
152 if (rdev->pm.current_power_state_index == 0)
153 rdev->pm.requested_power_state_index =
154 rdev->pm.num_power_states - 1;
155 else
156 rdev->pm.requested_power_state_index =
157 rdev->pm.current_power_state_index - 1;
158 }
159 }
160 rdev->pm.requested_clock_mode_index = 0;
161 /* don't use the power state if crtcs are active and no display flag is set */
162 if ((rdev->pm.active_crtc_count > 0) &&
163 (rdev->pm.power_state[rdev->pm.requested_power_state_index].
164 clock_info[rdev->pm.requested_clock_mode_index].flags &
165 RADEON_PM_MODE_NO_DISPLAY)) {
166 rdev->pm.requested_power_state_index++;
167 }
168 break;
169 case DYNPM_ACTION_UPCLOCK:
170 if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
171 rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
172 rdev->pm.dynpm_can_upclock = false;
173 } else {
174 if (rdev->pm.active_crtc_count > 1) {
175 for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
176 if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
177 continue;
178 else if (i <= rdev->pm.current_power_state_index) {
179 rdev->pm.requested_power_state_index =
180 rdev->pm.current_power_state_index;
181 break;
182 } else {
183 rdev->pm.requested_power_state_index = i;
184 break;
185 }
186 }
187 } else
188 rdev->pm.requested_power_state_index =
189 rdev->pm.current_power_state_index + 1;
190 }
191 rdev->pm.requested_clock_mode_index = 0;
192 break;
193 case DYNPM_ACTION_DEFAULT:
194 rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
195 rdev->pm.requested_clock_mode_index = 0;
196 rdev->pm.dynpm_can_upclock = false;
197 break;
198 case DYNPM_ACTION_NONE:
199 default:
200 DRM_ERROR("Requested mode for undefined action\n");
201 return;
202 }
203 } else {
204 /* XXX select a power state based on AC/DC, single/dualhead, etc. */
205 /* for now just select the first power state and switch between clock modes */
206 /* power state array is low to high, default is first (0) */
207 if (rdev->pm.active_crtc_count > 1) {
208 rdev->pm.requested_power_state_index = -1;
209 /* start at 1 as we don't want the default mode */
210 for (i = 1; i < rdev->pm.num_power_states; i++) {
211 if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
212 continue;
213 else if ((rdev->pm.power_state[i].type == POWER_STATE_TYPE_PERFORMANCE) ||
214 (rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY)) {
215 rdev->pm.requested_power_state_index = i;
216 break;
217 }
218 }
219 /* if nothing selected, grab the default state. */
220 if (rdev->pm.requested_power_state_index == -1)
221 rdev->pm.requested_power_state_index = 0;
222 } else
223 rdev->pm.requested_power_state_index = 1;
225 switch (rdev->pm.dynpm_planned_action) {
226 case DYNPM_ACTION_MINIMUM:
227 rdev->pm.requested_clock_mode_index = 0;
228 rdev->pm.dynpm_can_downclock = false;
229 break;
230 case DYNPM_ACTION_DOWNCLOCK:
231 if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
232 if (rdev->pm.current_clock_mode_index == 0) {
233 rdev->pm.requested_clock_mode_index = 0;
234 rdev->pm.dynpm_can_downclock = false;
235 } else
236 rdev->pm.requested_clock_mode_index =
237 rdev->pm.current_clock_mode_index - 1;
238 } else {
239 rdev->pm.requested_clock_mode_index = 0;
240 rdev->pm.dynpm_can_downclock = false;
241 }
242 /* don't use the power state if crtcs are active and no display flag is set */
243 if ((rdev->pm.active_crtc_count > 0) &&
244 (rdev->pm.power_state[rdev->pm.requested_power_state_index].
245 clock_info[rdev->pm.requested_clock_mode_index].flags &
246 RADEON_PM_MODE_NO_DISPLAY)) {
247 rdev->pm.requested_clock_mode_index++;
248 }
249 break;
250 case DYNPM_ACTION_UPCLOCK:
251 if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
252 if (rdev->pm.current_clock_mode_index ==
253 (rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1)) {
254 rdev->pm.requested_clock_mode_index = rdev->pm.current_clock_mode_index;
255 rdev->pm.dynpm_can_upclock = false;
256 } else
257 rdev->pm.requested_clock_mode_index =
258 rdev->pm.current_clock_mode_index + 1;
259 } else {
260 rdev->pm.requested_clock_mode_index =
261 rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1;
262 rdev->pm.dynpm_can_upclock = false;
263 }
264 break;
265 case DYNPM_ACTION_DEFAULT:
266 rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
267 rdev->pm.requested_clock_mode_index = 0;
268 rdev->pm.dynpm_can_upclock = false;
269 break;
270 case DYNPM_ACTION_NONE:
271 default:
272 DRM_ERROR("Requested mode for undefined action\n");
273 return;
274 }
275 }
277 DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n",
278 rdev->pm.power_state[rdev->pm.requested_power_state_index].
279 clock_info[rdev->pm.requested_clock_mode_index].sclk,
280 rdev->pm.power_state[rdev->pm.requested_power_state_index].
281 clock_info[rdev->pm.requested_clock_mode_index].mclk,
282 rdev->pm.power_state[rdev->pm.requested_power_state_index].
283 pcie_lanes);
284 }
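/* Note: r600_pm_get_dynpm_state() only selects the requested_* indices;
 * the dynpm work handler in radeon_pm.c applies them via
 * radeon_pm_set_clocks(). */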
286 static int r600_pm_get_type_index(struct radeon_device *rdev,
287 enum radeon_pm_state_type ps_type,
288 int instance)
289 {
290 int i;
291 int found_instance = -1;
293 for (i = 0; i < rdev->pm.num_power_states; i++) {
294 if (rdev->pm.power_state[i].type == ps_type) {
295 found_instance++;
296 if (found_instance == instance)
297 return i;
298 }
299 }
300 /* return default if no match */
301 return rdev->pm.default_power_state_index;
302 }
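/* The profile tables below map each PM profile (default, low/mid/high,
 * single-head SH vs. multi-head MH) to power-state (ps) and clock-mode (cm)
 * indices for the displays-off and displays-on cases. For example,
 * r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1) picks the
 * second battery state, which the multi-head profiles use below. */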
304 void rs780_pm_init_profile(struct radeon_device *rdev)
305 {
306 if (rdev->pm.num_power_states == 2) {
307 /* default */
308 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
309 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
310 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
311 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
313 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
314 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
315 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
316 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
318 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
319 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
320 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
321 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
323 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
324 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
325 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
326 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
328 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
329 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
330 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
331 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
333 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
334 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
335 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
336 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
338 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
339 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 1;
340 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
341 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
342 } else if (rdev->pm.num_power_states == 3) {
344 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
345 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
346 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
347 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
349 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
350 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
351 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
352 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
354 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
355 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
356 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
357 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
359 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
360 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 2;
361 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
362 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
364 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 1;
365 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 1;
366 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
367 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
369 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 1;
370 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 1;
371 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
372 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
374 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 1;
375 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
376 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
377 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
378 } else {
379 /* default */
380 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
381 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
382 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
383 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
385 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 2;
386 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 2;
387 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
388 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
390 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 2;
391 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 2;
392 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
393 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
395 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 2;
396 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 3;
397 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
398 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
400 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
401 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
402 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
403 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
405 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
406 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
407 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
408 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
410 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
411 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 3;
412 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
413 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
414 }
415 }
417 void r600_pm_init_profile(struct radeon_device *rdev)
418 {
419 if (rdev->family == CHIP_R600) {
422 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
423 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
424 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
425 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
427 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
428 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
429 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
430 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
432 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
433 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
434 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
435 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
437 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
438 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
439 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
440 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
442 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
443 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
444 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
445 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
447 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
448 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
449 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
450 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
452 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
453 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
454 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
455 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
456 } else {
457 if (rdev->pm.num_power_states < 4) {
459 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
460 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
461 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
462 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
464 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
465 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
466 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
467 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
469 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
470 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
471 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
472 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
474 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
475 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
476 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
477 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
479 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
480 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 2;
481 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
482 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
484 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
485 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 2;
486 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
487 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
489 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
490 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
491 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
492 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
493 } else {
494 /* default */
495 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
496 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
497 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
498 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
500 if (rdev->flags & RADEON_IS_MOBILITY) {
501 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx =
502 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
503 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
504 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
505 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
506 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
507 } else {
508 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx =
509 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
510 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
511 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
512 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
513 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
514 }
515 /* mid sh */
516 if (rdev->flags & RADEON_IS_MOBILITY) {
517 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx =
518 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
519 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx =
520 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
521 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
522 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
523 } else {
524 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx =
525 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
526 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx =
527 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
528 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
529 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
530 }
531 /* high sh */
532 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx =
533 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
534 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx =
535 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
536 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
537 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
539 if (rdev->flags & RADEON_IS_MOBILITY) {
540 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx =
541 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
542 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx =
543 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
544 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
545 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
546 } else {
547 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx =
548 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
549 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx =
550 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
551 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
552 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
553 }
554 /* mid mh */
555 if (rdev->flags & RADEON_IS_MOBILITY) {
556 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx =
557 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
558 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx =
559 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
560 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
561 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
562 } else {
563 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx =
564 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
565 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx =
566 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
567 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
568 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
569 }
570 /* high mh */
571 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx =
572 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
573 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx =
574 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
575 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
576 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
577 }
578 }
579 }
581 void r600_pm_misc(struct radeon_device *rdev)
582 {
583 int req_ps_idx = rdev->pm.requested_power_state_index;
584 int req_cm_idx = rdev->pm.requested_clock_mode_index;
585 struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
586 struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
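/* Only software-controlled (VOLTAGE_SW) regulators can be reprogrammed
 * here; a voltage value of 0 means the clock mode carries no VDDC info. */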
588 if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
589 if (voltage->voltage != rdev->pm.current_vddc) {
590 radeon_atom_set_voltage(rdev, voltage->voltage);
591 rdev->pm.current_vddc = voltage->voltage;
592 DRM_DEBUG_DRIVER("Setting: v: %d\n", voltage->voltage);
593 }
594 }
595 }
597 bool r600_gui_idle(struct radeon_device *rdev)
598 {
599 if (RREG32(GRBM_STATUS) & GUI_ACTIVE)
600 return false;
601 else
602 return true;
603 }
605 /* hpd for digital panel detect/disconnect */
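/* Each hot-plug pad has a SENSE bit in its interrupt status register that
 * reflects whether a monitor is physically attached; DCE3 parts expose up
 * to six HPD pads, older parts three. */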
606 bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
607 {
608 bool connected = false;
610 if (ASIC_IS_DCE3(rdev)) {
611 switch (hpd) {
612 case RADEON_HPD_1:
613 if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
614 connected = true;
615 break;
616 case RADEON_HPD_2:
617 if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
618 connected = true;
619 break;
620 case RADEON_HPD_3:
621 if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
622 connected = true;
623 break;
624 case RADEON_HPD_4:
625 if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
626 connected = true;
627 break;
628 /* DCE 3.2 */
629 case RADEON_HPD_5:
630 if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
631 connected = true;
632 break;
633 case RADEON_HPD_6:
634 if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
635 connected = true;
636 break;
637 default:
638 break;
639 }
640 } else {
641 switch (hpd) {
642 case RADEON_HPD_1:
643 if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
644 connected = true;
645 break;
646 case RADEON_HPD_2:
647 if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
648 connected = true;
649 break;
650 case RADEON_HPD_3:
651 if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
652 connected = true;
653 break;
654 default:
655 break;
656 }
657 }
658 return connected;
659 }
661 void r600_hpd_set_polarity(struct radeon_device *rdev,
662 enum radeon_hpd_id hpd)
663 {
664 u32 tmp;
665 bool connected = r600_hpd_sense(rdev, hpd);
667 if (ASIC_IS_DCE3(rdev)) {
668 switch (hpd) {
669 case RADEON_HPD_1:
670 tmp = RREG32(DC_HPD1_INT_CONTROL);
671 if (connected)
672 tmp &= ~DC_HPDx_INT_POLARITY;
673 else
674 tmp |= DC_HPDx_INT_POLARITY;
675 WREG32(DC_HPD1_INT_CONTROL, tmp);
676 break;
677 case RADEON_HPD_2:
678 tmp = RREG32(DC_HPD2_INT_CONTROL);
679 if (connected)
680 tmp &= ~DC_HPDx_INT_POLARITY;
681 else
682 tmp |= DC_HPDx_INT_POLARITY;
683 WREG32(DC_HPD2_INT_CONTROL, tmp);
684 break;
685 case RADEON_HPD_3:
686 tmp = RREG32(DC_HPD3_INT_CONTROL);
687 if (connected)
688 tmp &= ~DC_HPDx_INT_POLARITY;
689 else
690 tmp |= DC_HPDx_INT_POLARITY;
691 WREG32(DC_HPD3_INT_CONTROL, tmp);
692 break;
693 case RADEON_HPD_4:
694 tmp = RREG32(DC_HPD4_INT_CONTROL);
695 if (connected)
696 tmp &= ~DC_HPDx_INT_POLARITY;
697 else
698 tmp |= DC_HPDx_INT_POLARITY;
699 WREG32(DC_HPD4_INT_CONTROL, tmp);
700 break;
701 case RADEON_HPD_5:
702 tmp = RREG32(DC_HPD5_INT_CONTROL);
703 if (connected)
704 tmp &= ~DC_HPDx_INT_POLARITY;
705 else
706 tmp |= DC_HPDx_INT_POLARITY;
707 WREG32(DC_HPD5_INT_CONTROL, tmp);
708 break;
709 case RADEON_HPD_6:
711 tmp = RREG32(DC_HPD6_INT_CONTROL);
712 if (connected)
713 tmp &= ~DC_HPDx_INT_POLARITY;
714 else
715 tmp |= DC_HPDx_INT_POLARITY;
716 WREG32(DC_HPD6_INT_CONTROL, tmp);
717 break;
718 default:
719 break;
720 }
721 } else {
722 switch (hpd) {
723 case RADEON_HPD_1:
724 tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
725 if (connected)
726 tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
727 else
728 tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
729 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
730 break;
731 case RADEON_HPD_2:
732 tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
733 if (connected)
734 tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
735 else
736 tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
737 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
738 break;
739 case RADEON_HPD_3:
740 tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
741 if (connected)
742 tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
743 else
744 tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
745 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
746 break;
747 default:
748 break;
749 }
750 }
751 }
753 void r600_hpd_init(struct radeon_device *rdev)
754 {
755 struct drm_device *dev = rdev->ddev;
756 struct drm_connector *connector;
758 if (ASIC_IS_DCE3(rdev)) {
759 u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
760 if (ASIC_IS_DCE32(rdev))
761 tmp |= DC_HPDx_EN;
763 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
764 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
765 switch (radeon_connector->hpd.hpd) {
766 case RADEON_HPD_1:
767 WREG32(DC_HPD1_CONTROL, tmp);
768 rdev->irq.hpd[0] = true;
769 break;
770 case RADEON_HPD_2:
771 WREG32(DC_HPD2_CONTROL, tmp);
772 rdev->irq.hpd[1] = true;
773 break;
774 case RADEON_HPD_3:
775 WREG32(DC_HPD3_CONTROL, tmp);
776 rdev->irq.hpd[2] = true;
777 break;
778 case RADEON_HPD_4:
779 WREG32(DC_HPD4_CONTROL, tmp);
780 rdev->irq.hpd[3] = true;
781 break;
782 /* DCE 3.2 */
783 case RADEON_HPD_5:
784 WREG32(DC_HPD5_CONTROL, tmp);
785 rdev->irq.hpd[4] = true;
786 break;
787 case RADEON_HPD_6:
788 WREG32(DC_HPD6_CONTROL, tmp);
789 rdev->irq.hpd[5] = true;
790 break;
791 default:
792 break;
793 }
794 }
795 } else {
796 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
797 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
798 switch (radeon_connector->hpd.hpd) {
799 case RADEON_HPD_1:
800 WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
801 rdev->irq.hpd[0] = true;
802 break;
803 case RADEON_HPD_2:
804 WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
805 rdev->irq.hpd[1] = true;
806 break;
807 case RADEON_HPD_3:
808 WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
809 rdev->irq.hpd[2] = true;
810 break;
811 default:
812 break;
813 }
814 }
815 }
816 if (rdev->irq.installed)
817 r600_irq_set(rdev);
818 }
820 void r600_hpd_fini(struct radeon_device *rdev)
821 {
822 struct drm_device *dev = rdev->ddev;
823 struct drm_connector *connector;
825 if (ASIC_IS_DCE3(rdev)) {
826 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
827 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
828 switch (radeon_connector->hpd.hpd) {
829 case RADEON_HPD_1:
830 WREG32(DC_HPD1_CONTROL, 0);
831 rdev->irq.hpd[0] = false;
832 break;
833 case RADEON_HPD_2:
834 WREG32(DC_HPD2_CONTROL, 0);
835 rdev->irq.hpd[1] = false;
836 break;
837 case RADEON_HPD_3:
838 WREG32(DC_HPD3_CONTROL, 0);
839 rdev->irq.hpd[2] = false;
840 break;
841 case RADEON_HPD_4:
842 WREG32(DC_HPD4_CONTROL, 0);
843 rdev->irq.hpd[3] = false;
844 break;
845 /* DCE 3.2 */
846 case RADEON_HPD_5:
847 WREG32(DC_HPD5_CONTROL, 0);
848 rdev->irq.hpd[4] = false;
849 break;
850 case RADEON_HPD_6:
851 WREG32(DC_HPD6_CONTROL, 0);
852 rdev->irq.hpd[5] = false;
853 break;
854 default:
855 break;
856 }
857 }
858 } else {
859 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
860 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
861 switch (radeon_connector->hpd.hpd) {
862 case RADEON_HPD_1:
863 WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
864 rdev->irq.hpd[0] = false;
865 break;
866 case RADEON_HPD_2:
867 WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
868 rdev->irq.hpd[1] = false;
869 break;
870 case RADEON_HPD_3:
871 WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
872 rdev->irq.hpd[2] = false;
873 break;
874 default:
875 break;
876 }
877 }
878 }
879 }
884 void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
885 {
886 unsigned i;
887 u32 tmp;
889 /* flush the HDP cache so updates hit VRAM */
890 if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
891 !(rdev->flags & RADEON_IS_AGP)) {
892 void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
895 /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read
896 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL
897 * This seems to cause problems on some AGP cards. Just use the old
898 * method for them.
899 */
900 WREG32(HDP_DEBUG1, 0);
901 tmp = readl((void __iomem *)ptr);
902 } else
903 WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
905 WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
906 WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
907 WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
908 for (i = 0; i < rdev->usec_timeout; i++) {
910 tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
911 tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
912 if (tmp == 2) {
913 printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
914 return;
915 }
916 if (tmp)
917 return;
918 udelay(1);
919 }
920 }
923 int r600_pcie_gart_init(struct radeon_device *rdev)
924 {
925 int r;
927 if (rdev->gart.table.vram.robj) {
928 WARN(1, "R600 PCIE GART already initialized\n");
929 return 0;
930 }
931 /* Initialize common gart structure */
932 r = radeon_gart_init(rdev);
933 if (r)
934 return r;
935 rdev->gart.table_size = rdev->gart.num_gpu_pages * 8; /* 8 bytes per entry */
936 return radeon_gart_table_vram_alloc(rdev);
937 }
939 int r600_pcie_gart_enable(struct radeon_device *rdev)
940 {
941 u32 tmp;
942 int r, i;
944 if (rdev->gart.table.vram.robj == NULL) {
945 dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
946 return -EINVAL;
947 }
948 r = radeon_gart_table_vram_pin(rdev);
949 if (r)
950 return r;
951 radeon_gart_restore(rdev);
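/* Setup the L2 cache and TLB for the page-table walk; the PTEs live in
 * VRAM, and system accesses outside the GTT range are redirected to the
 * dummy page programmed below. */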
954 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
955 ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
956 EFFECTIVE_L2_QUEUE_SIZE(7));
957 WREG32(VM_L2_CNTL2, 0);
958 WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
959 /* Setup TLB control */
960 tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
961 SYSTEM_ACCESS_MODE_NOT_IN_SYS |
962 EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
963 ENABLE_WAIT_L2_QUERY;
964 WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
965 WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
966 WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
967 WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
968 WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
969 WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
970 WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
971 WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
972 WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
973 WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
974 WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
975 WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
976 WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
977 WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
978 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
979 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
980 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
981 WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
982 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
983 WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
984 (u32)(rdev->dummy_page.addr >> 12));
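/* only VM context 0 backs the GTT; leave the remaining contexts disabled */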
985 for (i = 1; i < 7; i++)
986 WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
988 r600_pcie_gart_tlb_flush(rdev);
989 rdev->gart.ready = true;
990 return 0;
991 }
993 void r600_pcie_gart_disable(struct radeon_device *rdev)
994 {
995 int i, r;
996 u32 tmp;
998 /* Disable all tables */
999 for (i = 0; i < 7; i++)
1000 WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
1002 /* Disable L2 cache */
1003 WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
1004 EFFECTIVE_L2_QUEUE_SIZE(7));
1005 WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
1006 /* Setup L1 TLB control */
1007 tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
1008 ENABLE_WAIT_L2_QUERY;
1009 WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
1010 WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
1011 WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
1012 WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
1013 WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
1014 WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
1015 WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
1016 WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
1017 WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp);
1018 WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp);
1019 WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
1020 WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
1021 WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
1022 WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
1023 if (rdev->gart.table.vram.robj) {
1024 r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
1025 if (likely(r == 0)) {
1026 radeon_bo_kunmap(rdev->gart.table.vram.robj);
1027 radeon_bo_unpin(rdev->gart.table.vram.robj);
1028 radeon_bo_unreserve(rdev->gart.table.vram.robj);
1029 }
1030 }
1031 }
1033 void r600_pcie_gart_fini(struct radeon_device *rdev)
1034 {
1035 radeon_gart_fini(rdev);
1036 r600_pcie_gart_disable(rdev);
1037 radeon_gart_table_vram_free(rdev);
1038 }
1040 void r600_agp_enable(struct radeon_device *rdev)
1041 {
1042 u32 tmp;
1043 int i;
1045 /* Setup L2 cache */
1046 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
1047 ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
1048 EFFECTIVE_L2_QUEUE_SIZE(7));
1049 WREG32(VM_L2_CNTL2, 0);
1050 WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
1051 /* Setup TLB control */
1052 tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
1053 SYSTEM_ACCESS_MODE_NOT_IN_SYS |
1054 EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
1055 ENABLE_WAIT_L2_QUERY;
1056 WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
1057 WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
1058 WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
1059 WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
1060 WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
1061 WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
1062 WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
1063 WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
1064 WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
1065 WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
1066 WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
1067 WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
1068 WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
1069 WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
1070 for (i = 0; i < 7; i++)
1071 WREG32(VM_CONTEXT0_CNTL + (i * 4), 0); /* no page tables in AGP mode */
1072 }
1074 int r600_mc_wait_for_idle(struct radeon_device *rdev)
1075 {
1076 unsigned i;
1077 u32 tmp;
1079 for (i = 0; i < rdev->usec_timeout; i++) {
1080 /* read MC_STATUS */
1081 tmp = RREG32(R_000E50_SRBM_STATUS) & 0x3F00;
1082 if (!tmp)
1083 return 0;
1084 udelay(1);
1085 }
1086 return -1;
1087 }
1089 static void r600_mc_program(struct radeon_device *rdev)
1090 {
1091 struct rv515_mc_save save;
1092 u32 tmp;
1093 int i, j;
1095 /* Initialize HDP */
1096 for (i = 0, j = 0; i < 32; i++, j += 0x18) {
1097 WREG32((0x2c14 + j), 0x00000000);
1098 WREG32((0x2c18 + j), 0x00000000);
1099 WREG32((0x2c1c + j), 0x00000000);
1100 WREG32((0x2c20 + j), 0x00000000);
1101 WREG32((0x2c24 + j), 0x00000000);
1103 WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
1105 rv515_mc_stop(rdev, &save);
1106 if (r600_mc_wait_for_idle(rdev)) {
1107 dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
1108 }
1109 /* Lock out access through the VGA aperture (doesn't exist before R600) */
1110 WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
1111 /* Update configuration */
1112 if (rdev->flags & RADEON_IS_AGP) {
1113 if (rdev->mc.vram_start < rdev->mc.gtt_start) {
1114 /* VRAM before AGP */
1115 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
1116 rdev->mc.vram_start >> 12);
1117 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
1118 rdev->mc.gtt_end >> 12);
1119 } else {
1120 /* VRAM after AGP */
1121 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
1122 rdev->mc.gtt_start >> 12);
1123 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
1124 rdev->mc.vram_end >> 12);
1125 }
1126 } else {
1127 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
1128 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
1129 }
1130 WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
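/* MC_VM_FB_LOCATION packs the VRAM range in 16MB units: the top address in
 * the upper 16 bits and the base in the lower 16 bits */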
1131 tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
1132 tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
1133 WREG32(MC_VM_FB_LOCATION, tmp);
1134 WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
1135 WREG32(HDP_NONSURFACE_INFO, (2 << 7));
1136 WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
1137 if (rdev->flags & RADEON_IS_AGP) {
1138 WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
1139 WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
1140 WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
1141 } else {
1142 WREG32(MC_VM_AGP_BASE, 0);
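/* without AGP, program an empty aperture (bot == top) so the MC never
 * decodes AGP addresses */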
1143 WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
1144 WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
1145 }
1146 if (r600_mc_wait_for_idle(rdev)) {
1147 dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
1148 }
1149 rv515_mc_resume(rdev, &save);
1150 /* we need to own VRAM, so turn off the VGA renderer here
1151 * to stop it overwriting our objects */
1152 rv515_vga_render_disable(rdev);
1153 }
1155 /**
1156 * r600_vram_gtt_location - try to find VRAM & GTT location
1157 * @rdev: radeon device structure holding all necessary information
1158 * @mc: memory controller structure holding memory information
1159 *
1160 * This function tries to place VRAM at the same address in the GPU
1161 * address space as in the CPU (PCI) address space, as some GPUs have
1162 * issues when it is reprogrammed to a different address.
1163 *
1164 * If there is not enough space to fit the CPU-invisible VRAM after the
1165 * aperture, then we limit the VRAM size to the aperture.
1166 *
1167 * If we are using AGP, then place VRAM adjacent to the AGP aperture, as we
1168 * need them to be contiguous from the GPU's point of view so that we can
1169 * program the GPU to catch accesses outside them (weird GPU policy, see ??).
1170 *
1171 * This function never fails; the worst case is limiting VRAM or GTT.
1172 *
1173 * Note: GTT start, end and size should be initialized before calling this
1174 * function on an AGP platform.
1175 */
1176 static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
1177 {
1178 u64 size_bf, size_af;
1180 if (mc->mc_vram_size > 0xE0000000) {
1181 /* leave room for at least 512M GTT */
1182 dev_warn(rdev->dev, "limiting VRAM\n");
1183 mc->real_vram_size = 0xE0000000;
1184 mc->mc_vram_size = 0xE0000000;
1185 }
1186 if (rdev->flags & RADEON_IS_AGP) {
1187 size_bf = mc->gtt_start;
1188 size_af = 0xFFFFFFFF - mc->gtt_end + 1;
1189 if (size_bf > size_af) {
1190 if (mc->mc_vram_size > size_bf) {
1191 dev_warn(rdev->dev, "limiting VRAM\n");
1192 mc->real_vram_size = size_bf;
1193 mc->mc_vram_size = size_bf;
1195 mc->vram_start = mc->gtt_start - mc->mc_vram_size;
1196 } else {
1197 if (mc->mc_vram_size > size_af) {
1198 dev_warn(rdev->dev, "limiting VRAM\n");
1199 mc->real_vram_size = size_af;
1200 mc->mc_vram_size = size_af;
1202 mc->vram_start = mc->gtt_end;
1203 }
1204 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
1205 dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
1206 mc->mc_vram_size >> 20, mc->vram_start,
1207 mc->vram_end, mc->real_vram_size >> 20);
1208 } else {
1209 u64 base = 0;
1210 if (rdev->flags & RADEON_IS_IGP) {
1211 base = RREG32(MC_VM_FB_LOCATION) & 0xFFFF;
1212 base <<= 24;
1213 }
1214 radeon_vram_location(rdev, &rdev->mc, base);
1215 rdev->mc.gtt_base_align = 0;
1216 radeon_gtt_location(rdev, mc);
1217 }
1218 }
1220 int r600_mc_init(struct radeon_device *rdev)
1221 {
1222 u32 tmp;
1223 int chansize, numchan;
1225 /* Get VRAM information */
1226 rdev->mc.vram_is_ddr = true;
1227 tmp = RREG32(RAMCFG);
1228 if (tmp & CHANSIZE_OVERRIDE) {
1229 chansize = 16;
1230 } else if (tmp & CHANSIZE_MASK) {
1231 chansize = 64;
1232 } else {
1233 chansize = 32;
1234 }
1235 tmp = RREG32(CHMAP);
1236 switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
1237 case 0:
1238 default:
1239 numchan = 1;
1240 break;
1241 case 1:
1242 numchan = 2;
1243 break;
1244 case 2:
1245 numchan = 4;
1246 break;
1247 case 3:
1248 numchan = 8;
1249 break;
1250 }
1251 rdev->mc.vram_width = numchan * chansize;
1252 /* Could the aperture size report 0? */
1253 rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
1254 rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
1255 /* Setup GPU memory space */
1256 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
1257 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
1258 rdev->mc.visible_vram_size = rdev->mc.aper_size;
1259 r600_vram_gtt_location(rdev, &rdev->mc);
1261 if (rdev->flags & RADEON_IS_IGP) {
1262 rs690_pm_info(rdev);
1263 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
1264 }
1265 radeon_update_bandwidth_info(rdev);
1266 return 0;
1267 }
1269 /* We don't check whether the GPU really needs a reset; we simply do the
1270 * reset. It's up to the caller to determine if the GPU needs one. We
1271 * might add a helper function to check that.
1272 */
1273 int r600_gpu_soft_reset(struct radeon_device *rdev)
1275 struct rv515_mc_save save;
1276 u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) |
1277 S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) |
1278 S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) |
1279 S_008010_SH_BUSY(1) | S_008010_SPI03_BUSY(1) |
1280 S_008010_SMX_BUSY(1) | S_008010_SC_BUSY(1) |
1281 S_008010_PA_BUSY(1) | S_008010_DB03_BUSY(1) |
1282 S_008010_CR_BUSY(1) | S_008010_CB03_BUSY(1) |
1283 S_008010_GUI_ACTIVE(1);
1284 u32 grbm2_busy_mask = S_008014_SPI0_BUSY(1) | S_008014_SPI1_BUSY(1) |
1285 S_008014_SPI2_BUSY(1) | S_008014_SPI3_BUSY(1) |
1286 S_008014_TA0_BUSY(1) | S_008014_TA1_BUSY(1) |
1287 S_008014_TA2_BUSY(1) | S_008014_TA3_BUSY(1) |
1288 S_008014_DB0_BUSY(1) | S_008014_DB1_BUSY(1) |
1289 S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) |
1290 S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
1291 S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
1292 u32 tmp;
1294 if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
1295 return 0;
1297 dev_info(rdev->dev, "GPU softreset\n");
1298 dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n",
1299 RREG32(R_008010_GRBM_STATUS));
1300 dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n",
1301 RREG32(R_008014_GRBM_STATUS2));
1302 dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n",
1303 RREG32(R_000E50_SRBM_STATUS));
1304 rv515_mc_stop(rdev, &save);
1305 if (r600_mc_wait_for_idle(rdev)) {
1306 dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
1307 }
1308 /* Disable CP parsing/prefetching */
1309 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
1310 /* Check if any of the rendering block is busy and reset it */
1311 if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
1312 (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
1313 tmp = S_008020_SOFT_RESET_CR(1) |
1314 S_008020_SOFT_RESET_DB(1) |
1315 S_008020_SOFT_RESET_CB(1) |
1316 S_008020_SOFT_RESET_PA(1) |
1317 S_008020_SOFT_RESET_SC(1) |
1318 S_008020_SOFT_RESET_SMX(1) |
1319 S_008020_SOFT_RESET_SPI(1) |
1320 S_008020_SOFT_RESET_SX(1) |
1321 S_008020_SOFT_RESET_SH(1) |
1322 S_008020_SOFT_RESET_TC(1) |
1323 S_008020_SOFT_RESET_TA(1) |
1324 S_008020_SOFT_RESET_VC(1) |
1325 S_008020_SOFT_RESET_VGT(1);
1326 dev_info(rdev->dev, " R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
1327 WREG32(R_008020_GRBM_SOFT_RESET, tmp);
1328 RREG32(R_008020_GRBM_SOFT_RESET);
1329 udelay(50);
1330 WREG32(R_008020_GRBM_SOFT_RESET, 0);
1332 /* Reset CP (we always reset CP) */
1333 tmp = S_008020_SOFT_RESET_CP(1);
1334 dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
1335 WREG32(R_008020_GRBM_SOFT_RESET, tmp);
1336 RREG32(R_008020_GRBM_SOFT_RESET);
1337 udelay(50);
1338 WREG32(R_008020_GRBM_SOFT_RESET, 0);
1339 /* Wait a little for things to settle down */
1340 mdelay(1);
1341 dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n",
1342 RREG32(R_008010_GRBM_STATUS));
1343 dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n",
1344 RREG32(R_008014_GRBM_STATUS2));
1345 dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n",
1346 RREG32(R_000E50_SRBM_STATUS));
1347 rv515_mc_resume(rdev, &save);
1348 return 0;
1349 }
1351 bool r600_gpu_is_lockup(struct radeon_device *rdev)
1352 {
1353 u32 srbm_status;
1354 u32 grbm_status;
1355 u32 grbm_status2;
1356 struct r100_gpu_lockup *lockup;
1357 int r;
1359 if (rdev->family >= CHIP_RV770)
1360 lockup = &rdev->config.rv770.lockup;
1361 else
1362 lockup = &rdev->config.r600.lockup;
1364 srbm_status = RREG32(R_000E50_SRBM_STATUS);
1365 grbm_status = RREG32(R_008010_GRBM_STATUS);
1366 grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
1367 if (!G_008010_GUI_ACTIVE(grbm_status)) {
1368 r100_gpu_lockup_update(lockup, &rdev->cp);
1369 return false;
1370 }
1371 /* force CP activities */
1372 r = radeon_ring_lock(rdev, 2);
1373 if (!r) {
1374 /* PACKET2 NOP */
1375 radeon_ring_write(rdev, 0x80000000);
1376 radeon_ring_write(rdev, 0x80000000);
1377 radeon_ring_unlock_commit(rdev);
1378 }
1379 rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
1380 return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
1381 }
1383 int r600_asic_reset(struct radeon_device *rdev)
1384 {
1385 return r600_gpu_soft_reset(rdev);
1386 }
1388 static u32 r600_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
1389 u32 num_backends,
1390 u32 backend_disable_mask)
1391 {
1392 u32 backend_map = 0;
1393 u32 enabled_backends_mask;
1394 u32 enabled_backends_count;
1395 u32 cur_pipe;
1396 u32 swizzle_pipe[R6XX_MAX_PIPES];
1397 u32 cur_backend = 0;
1398 u32 i;
1400 if (num_tile_pipes > R6XX_MAX_PIPES)
1401 num_tile_pipes = R6XX_MAX_PIPES;
1402 if (num_tile_pipes < 1)
1403 num_tile_pipes = 1;
1404 if (num_backends > R6XX_MAX_BACKENDS)
1405 num_backends = R6XX_MAX_BACKENDS;
1406 if (num_backends < 1)
1407 num_backends = 1;
1409 enabled_backends_mask = 0;
1410 enabled_backends_count = 0;
1411 for (i = 0; i < R6XX_MAX_BACKENDS; ++i) {
1412 if (((backend_disable_mask >> i) & 1) == 0) {
1413 enabled_backends_mask |= (1 << i);
1414 ++enabled_backends_count;
1415 }
1416 if (enabled_backends_count == num_backends)
1417 break;
1418 }
1420 if (enabled_backends_count == 0) {
1421 enabled_backends_mask = 1;
1422 enabled_backends_count = 1;
1423 }
1425 if (enabled_backends_count != num_backends)
1426 num_backends = enabled_backends_count;
1428 memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R6XX_MAX_PIPES);
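/* swizzle_pipe[] remaps logical tile pipes; on 6-8 pipe parts the even
 * pipes are listed first so adjacent pipes land on different render
 * backends, spreading tiled accesses across them */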
1429 switch (num_tile_pipes) {
1430 case 1:
1431 swizzle_pipe[0] = 0;
1432 break;
1433 case 2:
1434 swizzle_pipe[0] = 0;
1435 swizzle_pipe[1] = 1;
1436 break;
1437 case 3:
1438 swizzle_pipe[0] = 0;
1439 swizzle_pipe[1] = 1;
1440 swizzle_pipe[2] = 2;
1441 break;
1442 case 4:
1443 swizzle_pipe[0] = 0;
1444 swizzle_pipe[1] = 1;
1445 swizzle_pipe[2] = 2;
1446 swizzle_pipe[3] = 3;
1447 break;
1448 case 5:
1449 swizzle_pipe[0] = 0;
1450 swizzle_pipe[1] = 1;
1451 swizzle_pipe[2] = 2;
1452 swizzle_pipe[3] = 3;
1453 swizzle_pipe[4] = 4;
1454 break;
1455 case 6:
1456 swizzle_pipe[0] = 0;
1457 swizzle_pipe[1] = 2;
1458 swizzle_pipe[2] = 4;
1459 swizzle_pipe[3] = 5;
1460 swizzle_pipe[4] = 1;
1461 swizzle_pipe[5] = 3;
1462 break;
1463 case 7:
1464 swizzle_pipe[0] = 0;
1465 swizzle_pipe[1] = 2;
1466 swizzle_pipe[2] = 4;
1467 swizzle_pipe[3] = 6;
1468 swizzle_pipe[4] = 1;
1469 swizzle_pipe[5] = 3;
1470 swizzle_pipe[6] = 5;
1471 break;
1472 case 8:
1473 swizzle_pipe[0] = 0;
1474 swizzle_pipe[1] = 2;
1475 swizzle_pipe[2] = 4;
1476 swizzle_pipe[3] = 6;
1477 swizzle_pipe[4] = 1;
1478 swizzle_pipe[5] = 3;
1479 swizzle_pipe[6] = 5;
1480 swizzle_pipe[7] = 7;
1481 break;
1482 }
1485 for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
1486 while (((1 << cur_backend) & enabled_backends_mask) == 0)
1487 cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
1489 backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2))); /* 2 bits per pipe */
1491 cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
1492 }
1494 return backend_map;
1495 }
1497 int r600_count_pipe_bits(uint32_t val)
1498 {
1499 int i, ret = 0;
1501 for (i = 0; i < 32; i++) {
1502 ret += val & 1;
1503 val >>= 1;
1504 }
1505 return ret;
1506 }
1508 void r600_gpu_init(struct radeon_device *rdev)
1509 {
1510 u32 tiling_config;
1511 u32 ramcfg;
1512 u32 backend_map;
1513 u32 cc_rb_backend_disable;
1514 u32 cc_gc_shader_pipe_config;
1515 u32 tmp;
1516 int i, j;
1517 u32 sq_config;
1518 u32 sq_gpr_resource_mgmt_1 = 0;
1519 u32 sq_gpr_resource_mgmt_2 = 0;
1520 u32 sq_thread_resource_mgmt = 0;
1521 u32 sq_stack_resource_mgmt_1 = 0;
1522 u32 sq_stack_resource_mgmt_2 = 0;
1524 /* FIXME: implement */
1525 switch (rdev->family) {
1526 case CHIP_R600:
1527 rdev->config.r600.max_pipes = 4;
1528 rdev->config.r600.max_tile_pipes = 8;
1529 rdev->config.r600.max_simds = 4;
1530 rdev->config.r600.max_backends = 4;
1531 rdev->config.r600.max_gprs = 256;
1532 rdev->config.r600.max_threads = 192;
1533 rdev->config.r600.max_stack_entries = 256;
1534 rdev->config.r600.max_hw_contexts = 8;
1535 rdev->config.r600.max_gs_threads = 16;
1536 rdev->config.r600.sx_max_export_size = 128;
1537 rdev->config.r600.sx_max_export_pos_size = 16;
1538 rdev->config.r600.sx_max_export_smx_size = 128;
1539 rdev->config.r600.sq_num_cf_insts = 2;
1540 break;
1541 case CHIP_RV630:
1542 case CHIP_RV635:
1543 rdev->config.r600.max_pipes = 2;
1544 rdev->config.r600.max_tile_pipes = 2;
1545 rdev->config.r600.max_simds = 3;
1546 rdev->config.r600.max_backends = 1;
1547 rdev->config.r600.max_gprs = 128;
1548 rdev->config.r600.max_threads = 192;
1549 rdev->config.r600.max_stack_entries = 128;
1550 rdev->config.r600.max_hw_contexts = 8;
1551 rdev->config.r600.max_gs_threads = 4;
1552 rdev->config.r600.sx_max_export_size = 128;
1553 rdev->config.r600.sx_max_export_pos_size = 16;
1554 rdev->config.r600.sx_max_export_smx_size = 128;
1555 rdev->config.r600.sq_num_cf_insts = 2;
1556 break;
1557 case CHIP_RV610:
1558 case CHIP_RV620:
1559 case CHIP_RS780:
1560 case CHIP_RS880:
1561 rdev->config.r600.max_pipes = 1;
1562 rdev->config.r600.max_tile_pipes = 1;
1563 rdev->config.r600.max_simds = 2;
1564 rdev->config.r600.max_backends = 1;
1565 rdev->config.r600.max_gprs = 128;
1566 rdev->config.r600.max_threads = 192;
1567 rdev->config.r600.max_stack_entries = 128;
1568 rdev->config.r600.max_hw_contexts = 4;
1569 rdev->config.r600.max_gs_threads = 4;
1570 rdev->config.r600.sx_max_export_size = 128;
1571 rdev->config.r600.sx_max_export_pos_size = 16;
1572 rdev->config.r600.sx_max_export_smx_size = 128;
1573 rdev->config.r600.sq_num_cf_insts = 1;
1574 break;
1575 case CHIP_RV670:
1576 rdev->config.r600.max_pipes = 4;
1577 rdev->config.r600.max_tile_pipes = 4;
1578 rdev->config.r600.max_simds = 4;
1579 rdev->config.r600.max_backends = 4;
1580 rdev->config.r600.max_gprs = 192;
1581 rdev->config.r600.max_threads = 192;
1582 rdev->config.r600.max_stack_entries = 256;
1583 rdev->config.r600.max_hw_contexts = 8;
1584 rdev->config.r600.max_gs_threads = 16;
1585 rdev->config.r600.sx_max_export_size = 128;
1586 rdev->config.r600.sx_max_export_pos_size = 16;
1587 rdev->config.r600.sx_max_export_smx_size = 128;
1588 rdev->config.r600.sq_num_cf_insts = 2;
1589 break;
1590 default:
1591 break;
1592 }
1594 /* Initialize HDP */
1595 for (i = 0, j = 0; i < 32; i++, j += 0x18) {
1596 WREG32((0x2c14 + j), 0x00000000);
1597 WREG32((0x2c18 + j), 0x00000000);
1598 WREG32((0x2c1c + j), 0x00000000);
1599 WREG32((0x2c20 + j), 0x00000000);
1600 WREG32((0x2c24 + j), 0x00000000);
1603 WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
1605 /* Setup tiling */
1606 tiling_config = 0;
1607 ramcfg = RREG32(RAMCFG);
1608 switch (rdev->config.r600.max_tile_pipes) {
1609 case 1:
1610 tiling_config |= PIPE_TILING(0);
1611 break;
1612 case 2:
1613 tiling_config |= PIPE_TILING(1);
1614 break;
1615 case 4:
1616 tiling_config |= PIPE_TILING(2);
1617 break;
1618 case 8:
1619 tiling_config |= PIPE_TILING(3);
1620 break;
1621 default:
1622 break;
1623 }
1624 rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
1625 rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
1626 tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
1627 tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
1628 if ((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT)
1629 rdev->config.r600.tiling_group_size = 512;
1630 else
1631 rdev->config.r600.tiling_group_size = 256;
1632 tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
1633 if (tmp > 3) {
1634 tiling_config |= ROW_TILING(3);
1635 tiling_config |= SAMPLE_SPLIT(3);
1636 } else {
1637 tiling_config |= ROW_TILING(tmp);
1638 tiling_config |= SAMPLE_SPLIT(tmp);
1639 }
1640 tiling_config |= BANK_SWAPS(1);
1642 cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
1643 cc_rb_backend_disable |=
1644 BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK);
1646 cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
1647 cc_gc_shader_pipe_config |=
1648 INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK);
1649 cc_gc_shader_pipe_config |=
1650 INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK);
1652 backend_map = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes,
1653 (R6XX_MAX_BACKENDS -
1654 r600_count_pipe_bits((cc_rb_backend_disable &
1655 R6XX_MAX_BACKENDS_MASK) >> 16)),
1656 (cc_rb_backend_disable >> 16));
1657 rdev->config.r600.tile_config = tiling_config;
1658 tiling_config |= BACKEND_MAP(backend_map);
1659 WREG32(GB_TILING_CONFIG, tiling_config);
1660 WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
1661 WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);
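/* only the low 16 bits (without the backend map) are mirrored to the
 * display (DCP) and host (HDP) paths so all clients see the same layout */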
1664 WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
1665 WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
1666 WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
1668 tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
1669 WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
1670 WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);
1672 /* Setup some CP states */
1673 WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | ROQ_IB2_START(0x2b)));
1674 WREG32(CP_MEQ_THRESHOLDS, (MEQ_END(0x40) | ROQ_END(0x40)));
1676 WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | SYNC_GRADIENT |
1677 SYNC_WALKER | SYNC_ALIGNER));
1678 /* Setup various GPU states */
1679 if (rdev->family == CHIP_RV670)
1680 WREG32(ARB_GDEC_RD_CNTL, 0x00000021);
1682 tmp = RREG32(SX_DEBUG_1);
1683 tmp |= SMX_EVENT_RELEASE;
1684 if ((rdev->family > CHIP_R600))
1685 tmp |= ENABLE_NEW_SMX_ADDRESS;
1686 WREG32(SX_DEBUG_1, tmp);
1688 if (((rdev->family) == CHIP_R600) ||
1689 ((rdev->family) == CHIP_RV630) ||
1690 ((rdev->family) == CHIP_RV610) ||
1691 ((rdev->family) == CHIP_RV620) ||
1692 ((rdev->family) == CHIP_RS780) ||
1693 ((rdev->family) == CHIP_RS880)) {
1694 WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE);
1696 WREG32(DB_DEBUG, 0);
1698 WREG32(DB_WATERMARKS, (DEPTH_FREE(4) | DEPTH_CACHELINE_FREE(16) |
1699 DEPTH_FLUSH(16) | DEPTH_PENDING_FREE(4)));
1701 WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
1702 WREG32(VGT_NUM_INSTANCES, 0);
1704 WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
1705 WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(0));
1707 tmp = RREG32(SQ_MS_FIFO_SIZES);
1708 if (((rdev->family) == CHIP_RV610) ||
1709 ((rdev->family) == CHIP_RV620) ||
1710 ((rdev->family) == CHIP_RS780) ||
1711 ((rdev->family) == CHIP_RS880)) {
1712 tmp = (CACHE_FIFO_SIZE(0xa) |
1713 FETCH_FIFO_HIWATER(0xa) |
1714 DONE_FIFO_HIWATER(0xe0) |
1715 ALU_UPDATE_FIFO_HIWATER(0x8));
1716 } else if (((rdev->family) == CHIP_R600) ||
1717 ((rdev->family) == CHIP_RV630)) {
1718 tmp &= ~DONE_FIFO_HIWATER(0xff);
1719 tmp |= DONE_FIFO_HIWATER(0x4);
1720 }
1721 WREG32(SQ_MS_FIFO_SIZES, tmp);
1723 /* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
1724 * should be adjusted as needed by the 2D/3D drivers. This just sets default values
1725 */
1726 sq_config = RREG32(SQ_CONFIG);
1727 sq_config &= ~(PS_PRIO(3) |
1728 VS_PRIO(3) |
1729 GS_PRIO(3) |
1730 ES_PRIO(3));
1731 sq_config |= (DX9_CONSTS |
1732 VC_ENABLE |
1733 PS_PRIO(0) |
1734 VS_PRIO(1) |
1735 GS_PRIO(2) |
1736 ES_PRIO(3));
1738 if ((rdev->family) == CHIP_R600) {
1739 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(124) |
1740 NUM_VS_GPRS(124) |
1741 NUM_CLAUSE_TEMP_GPRS(4));
1742 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(0) |
1743 NUM_ES_GPRS(0));
1744 sq_thread_resource_mgmt = (NUM_PS_THREADS(136) |
1745 NUM_VS_THREADS(48) |
1746 NUM_GS_THREADS(4) |
1747 NUM_ES_THREADS(4));
1748 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(128) |
1749 NUM_VS_STACK_ENTRIES(128));
1750 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(0) |
1751 NUM_ES_STACK_ENTRIES(0));
1752 } else if (((rdev->family) == CHIP_RV610) ||
1753 ((rdev->family) == CHIP_RV620) ||
1754 ((rdev->family) == CHIP_RS780) ||
1755 ((rdev->family) == CHIP_RS880)) {
1756 /* no vertex cache */
1757 sq_config &= ~VC_ENABLE;
1759 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
1760 NUM_VS_GPRS(44) |
1761 NUM_CLAUSE_TEMP_GPRS(2));
1762 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
1763 NUM_ES_GPRS(17));
1764 sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
1765 NUM_VS_THREADS(78) |
1766 NUM_GS_THREADS(4) |
1767 NUM_ES_THREADS(31));
1768 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
1769 NUM_VS_STACK_ENTRIES(40));
1770 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
1771 NUM_ES_STACK_ENTRIES(16));
1772 } else if (((rdev->family) == CHIP_RV630) ||
1773 ((rdev->family) == CHIP_RV635)) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(18) |
					  NUM_ES_GPRS(18));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
1783 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
1784 NUM_VS_STACK_ENTRIES(40));
1785 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
1786 NUM_ES_STACK_ENTRIES(16));
1787 } else if ((rdev->family) == CHIP_RV670) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
					  NUM_ES_GPRS(17));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
1797 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(64) |
1798 NUM_VS_STACK_ENTRIES(64));
1799 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(64) |
1800 NUM_ES_STACK_ENTRIES(64));
	}
	WREG32(SQ_CONFIG, sq_config);
1804 WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
1805 WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
1806 WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
1807 WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
1808 WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
1810 if (((rdev->family) == CHIP_RV610) ||
1811 ((rdev->family) == CHIP_RV620) ||
1812 ((rdev->family) == CHIP_RS780) ||
1813 ((rdev->family) == CHIP_RS880)) {
1814 WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY));
	} else {
		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC));
	}
1819 /* More default values. 2D/3D driver should adjust as needed */
1820 WREG32(PA_SC_AA_SAMPLE_LOCS_2S, (S0_X(0xc) | S0_Y(0x4) |
1821 S1_X(0x4) | S1_Y(0xc)));
1822 WREG32(PA_SC_AA_SAMPLE_LOCS_4S, (S0_X(0xe) | S0_Y(0xe) |
1823 S1_X(0x2) | S1_Y(0x2) |
1824 S2_X(0xa) | S2_Y(0x6) |
1825 S3_X(0x6) | S3_Y(0xa)));
1826 WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD0, (S0_X(0xe) | S0_Y(0xb) |
1827 S1_X(0x4) | S1_Y(0xc) |
1828 S2_X(0x1) | S2_Y(0x6) |
1829 S3_X(0xa) | S3_Y(0xe)));
1830 WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD1, (S4_X(0x6) | S4_Y(0x1) |
1831 S5_X(0x0) | S5_Y(0x0) |
1832 S6_X(0xb) | S6_Y(0x4) |
1833 S7_X(0x7) | S7_Y(0x8)));
1835 WREG32(VGT_STRMOUT_EN, 0);
1836 tmp = rdev->config.r600.max_pipes * 16;
1837 switch (rdev->family) {
1853 WREG32(VGT_ES_PER_GS, 128);
1854 WREG32(VGT_GS_PER_ES, tmp);
1855 WREG32(VGT_GS_PER_VS, 2);
1856 WREG32(VGT_GS_VERTEX_REUSE, 16);
1858 /* more default values. 2D/3D driver should adjust as needed */
1859 WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
1860 WREG32(VGT_STRMOUT_EN, 0);
1862 WREG32(PA_SC_MODE_CNTL, 0);
1863 WREG32(PA_SC_AA_CONFIG, 0);
1864 WREG32(PA_SC_LINE_STIPPLE, 0);
1865 WREG32(SPI_INPUT_Z, 0);
1866 WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
1867 WREG32(CB_COLOR7_FRAG, 0);
1869 /* Clear render buffer base addresses */
1870 WREG32(CB_COLOR0_BASE, 0);
1871 WREG32(CB_COLOR1_BASE, 0);
1872 WREG32(CB_COLOR2_BASE, 0);
1873 WREG32(CB_COLOR3_BASE, 0);
1874 WREG32(CB_COLOR4_BASE, 0);
1875 WREG32(CB_COLOR5_BASE, 0);
1876 WREG32(CB_COLOR6_BASE, 0);
1877 WREG32(CB_COLOR7_BASE, 0);
1878 WREG32(CB_COLOR7_FRAG, 0);
1880 switch (rdev->family) {
1885 tmp = TC_L2_SIZE(8);
1889 tmp = TC_L2_SIZE(4);
1892 tmp = TC_L2_SIZE(0) | L2_DISABLE_LATE_HIT;
1895 tmp = TC_L2_SIZE(0);
1898 WREG32(TC_CNTL, tmp);
1900 tmp = RREG32(HDP_HOST_PATH_CNTL);
1901 WREG32(HDP_HOST_PATH_CNTL, tmp);
1903 tmp = RREG32(ARB_POP);
1904 tmp |= ENABLE_TC128;
1905 WREG32(ARB_POP, tmp);
1907 WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
	WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
			       NUM_CLIP_SEQ(3)));
	WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
}
/*
 * Indirect registers accessor
 */
1917 u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
1921 WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
1922 (void)RREG32(PCIE_PORT_INDEX);
1923 r = RREG32(PCIE_PORT_DATA);
1927 void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
1929 WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
1930 (void)RREG32(PCIE_PORT_INDEX);
1931 WREG32(PCIE_PORT_DATA, (v));
1932 (void)RREG32(PCIE_PORT_DATA);
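
/*
 * CP & Ring
 */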
1938 void r600_cp_stop(struct radeon_device *rdev)
1940 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
1941 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
1942 WREG32(SCRATCH_UMSK, 0);
1945 int r600_init_microcode(struct radeon_device *rdev)
1947 struct platform_device *pdev;
1948 const char *chip_name;
1949 const char *rlc_chip_name;
1950 size_t pfp_req_size, me_req_size, rlc_req_size;
1956 pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
	err = IS_ERR(pdev);
	if (err) {
		printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
		return -EINVAL;
	}
1963 switch (rdev->family) {
1966 rlc_chip_name = "R600";
1969 chip_name = "RV610";
1970 rlc_chip_name = "R600";
1973 chip_name = "RV630";
1974 rlc_chip_name = "R600";
1977 chip_name = "RV620";
1978 rlc_chip_name = "R600";
1981 chip_name = "RV635";
1982 rlc_chip_name = "R600";
1985 chip_name = "RV670";
1986 rlc_chip_name = "R600";
1990 chip_name = "RS780";
1991 rlc_chip_name = "R600";
1994 chip_name = "RV770";
1995 rlc_chip_name = "R700";
1999 chip_name = "RV730";
2000 rlc_chip_name = "R700";
2003 chip_name = "RV710";
2004 rlc_chip_name = "R700";
2007 chip_name = "CEDAR";
2008 rlc_chip_name = "CEDAR";
2011 chip_name = "REDWOOD";
2012 rlc_chip_name = "REDWOOD";
2015 chip_name = "JUNIPER";
2016 rlc_chip_name = "JUNIPER";
2020 chip_name = "CYPRESS";
2021 rlc_chip_name = "CYPRESS";
2025 rlc_chip_name = "SUMO";
2030 if (rdev->family >= CHIP_CEDAR) {
2031 pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
2032 me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
2033 rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
2034 } else if (rdev->family >= CHIP_RV770) {
2035 pfp_req_size = R700_PFP_UCODE_SIZE * 4;
2036 me_req_size = R700_PM4_UCODE_SIZE * 4;
2037 rlc_req_size = R700_RLC_UCODE_SIZE * 4;
2039 pfp_req_size = PFP_UCODE_SIZE * 4;
2040 me_req_size = PM4_UCODE_SIZE * 12;
2041 rlc_req_size = RLC_UCODE_SIZE * 4;
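		/* Note: the r6xx ME (PM4) image stores three dwords per
		 * PM4_UCODE_SIZE entry, hence 12 bytes per entry here and the
		 * PM4_UCODE_SIZE * 3 write loop in r600_cp_load_microcode(). */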
2044 DRM_INFO("Loading %s Microcode\n", chip_name);
2046 snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
2047 err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
2050 if (rdev->pfp_fw->size != pfp_req_size) {
2052 "r600_cp: Bogus length %zu in firmware \"%s\"\n",
2053 rdev->pfp_fw->size, fw_name);
2058 snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
2059 err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
2062 if (rdev->me_fw->size != me_req_size) {
2064 "r600_cp: Bogus length %zu in firmware \"%s\"\n",
2065 rdev->me_fw->size, fw_name);
2069 snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
2070 err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
2073 if (rdev->rlc_fw->size != rlc_req_size) {
2075 "r600_rlc: Bogus length %zu in firmware \"%s\"\n",
2076 rdev->rlc_fw->size, fw_name);
2081 platform_device_unregister(pdev);
2086 "r600_cp: Failed to load firmware \"%s\"\n",
2088 release_firmware(rdev->pfp_fw);
2089 rdev->pfp_fw = NULL;
2090 release_firmware(rdev->me_fw);
		rdev->me_fw = NULL;
		release_firmware(rdev->rlc_fw);
		rdev->rlc_fw = NULL;
	}
	return err;
}
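
/* Load the PFP and ME microcode into the CP.  The CP ucode RAMs are written
 * through indexed ADDR/DATA register pairs after a CP soft reset; the ME
 * image is streamed as big-endian dwords, three per PM4 entry. */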
2098 static int r600_cp_load_microcode(struct radeon_device *rdev)
2100 const __be32 *fw_data;
	if (!rdev->me_fw || !rdev->pfp_fw)
		return -EINVAL;

	r600_cp_stop(rdev);

	WREG32(CP_RB_CNTL,
#ifdef __BIG_ENDIAN
	       BUF_SWAP_32BIT |
#endif
	       RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
2115 WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
2116 RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);
2120 WREG32(CP_ME_RAM_WADDR, 0);
2122 fw_data = (const __be32 *)rdev->me_fw->data;
2123 WREG32(CP_ME_RAM_WADDR, 0);
2124 for (i = 0; i < PM4_UCODE_SIZE * 3; i++)
2125 WREG32(CP_ME_RAM_DATA,
2126 be32_to_cpup(fw_data++));
2128 fw_data = (const __be32 *)rdev->pfp_fw->data;
2129 WREG32(CP_PFP_UCODE_ADDR, 0);
2130 for (i = 0; i < PFP_UCODE_SIZE; i++)
2131 WREG32(CP_PFP_UCODE_DATA,
2132 be32_to_cpup(fw_data++));
2134 WREG32(CP_PFP_UCODE_ADDR, 0);
2135 WREG32(CP_ME_RAM_WADDR, 0);
	WREG32(CP_ME_RAM_RADDR, 0);
	return 0;
}
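
/* ME_INITIALIZE is a type-3 packet with six payload dwords (hence the
 * seven-dword ring lock below); r7xx+ parts take their context count from
 * the rv770 config, earlier parts from the r600 config. */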
2140 int r600_cp_start(struct radeon_device *rdev)
2145 r = radeon_ring_lock(rdev, 7);
2147 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
2150 radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
2151 radeon_ring_write(rdev, 0x1);
2152 if (rdev->family >= CHIP_RV770) {
2153 radeon_ring_write(rdev, 0x0);
2154 radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
	} else {
		radeon_ring_write(rdev, 0x3);
2157 radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
	}
	radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
2160 radeon_ring_write(rdev, 0);
2161 radeon_ring_write(rdev, 0);
2162 radeon_ring_unlock_commit(rdev);
	cp_me = 0xff;
	WREG32(R_0086D8_CP_ME_CNTL, cp_me);
	return 0;
}
2169 int r600_cp_resume(struct radeon_device *rdev)
2176 WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
2177 RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);
2181 /* Set ring buffer size */
2182 rb_bufsz = drm_order(rdev->cp.ring_size / 8);
2183 tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
2187 WREG32(CP_RB_CNTL, tmp);
2188 WREG32(CP_SEM_WAIT_TIMER, 0x4);
2190 /* Set the write pointer delay */
2191 WREG32(CP_RB_WPTR_DELAY, 0);
2193 /* Initialize the ring buffer's read and write pointers */
2194 WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
2195 WREG32(CP_RB_RPTR_WR, 0);
2196 WREG32(CP_RB_WPTR, 0);
2198 /* set the wb address whether it's enabled or not */
2199 WREG32(CP_RB_RPTR_ADDR,
2203 ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
2204 WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
2205 WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
2207 if (rdev->wb.enabled)
2208 WREG32(SCRATCH_UMSK, 0xff);
	else {
		tmp |= RB_NO_UPDATE;
		WREG32(SCRATCH_UMSK, 0);
	}
2215 WREG32(CP_RB_CNTL, tmp);
2217 WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
2218 WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
2220 rdev->cp.rptr = RREG32(CP_RB_RPTR);
2221 rdev->cp.wptr = RREG32(CP_RB_WPTR);
2223 r600_cp_start(rdev);
2224 rdev->cp.ready = true;
2225 r = radeon_ring_test(rdev);
	if (r) {
		rdev->cp.ready = false;
		return r;
	}

	return 0;
}
2233 void r600_cp_commit(struct radeon_device *rdev)
2235 WREG32(CP_RB_WPTR, rdev->cp.wptr);
2236 (void)RREG32(CP_RB_WPTR);
2239 void r600_ring_init(struct radeon_device *rdev, unsigned ring_size)
2243 /* Align ring size */
2244 rb_bufsz = drm_order(ring_size / 8);
2245 ring_size = (1 << (rb_bufsz + 1)) * 4;
2246 rdev->cp.ring_size = ring_size;
2247 rdev->cp.align_mask = 16 - 1;
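	/* Example: a 1 MiB request gives rb_bufsz = drm_order(1048576 / 8) = 17,
	 * so ring_size becomes (1 << 18) * 4 = 1 MiB again; non-power-of-two
	 * requests round up.  The align mask pads submissions to 16 dwords. */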
2250 void r600_cp_fini(struct radeon_device *rdev)
2253 radeon_ring_fini(rdev);
/*
 * GPU scratch register helper functions.
 */
2260 void r600_scratch_init(struct radeon_device *rdev)
2264 rdev->scratch.num_reg = 7;
2265 rdev->scratch.reg_base = SCRATCH_REG0;
2266 for (i = 0; i < rdev->scratch.num_reg; i++) {
2267 rdev->scratch.free[i] = true;
2268 rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
2272 int r600_ring_test(struct radeon_device *rdev)
2279 r = radeon_scratch_get(rdev, &scratch);
2281 DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
2284 WREG32(scratch, 0xCAFEDEAD);
2285 r = radeon_ring_lock(rdev, 3);
2287 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
2288 radeon_scratch_free(rdev, scratch);
2291 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2292 radeon_ring_write(rdev, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
2293 radeon_ring_write(rdev, 0xDEADBEEF);
2294 radeon_ring_unlock_commit(rdev);
2295 for (i = 0; i < rdev->usec_timeout; i++) {
2296 tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test succeeded in %d usecs\n", i);
	} else {
		DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}
	radeon_scratch_free(rdev, scratch);
	return r;
}
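
/* Two fence paths: with write-back events enabled the CP emits an
 * EVENT_WRITE_EOP that flushes caches, writes the 32-bit fence value to the
 * write-back buffer (DATA_SEL(1)) and raises an interrupt once the write has
 * landed (INT_SEL(2)); otherwise the sequence goes to a scratch register
 * after a 3D-idle wait and the interrupt is poked via a packet 0 write. */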
2312 void r600_fence_ring_emit(struct radeon_device *rdev,
2313 struct radeon_fence *fence)
2315 if (rdev->wb.use_event) {
2316 u64 addr = rdev->wb.gpu_addr + R600_WB_EVENT_OFFSET +
2317 (u64)(rdev->fence_drv.scratch_reg - rdev->scratch.reg_base);
2318 /* EVENT_WRITE_EOP - flush caches, send int */
2319 radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
2320 radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
2321 radeon_ring_write(rdev, addr & 0xffffffff);
2322 radeon_ring_write(rdev, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
2323 radeon_ring_write(rdev, fence->seq);
2324 radeon_ring_write(rdev, 0);
	} else {
		radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
2327 radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
2328 /* wait for 3D idle clean */
2329 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2330 radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
2331 radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
2332 /* Emit fence sequence & fire IRQ */
2333 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2334 radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
2335 radeon_ring_write(rdev, fence->seq);
2336 /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
2337 radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
		radeon_ring_write(rdev, RB_INT_STAT);
	}
}
2342 int r600_copy_blit(struct radeon_device *rdev,
2343 uint64_t src_offset, uint64_t dst_offset,
2344 unsigned num_pages, struct radeon_fence *fence)
2348 mutex_lock(&rdev->r600_blit.mutex);
2349 rdev->r600_blit.vb_ib = NULL;
2350 r = r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
	if (r) {
		if (rdev->r600_blit.vb_ib)
			radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
		mutex_unlock(&rdev->r600_blit.mutex);
		return r;
	}
2357 r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
2358 r600_blit_done_copy(rdev, fence);
	mutex_unlock(&rdev->r600_blit.mutex);
	return 0;
}
2363 int r600_set_surface_reg(struct radeon_device *rdev, int reg,
2364 uint32_t tiling_flags, uint32_t pitch,
2365 uint32_t offset, uint32_t obj_size)
2367 /* FIXME: implement */
2371 void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
2373 /* FIXME: implement */
2376 int r600_startup(struct radeon_device *rdev)
2380 /* enable pcie gen2 link */
2381 r600_pcie_gen2_enable(rdev);
2383 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
2384 r = r600_init_microcode(rdev);
2386 DRM_ERROR("Failed to load firmware!\n");
2391 r600_mc_program(rdev);
2392 if (rdev->flags & RADEON_IS_AGP) {
2393 r600_agp_enable(rdev);
	} else {
		r = r600_pcie_gart_enable(rdev);
		if (r)
			return r;
	}
2399 r600_gpu_init(rdev);
2400 r = r600_blit_init(rdev);
	if (r) {
		r600_blit_fini(rdev);
		rdev->asic->copy = NULL;
		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
	}
2407 /* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	/* Enable IRQ */
2413 r = r600_irq_init(rdev);
2415 DRM_ERROR("radeon: IH init failed (%d).\n", r);
2416 radeon_irq_kms_fini(rdev);
	r = radeon_ring_init(rdev, rdev->cp.ring_size);
	if (r)
		return r;
	r = r600_cp_load_microcode(rdev);
	if (r)
		return r;
	r = r600_cp_resume(rdev);
	if (r)
		return r;

	return 0;
}
2434 void r600_vga_set_state(struct radeon_device *rdev, bool state)
2438 temp = RREG32(CONFIG_CNTL);
	if (state == false) {
		temp &= ~(1 << 0);
		temp |= (1 << 1);
	} else {
		temp &= ~(1 << 1);
	}
	WREG32(CONFIG_CNTL, temp);
}
2448 int r600_resume(struct radeon_device *rdev)
2452 /* Do not reset GPU before posting, on r600 hw unlike on r500 hw,
2453 * posting will perform necessary task to bring back GPU into good
	 * state.
	 */
	/* post card */
	atom_asic_init(rdev->mode_info.atom_context);
2459 r = r600_startup(rdev);
2461 DRM_ERROR("r600 startup failed on resume\n");
2465 r = r600_ib_test(rdev);
2467 DRM_ERROR("radeon: failed testing IB (%d).\n", r);
2471 r = r600_audio_init(rdev);
2473 DRM_ERROR("radeon: audio resume failed\n");
2480 int r600_suspend(struct radeon_device *rdev)
2484 r600_audio_fini(rdev);
2485 /* FIXME: we should wait for ring to be empty */
	r600_cp_stop(rdev);
	rdev->cp.ready = false;
2488 r600_irq_suspend(rdev);
2489 radeon_wb_disable(rdev);
2490 r600_pcie_gart_disable(rdev);
2491 /* unpin shaders bo */
2492 if (rdev->r600_blit.shader_obj) {
2493 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
		if (!r) {
			radeon_bo_unpin(rdev->r600_blit.shader_obj);
			radeon_bo_unreserve(rdev->r600_blit.shader_obj);
		}
	}

	return 0;
}
/* Plan is to move initialization into that function and use
 * helper functions so that radeon_device_init does pretty much
 * nothing more than call asic specific functions. This
 * should also allow us to remove a bunch of callback functions
 * from each asic.
 */
2508 int r600_init(struct radeon_device *rdev)
	r = radeon_dummy_page_init(rdev);
	if (r)
		return r;
2515 if (r600_debugfs_mc_info_init(rdev)) {
2516 DRM_ERROR("Failed to register debugfs file for mc !\n");
2518 /* This don't do much */
	r = radeon_gem_init(rdev);
	if (r)
		return r;
2523 if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
2527 /* Must be an ATOMBIOS */
2528 if (!rdev->is_atom_bios) {
2529 dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
		return -EINVAL;
	}
	r = radeon_atombios_init(rdev);
	if (r)
		return r;
2535 /* Post card if necessary */
2536 if (!radeon_card_posted(rdev)) {
2538 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
2541 DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
2544 /* Initialize scratch registers */
2545 r600_scratch_init(rdev);
2546 /* Initialize surface registers */
2547 radeon_surface_init(rdev);
2548 /* Initialize clocks */
2549 radeon_get_clock_info(rdev->ddev);
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
2554 if (rdev->flags & RADEON_IS_AGP) {
2555 r = radeon_agp_init(rdev);
		if (r)
			radeon_agp_disable(rdev);
	}
	r = r600_mc_init(rdev);
	if (r)
		return r;
2562 /* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;

	r = radeon_irq_kms_init(rdev);
	if (r)
		return r;
2571 rdev->cp.ring_obj = NULL;
2572 r600_ring_init(rdev, 1024 * 1024);
2574 rdev->ih.ring_obj = NULL;
2575 r600_ih_ring_init(rdev, 64 * 1024);
	r = r600_pcie_gart_init(rdev);
	if (r)
		return r;
2581 rdev->accel_working = true;
2582 r = r600_startup(rdev);
2584 dev_err(rdev->dev, "disabling GPU acceleration\n");
2586 r600_irq_fini(rdev);
2587 radeon_wb_fini(rdev);
2588 radeon_irq_kms_fini(rdev);
2589 r600_pcie_gart_fini(rdev);
		rdev->accel_working = false;
	}
2592 if (rdev->accel_working) {
2593 r = radeon_ib_pool_init(rdev);
2595 dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
2596 rdev->accel_working = false;
2598 r = r600_ib_test(rdev);
2600 dev_err(rdev->dev, "IB test failed (%d).\n", r);
2601 rdev->accel_working = false;
2606 r = r600_audio_init(rdev);
	if (r)
		return r; /* TODO error handling */
	return 0;
}
2612 void r600_fini(struct radeon_device *rdev)
2614 r600_audio_fini(rdev);
2615 r600_blit_fini(rdev);
	r600_cp_fini(rdev);
	r600_irq_fini(rdev);
2618 radeon_wb_fini(rdev);
2619 radeon_irq_kms_fini(rdev);
2620 r600_pcie_gart_fini(rdev);
2621 radeon_agp_fini(rdev);
2622 radeon_gem_fini(rdev);
2623 radeon_fence_driver_fini(rdev);
2624 radeon_bo_fini(rdev);
2625 radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
	radeon_dummy_page_fini(rdev);
}
2635 void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
2637 /* FIXME: implement */
2638 radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
2639 radeon_ring_write(rdev,
2643 (ib->gpu_addr & 0xFFFFFFFC));
2644 radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
	radeon_ring_write(rdev, ib->length_dw);
}
2648 int r600_ib_test(struct radeon_device *rdev)
	struct radeon_ib *ib;
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;
2656 r = radeon_scratch_get(rdev, &scratch);
2658 DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
2661 WREG32(scratch, 0xCAFEDEAD);
2662 r = radeon_ib_get(rdev, &ib);
2664 DRM_ERROR("radeon: failed to get ib (%d).\n", r);
2667 ib->ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
2668 ib->ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
2669 ib->ptr[2] = 0xDEADBEEF;
2670 ib->ptr[3] = PACKET2(0);
2671 ib->ptr[4] = PACKET2(0);
2672 ib->ptr[5] = PACKET2(0);
2673 ib->ptr[6] = PACKET2(0);
2674 ib->ptr[7] = PACKET2(0);
2675 ib->ptr[8] = PACKET2(0);
2676 ib->ptr[9] = PACKET2(0);
2677 ib->ptr[10] = PACKET2(0);
2678 ib->ptr[11] = PACKET2(0);
2679 ib->ptr[12] = PACKET2(0);
2680 ib->ptr[13] = PACKET2(0);
2681 ib->ptr[14] = PACKET2(0);
2682 ib->ptr[15] = PACKET2(0);
	ib->length_dw = 16;
	r = radeon_ib_schedule(rdev, ib);
	if (r) {
		radeon_scratch_free(rdev, scratch);
		radeon_ib_free(rdev, &ib);
		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
		return r;
	}
2691 r = radeon_fence_wait(ib->fence, false);
2693 DRM_ERROR("radeon: fence wait failed (%d).\n", r);
2696 for (i = 0; i < rdev->usec_timeout; i++) {
2697 tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ib test succeeded in %u usecs\n", i);
	} else {
		DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}
	radeon_scratch_free(rdev, scratch);
	radeon_ib_free(rdev, &ib);
	return r;
}
 * Interrupts use a ring buffer on r6xx/r7xx hardware.  It works pretty
 * much the same as the CP ring buffer, but in reverse.  Rather than the CPU
2719 * writing to the ring and the GPU consuming, the GPU writes to the ring
2720 * and host consumes. As the host irq handler processes interrupts, it
2721 * increments the rptr. When the rptr catches up with the wptr, all the
2722 * current interrupts have been processed.
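 *
 * In outline (see r600_irq_process() below):
 *   wptr = r600_get_ih_wptr(rdev);    -- how far the GPU has written
 *   while (rptr != wptr)              -- decode one 16-byte vector
 *           rptr += 16;               -- and advance
 *   WREG32(IH_RB_RPTR, rptr);         -- tell the GPU what was consumed
 */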
2725 void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
2729 /* Align ring size */
2730 rb_bufsz = drm_order(ring_size / 4);
2731 ring_size = (1 << rb_bufsz) * 4;
2732 rdev->ih.ring_size = ring_size;
2733 rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
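	/* Example: the 64 KiB ring requested in r600_init() gives
	 * rb_bufsz = drm_order(65536 / 4) = 14 and ring_size = (1 << 14) * 4 =
	 * 64 KiB, so ptr_mask = 0xffff; rptr/wptr index the ring in bytes. */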
2737 static int r600_ih_ring_alloc(struct radeon_device *rdev)
2741 /* Allocate ring buffer */
2742 if (rdev->ih.ring_obj == NULL) {
2743 r = radeon_bo_create(rdev, rdev->ih.ring_size,
2745 RADEON_GEM_DOMAIN_GTT,
2746 &rdev->ih.ring_obj);
2748 DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
2751 r = radeon_bo_reserve(rdev->ih.ring_obj, false);
		if (unlikely(r != 0))
			return r;
2754 r = radeon_bo_pin(rdev->ih.ring_obj,
2755 RADEON_GEM_DOMAIN_GTT,
2756 &rdev->ih.gpu_addr);
		if (r) {
			radeon_bo_unreserve(rdev->ih.ring_obj);
			DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r);
			return r;
		}
2762 r = radeon_bo_kmap(rdev->ih.ring_obj,
2763 (void **)&rdev->ih.ring);
2764 radeon_bo_unreserve(rdev->ih.ring_obj);
2766 DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r);
2773 static void r600_ih_ring_fini(struct radeon_device *rdev)
2776 if (rdev->ih.ring_obj) {
2777 r = radeon_bo_reserve(rdev->ih.ring_obj, false);
2778 if (likely(r == 0)) {
2779 radeon_bo_kunmap(rdev->ih.ring_obj);
2780 radeon_bo_unpin(rdev->ih.ring_obj);
2781 radeon_bo_unreserve(rdev->ih.ring_obj);
2783 radeon_bo_unref(&rdev->ih.ring_obj);
2784 rdev->ih.ring = NULL;
2785 rdev->ih.ring_obj = NULL;
2789 void r600_rlc_stop(struct radeon_device *rdev)
2792 if ((rdev->family >= CHIP_RV770) &&
2793 (rdev->family <= CHIP_RV740)) {
2794 /* r7xx asics need to soft reset RLC before halting */
2795 WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
2796 RREG32(SRBM_SOFT_RESET);
2798 WREG32(SRBM_SOFT_RESET, 0);
2799 RREG32(SRBM_SOFT_RESET);
	}

	WREG32(RLC_CNTL, 0);
}
2805 static void r600_rlc_start(struct radeon_device *rdev)
2807 WREG32(RLC_CNTL, RLC_ENABLE);
2810 static int r600_rlc_init(struct radeon_device *rdev)
2813 const __be32 *fw_data;
	if (!rdev->rlc_fw)
		return -EINVAL;

	r600_rlc_stop(rdev);
2820 WREG32(RLC_HB_BASE, 0);
2821 WREG32(RLC_HB_CNTL, 0);
2822 WREG32(RLC_HB_RPTR, 0);
2823 WREG32(RLC_HB_WPTR, 0);
2824 if (rdev->family <= CHIP_CAICOS) {
2825 WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
2826 WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
	}
	WREG32(RLC_MC_CNTL, 0);
2829 WREG32(RLC_UCODE_CNTL, 0);
2831 fw_data = (const __be32 *)rdev->rlc_fw->data;
2832 if (rdev->family >= CHIP_CAYMAN) {
2833 for (i = 0; i < CAYMAN_RLC_UCODE_SIZE; i++) {
2834 WREG32(RLC_UCODE_ADDR, i);
2835 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
2837 } else if (rdev->family >= CHIP_CEDAR) {
2838 for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) {
2839 WREG32(RLC_UCODE_ADDR, i);
2840 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
2842 } else if (rdev->family >= CHIP_RV770) {
2843 for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
2844 WREG32(RLC_UCODE_ADDR, i);
2845 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
2848 for (i = 0; i < RLC_UCODE_SIZE; i++) {
2849 WREG32(RLC_UCODE_ADDR, i);
2850 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
2853 WREG32(RLC_UCODE_ADDR, 0);
	r600_rlc_start(rdev);

	return 0;
}
2860 static void r600_enable_interrupts(struct radeon_device *rdev)
2862 u32 ih_cntl = RREG32(IH_CNTL);
2863 u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
2865 ih_cntl |= ENABLE_INTR;
2866 ih_rb_cntl |= IH_RB_ENABLE;
2867 WREG32(IH_CNTL, ih_cntl);
2868 WREG32(IH_RB_CNTL, ih_rb_cntl);
2869 rdev->ih.enabled = true;
2872 void r600_disable_interrupts(struct radeon_device *rdev)
2874 u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
2875 u32 ih_cntl = RREG32(IH_CNTL);
2877 ih_rb_cntl &= ~IH_RB_ENABLE;
2878 ih_cntl &= ~ENABLE_INTR;
2879 WREG32(IH_RB_CNTL, ih_rb_cntl);
2880 WREG32(IH_CNTL, ih_cntl);
2881 /* set rptr, wptr to 0 */
2882 WREG32(IH_RB_RPTR, 0);
2883 WREG32(IH_RB_WPTR, 0);
2884 rdev->ih.enabled = false;
2889 static void r600_disable_interrupt_state(struct radeon_device *rdev)
2893 WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
2894 WREG32(GRBM_INT_CNTL, 0);
2895 WREG32(DxMODE_INT_MASK, 0);
2896 WREG32(D1GRPH_INTERRUPT_CONTROL, 0);
2897 WREG32(D2GRPH_INTERRUPT_CONTROL, 0);
2898 if (ASIC_IS_DCE3(rdev)) {
2899 WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
2900 WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
2901 tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2902 WREG32(DC_HPD1_INT_CONTROL, tmp);
2903 tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2904 WREG32(DC_HPD2_INT_CONTROL, tmp);
2905 tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2906 WREG32(DC_HPD3_INT_CONTROL, tmp);
2907 tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2908 WREG32(DC_HPD4_INT_CONTROL, tmp);
2909 if (ASIC_IS_DCE32(rdev)) {
2910 tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2911 WREG32(DC_HPD5_INT_CONTROL, tmp);
2912 tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2913 WREG32(DC_HPD6_INT_CONTROL, tmp);
2916 WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
2917 WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
2918 tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
2919 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
2920 tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
2921 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
2922 tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
2923 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
2927 int r600_irq_init(struct radeon_device *rdev)
2931 u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
	ret = r600_ih_ring_alloc(rdev);
	if (ret)
		return ret;

	/* disable irqs */
	r600_disable_interrupts(rdev);
2942 ret = r600_rlc_init(rdev);
	if (ret) {
		r600_ih_ring_fini(rdev);
		return ret;
	}
2948 /* setup interrupt control */
2949 /* set dummy read address to ring address */
2950 WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
2951 interrupt_cntl = RREG32(INTERRUPT_CNTL);
2952 /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
2953 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
2955 interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
2956 /* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
2957 interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
2958 WREG32(INTERRUPT_CNTL, interrupt_cntl);
2960 WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
2961 rb_bufsz = drm_order(rdev->ih.ring_size / 4);
2963 ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
		      IH_WPTR_OVERFLOW_CLEAR |
		      (rb_bufsz << 1));
2967 if (rdev->wb.enabled)
2968 ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;
2970 /* set the writeback address whether it's enabled or not */
2971 WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
2972 WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);
2974 WREG32(IH_RB_CNTL, ih_rb_cntl);
2976 /* set rptr, wptr to 0 */
2977 WREG32(IH_RB_RPTR, 0);
2978 WREG32(IH_RB_WPTR, 0);
2980 /* Default settings for IH_CNTL (disabled at first) */
2981 ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10);
2982 /* RPTR_REARM only works if msi's are enabled */
2983 if (rdev->msi_enabled)
2984 ih_cntl |= RPTR_REARM;
#ifdef __BIG_ENDIAN
	ih_cntl |= IH_MC_SWAP(IH_MC_SWAP_32BIT);
#endif
2989 WREG32(IH_CNTL, ih_cntl);
2991 /* force the active interrupt state to all disabled */
2992 if (rdev->family >= CHIP_CEDAR)
2993 evergreen_disable_interrupt_state(rdev);
	else
		r600_disable_interrupt_state(rdev);
	r600_enable_interrupts(rdev);

	return ret;
}
3003 void r600_irq_suspend(struct radeon_device *rdev)
3005 r600_irq_disable(rdev);
3006 r600_rlc_stop(rdev);
3009 void r600_irq_fini(struct radeon_device *rdev)
3011 r600_irq_suspend(rdev);
3012 r600_ih_ring_fini(rdev);
3015 int r600_irq_set(struct radeon_device *rdev)
3017 u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
	u32 mode_int = 0;
	u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
	u32 grbm_int_cntl = 0;
	u32 hdmi1, hdmi2;
	u32 d1grph = 0, d2grph = 0;
3024 if (!rdev->irq.installed) {
		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
		return -EINVAL;
	}
3028 /* don't enable anything if the ih is disabled */
3029 if (!rdev->ih.enabled) {
3030 r600_disable_interrupts(rdev);
3031 /* force the active interrupt state to all disabled */
		r600_disable_interrupt_state(rdev);
		return 0;
	}
3036 hdmi1 = RREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
3037 if (ASIC_IS_DCE3(rdev)) {
3038 hdmi2 = RREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
3039 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
3040 hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
3041 hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
3042 hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
3043 if (ASIC_IS_DCE32(rdev)) {
3044 hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
3045 hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
		}
	} else {
		hdmi2 = RREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
3049 hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
3050 hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
3051 hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
	}

	if (rdev->irq.sw_int) {
3055 DRM_DEBUG("r600_irq_set: sw int\n");
3056 cp_int_cntl |= RB_INT_ENABLE;
3057 cp_int_cntl |= TIME_STAMP_INT_ENABLE;
3059 if (rdev->irq.crtc_vblank_int[0] ||
3060 rdev->irq.pflip[0]) {
3061 DRM_DEBUG("r600_irq_set: vblank 0\n");
3062 mode_int |= D1MODE_VBLANK_INT_MASK;
3064 if (rdev->irq.crtc_vblank_int[1] ||
3065 rdev->irq.pflip[1]) {
3066 DRM_DEBUG("r600_irq_set: vblank 1\n");
3067 mode_int |= D2MODE_VBLANK_INT_MASK;
3069 if (rdev->irq.hpd[0]) {
3070 DRM_DEBUG("r600_irq_set: hpd 1\n");
3071 hpd1 |= DC_HPDx_INT_EN;
3073 if (rdev->irq.hpd[1]) {
3074 DRM_DEBUG("r600_irq_set: hpd 2\n");
3075 hpd2 |= DC_HPDx_INT_EN;
3077 if (rdev->irq.hpd[2]) {
3078 DRM_DEBUG("r600_irq_set: hpd 3\n");
3079 hpd3 |= DC_HPDx_INT_EN;
3081 if (rdev->irq.hpd[3]) {
3082 DRM_DEBUG("r600_irq_set: hpd 4\n");
3083 hpd4 |= DC_HPDx_INT_EN;
3085 if (rdev->irq.hpd[4]) {
3086 DRM_DEBUG("r600_irq_set: hpd 5\n");
3087 hpd5 |= DC_HPDx_INT_EN;
3089 if (rdev->irq.hpd[5]) {
3090 DRM_DEBUG("r600_irq_set: hpd 6\n");
3091 hpd6 |= DC_HPDx_INT_EN;
3093 if (rdev->irq.hdmi[0]) {
3094 DRM_DEBUG("r600_irq_set: hdmi 1\n");
3095 hdmi1 |= R600_HDMI_INT_EN;
3097 if (rdev->irq.hdmi[1]) {
3098 DRM_DEBUG("r600_irq_set: hdmi 2\n");
3099 hdmi2 |= R600_HDMI_INT_EN;
3101 if (rdev->irq.gui_idle) {
3102 DRM_DEBUG("gui idle\n");
3103 grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
3106 WREG32(CP_INT_CNTL, cp_int_cntl);
3107 WREG32(DxMODE_INT_MASK, mode_int);
3108 WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph);
3109 WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph);
3110 WREG32(GRBM_INT_CNTL, grbm_int_cntl);
3111 WREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, hdmi1);
3112 if (ASIC_IS_DCE3(rdev)) {
3113 WREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, hdmi2);
3114 WREG32(DC_HPD1_INT_CONTROL, hpd1);
3115 WREG32(DC_HPD2_INT_CONTROL, hpd2);
3116 WREG32(DC_HPD3_INT_CONTROL, hpd3);
3117 WREG32(DC_HPD4_INT_CONTROL, hpd4);
3118 if (ASIC_IS_DCE32(rdev)) {
3119 WREG32(DC_HPD5_INT_CONTROL, hpd5);
3120 WREG32(DC_HPD6_INT_CONTROL, hpd6);
3123 WREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, hdmi2);
3124 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
3125 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
	}

	return 0;
}
3132 static inline void r600_irq_ack(struct radeon_device *rdev)
3136 if (ASIC_IS_DCE3(rdev)) {
3137 rdev->irq.stat_regs.r600.disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
3138 rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
3139 rdev->irq.stat_regs.r600.disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
	} else {
		rdev->irq.stat_regs.r600.disp_int = RREG32(DISP_INTERRUPT_STATUS);
		rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
		rdev->irq.stat_regs.r600.disp_int_cont2 = 0;
	}
	rdev->irq.stat_regs.r600.d1grph_int = RREG32(D1GRPH_INTERRUPT_STATUS);
3146 rdev->irq.stat_regs.r600.d2grph_int = RREG32(D2GRPH_INTERRUPT_STATUS);
3148 if (rdev->irq.stat_regs.r600.d1grph_int & DxGRPH_PFLIP_INT_OCCURRED)
3149 WREG32(D1GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
3150 if (rdev->irq.stat_regs.r600.d2grph_int & DxGRPH_PFLIP_INT_OCCURRED)
3151 WREG32(D2GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
3152 if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT)
3153 WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
3154 if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT)
3155 WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
3156 if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT)
3157 WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
3158 if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT)
3159 WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
3160 if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD1_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD1_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
		}
	}
3171 if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD2_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD2_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
		}
	}
3182 if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD3_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD3_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
		}
	}
3193 if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
3194 tmp = RREG32(DC_HPD4_INT_CONTROL);
3195 tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
	}
	if (ASIC_IS_DCE32(rdev)) {
3199 if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
3200 tmp = RREG32(DC_HPD5_INT_CONTROL);
3201 tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
		}
		if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
			tmp = RREG32(DC_HPD6_INT_CONTROL);
3206 tmp |= DC_HPDx_INT_ACK;
3207 WREG32(DC_HPD6_INT_CONTROL, tmp);
		}
	}
	if (RREG32(R600_HDMI_BLOCK1 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
3211 WREG32_P(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
3213 if (ASIC_IS_DCE3(rdev)) {
3214 if (RREG32(R600_HDMI_BLOCK3 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
			WREG32_P(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
		}
	} else {
		if (RREG32(R600_HDMI_BLOCK2 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
			WREG32_P(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
		}
	}
}
3224 void r600_irq_disable(struct radeon_device *rdev)
3226 r600_disable_interrupts(rdev);
3227 /* Wait and acknowledge irq */
	mdelay(1);
	r600_irq_ack(rdev);
	r600_disable_interrupt_state(rdev);
}
3233 static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
3237 if (rdev->wb.enabled)
3238 wptr = rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4];
3240 wptr = RREG32(IH_RB_WPTR);
3242 if (wptr & RB_OVERFLOW) {
		/* When a ring buffer overflow happens, start parsing interrupts
		 * from the last not-overwritten vector (wptr + 16).  Hopefully
		 * this should allow us to catch up.
		 */
3247 dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
3248 wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask);
3249 rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
3250 tmp = RREG32(IH_RB_CNTL);
3251 tmp |= IH_WPTR_OVERFLOW_CLEAR;
3252 WREG32(IH_RB_CNTL, tmp);
	}
	return (wptr & rdev->ih.ptr_mask);
}
3258 * Each IV ring entry is 128 bits:
3259 * [7:0] - interrupt source id
3261 * [59:32] - interrupt source data
3262 * [127:60] - reserved
3264 * The basic interrupt vector entries
3265 * are decoded as follows:
3266 * src_id src_data description
3271 * 19 0 FP Hot plug detection A
3272 * 19 1 FP Hot plug detection B
3273 * 19 2 DAC A auto-detection
3274 * 19 3 DAC B auto-detection
3280 * 181 - EOP Interrupt
3283 * Note, these are based on r600 and may need to be
3284 * adjusted or added to on newer asics
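 *
 * For example, the first two dwords of each 16-byte vector are decoded in
 * the loop below as:
 *   src_id   = le32_to_cpu(ring[rptr / 4]) & 0xff;
 *   src_data = le32_to_cpu(ring[rptr / 4 + 1]) & 0xfffffff;
 */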
3287 int r600_irq_process(struct radeon_device *rdev)
3289 u32 wptr = r600_get_ih_wptr(rdev);
3290 u32 rptr = rdev->ih.rptr;
3291 u32 src_id, src_data;
	u32 ring_index;
	unsigned long flags;
3294 bool queue_hotplug = false;
3296 DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
	if (!rdev->ih.enabled)
		return IRQ_NONE;
3300 spin_lock_irqsave(&rdev->ih.lock, flags);
	if (rptr == wptr) {
		spin_unlock_irqrestore(&rdev->ih.lock, flags);
		return IRQ_NONE;
	}
	if (rdev->shutdown) {
		spin_unlock_irqrestore(&rdev->ih.lock, flags);
		return IRQ_NONE;
	}
restart_ih:
	/* display interrupts */
	r600_irq_ack(rdev);
3315 rdev->ih.wptr = wptr;
3316 while (rptr != wptr) {
3317 /* wptr/rptr are in bytes! */
3318 ring_index = rptr / 4;
3319 src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
3320 src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
		switch (src_id) {
		case 1: /* D1 vblank/vline */
			switch (src_data) {
			case 0: /* D1 vblank */
3326 if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) {
3327 if (rdev->irq.crtc_vblank_int[0]) {
3328 drm_handle_vblank(rdev->ddev, 0);
3329 rdev->pm.vblank_sync = true;
3330 wake_up(&rdev->irq.vblank_queue);
3332 if (rdev->irq.pflip[0])
3333 radeon_crtc_handle_flip(rdev, 0);
3334 rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
3335 DRM_DEBUG("IH: D1 vblank\n");
				}
				break;
			case 1: /* D1 vline */
3339 if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT) {
3340 rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
3341 DRM_DEBUG("IH: D1 vline\n");
3345 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
		case 5: /* D2 vblank/vline */
			switch (src_data) {
			case 0: /* D2 vblank */
3352 if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) {
3353 if (rdev->irq.crtc_vblank_int[1]) {
3354 drm_handle_vblank(rdev->ddev, 1);
3355 rdev->pm.vblank_sync = true;
3356 wake_up(&rdev->irq.vblank_queue);
3358 if (rdev->irq.pflip[1])
3359 radeon_crtc_handle_flip(rdev, 1);
3360 rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
3361 DRM_DEBUG("IH: D2 vblank\n");
				}
				break;
			case 1: /* D2 vline */
3365 if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT) {
3366 rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
3367 DRM_DEBUG("IH: D2 vline\n");
3371 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
		case 19: /* HPD/DAC hotplug */
			switch (src_data) {
			case 0:
				if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
3379 rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
3380 queue_hotplug = true;
3381 DRM_DEBUG("IH: HPD1\n");
				}
				break;
			case 1:
				if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
3386 rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
3387 queue_hotplug = true;
3388 DRM_DEBUG("IH: HPD2\n");
				}
				break;
			case 4:
				if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
3393 rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
3394 queue_hotplug = true;
3395 DRM_DEBUG("IH: HPD3\n");
				}
				break;
			case 5:
				if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
3400 rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
3401 queue_hotplug = true;
3402 DRM_DEBUG("IH: HPD4\n");
				}
				break;
			case 10:
				if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
3407 rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
3408 queue_hotplug = true;
3409 DRM_DEBUG("IH: HPD5\n");
				}
				break;
			case 12:
				if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
3414 rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
3415 queue_hotplug = true;
3416 DRM_DEBUG("IH: HPD6\n");
3420 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3425 DRM_DEBUG("IH: HDMI: 0x%x\n", src_data);
3426 r600_audio_schedule_polling(rdev);
			break;
		case 176: /* CP_INT in ring buffer */
3429 case 177: /* CP_INT in IB1 */
3430 case 178: /* CP_INT in IB2 */
3431 DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
			radeon_fence_process(rdev);
			break;
		case 181: /* CP EOP event */
3435 DRM_DEBUG("IH: CP EOP\n");
			radeon_fence_process(rdev);
			break;
		case 233: /* GUI IDLE */
			DRM_DEBUG("IH: GUI idle\n");
3440 rdev->pm.gui_idle = true;
3441 wake_up(&rdev->irq.idle_queue);
3444 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3448 /* wptr/rptr are in bytes! */
		rptr += 16;
		rptr &= rdev->ih.ptr_mask;
	}
3452 /* make sure wptr hasn't changed while processing */
3453 wptr = r600_get_ih_wptr(rdev);
	if (wptr != rdev->ih.wptr)
		goto restart_ih;
	if (queue_hotplug)
		schedule_work(&rdev->hotplug_work);
3458 rdev->ih.rptr = rptr;
3459 WREG32(IH_RB_RPTR, rdev->ih.rptr);
	spin_unlock_irqrestore(&rdev->ih.lock, flags);
	return IRQ_HANDLED;
}
3467 #if defined(CONFIG_DEBUG_FS)
3469 static int r600_debugfs_cp_ring_info(struct seq_file *m, void *data)
3471 struct drm_info_node *node = (struct drm_info_node *) m->private;
3472 struct drm_device *dev = node->minor->dev;
3473 struct radeon_device *rdev = dev->dev_private;
3474 unsigned count, i, j;
3476 radeon_ring_free_size(rdev);
3477 count = (rdev->cp.ring_size / 4) - rdev->cp.ring_free_dw;
3478 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT));
3479 seq_printf(m, "CP_RB_WPTR 0x%08x\n", RREG32(CP_RB_WPTR));
3480 seq_printf(m, "CP_RB_RPTR 0x%08x\n", RREG32(CP_RB_RPTR));
3481 seq_printf(m, "driver's copy of the CP_RB_WPTR 0x%08x\n", rdev->cp.wptr);
3482 seq_printf(m, "driver's copy of the CP_RB_RPTR 0x%08x\n", rdev->cp.rptr);
3483 seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
3484 seq_printf(m, "%u dwords in ring\n", count);
	i = rdev->cp.rptr;
	for (j = 0; j <= count; j++) {
3487 seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
		i = (i + 1) & rdev->cp.ptr_mask;
	}
	return 0;
}
3493 static int r600_debugfs_mc_info(struct seq_file *m, void *data)
3495 struct drm_info_node *node = (struct drm_info_node *) m->private;
3496 struct drm_device *dev = node->minor->dev;
3497 struct radeon_device *rdev = dev->dev_private;
3499 DREG32_SYS(m, rdev, R_000E50_SRBM_STATUS);
	DREG32_SYS(m, rdev, VM_L2_STATUS);
	return 0;
}
3504 static struct drm_info_list r600_mc_info_list[] = {
3505 {"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
3506 {"r600_ring_info", r600_debugfs_cp_ring_info, 0, NULL},
3510 int r600_debugfs_mc_info_init(struct radeon_device *rdev)
3512 #if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, r600_mc_info_list, ARRAY_SIZE(r600_mc_info_list));
#else
	return 0;
#endif
}
/**
 * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
3521 * rdev: radeon device structure
3522 * bo: buffer object struct which userspace is waiting for idle
 * Some R6XX/R7XX ASICs don't seem to take into account HDP flushes
 * performed through the ring buffer. This leads to corruption in rendering,
 * see http://bugzilla.kernel.org/show_bug.cgi?id=15186; to avoid this we
 * directly perform the HDP flush by writing the register through MMIO.
 */
3529 void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
3531 /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read
3532 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL.
	 * This seems to cause problems on some AGP cards. Just use the old
	 * method for them.
	 */
3536 if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
3537 rdev->vram_scratch.ptr && !(rdev->flags & RADEON_IS_AGP)) {
		void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
		u32 tmp;

		WREG32(HDP_DEBUG1, 0);
		tmp = readl((void __iomem *)ptr);
	} else
		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
}
3547 void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes)
3549 u32 link_width_cntl, mask, target_reg;
	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;
3557 /* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return;
3561 /* FIXME wait for idle */
	switch (lanes) {
	case 0:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
		break;
	case 1:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
		break;
	case 2:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
		break;
	case 4:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
		break;
	case 8:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
		break;
	case 12:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
		break;
	case 16:
	default:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
		break;
	}
3588 link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
	if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) ==
	    (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT))
		return;
	if (link_width_cntl & R600_PCIE_LC_UPCONFIGURE_DIS)
		return;
3597 link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK |
3598 RADEON_PCIE_LC_RECONFIG_NOW |
3599 R600_PCIE_LC_RENEGOTIATE_EN |
3600 R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE);
3601 link_width_cntl |= mask;
3603 WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
3605 /* some northbridges can renegotiate the link rather than requiring
3606 * a complete re-config.
3607 * e.g., AMD 780/790 northbridges (pci ids: 0x5956, 0x5957, 0x5958, etc.)
3609 if (link_width_cntl & R600_PCIE_LC_RENEGOTIATION_SUPPORT)
3610 link_width_cntl |= R600_PCIE_LC_RENEGOTIATE_EN | R600_PCIE_LC_UPCONFIGURE_SUPPORT;
	else
		link_width_cntl |= R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE;
3614 WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl |
3615 RADEON_PCIE_LC_RECONFIG_NOW));
3617 if (rdev->family >= CHIP_RV770)
3618 target_reg = R700_TARGET_AND_CURRENT_PROFILE_INDEX;
3620 target_reg = R600_TARGET_AND_CURRENT_PROFILE_INDEX;
3622 /* wait for lane set to complete */
3623 link_width_cntl = RREG32(target_reg);
3624 while (link_width_cntl == 0xffffffff)
3625 link_width_cntl = RREG32(target_reg);
3629 int r600_get_pcie_lanes(struct radeon_device *rdev)
3631 u32 link_width_cntl;
	if (rdev->flags & RADEON_IS_IGP)
		return 0;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return 0;
3639 /* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return 0;
3643 /* FIXME wait for idle */
3645 link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
3647 switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
	case RADEON_PCIE_LC_LINK_WIDTH_X0:
		return 0;
	case RADEON_PCIE_LC_LINK_WIDTH_X1:
		return 1;
	case RADEON_PCIE_LC_LINK_WIDTH_X2:
		return 2;
	case RADEON_PCIE_LC_LINK_WIDTH_X4:
		return 4;
	case RADEON_PCIE_LC_LINK_WIDTH_X8:
		return 8;
	case RADEON_PCIE_LC_LINK_WIDTH_X16:
	default:
		return 16;
	}
}
3664 static void r600_pcie_gen2_enable(struct radeon_device *rdev)
3666 u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp;
	if (radeon_pcie_gen2 == 0)
		return;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return;

	/* only RV6xx+ chips are supported */
	if (rdev->family <= CHIP_R600)
		return;
3686 /* 55 nm r6xx asics */
3687 if ((rdev->family == CHIP_RV670) ||
3688 (rdev->family == CHIP_RV620) ||
3689 (rdev->family == CHIP_RV635)) {
3690 /* advertise upconfig capability */
3691 link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
3692 link_width_cntl &= ~LC_UPCONFIGURE_DIS;
3693 WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
3694 link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
3695 if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
3696 lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
3697 link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
3698 LC_RECONFIG_ARC_MISSING_ESCAPE);
3699 link_width_cntl |= lanes | LC_RECONFIG_NOW | LC_RENEGOTIATE_EN;
3700 WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
3702 link_width_cntl |= LC_UPCONFIGURE_DIS;
3703 WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
3707 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
3708 if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
3709 (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
3711 /* 55 nm r6xx asics */
3712 if ((rdev->family == CHIP_RV670) ||
3713 (rdev->family == CHIP_RV620) ||
3714 (rdev->family == CHIP_RV635)) {
3715 WREG32(MM_CFGREGS_CNTL, 0x8);
3716 link_cntl2 = RREG32(0x4088);
3717 WREG32(MM_CFGREGS_CNTL, 0);
			/* not supported yet */
			if (link_cntl2 & SELECTABLE_DEEMPHASIS)
				return;
		}
3723 speed_cntl &= ~LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK;
3724 speed_cntl |= (0x3 << LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT);
3725 speed_cntl &= ~LC_VOLTAGE_TIMER_SEL_MASK;
3726 speed_cntl &= ~LC_FORCE_DIS_HW_SPEED_CHANGE;
3727 speed_cntl |= LC_FORCE_EN_HW_SPEED_CHANGE;
3728 WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
3730 tmp = RREG32(0x541c);
3731 WREG32(0x541c, tmp | 0x8);
3732 WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN);
3733 link_cntl2 = RREG16(0x4088);
3734 link_cntl2 &= ~TARGET_LINK_SPEED_MASK;
		link_cntl2 |= 0x2;
		WREG16(0x4088, link_cntl2);
3737 WREG32(MM_CFGREGS_CNTL, 0);
3739 if ((rdev->family == CHIP_RV670) ||
3740 (rdev->family == CHIP_RV620) ||
3741 (rdev->family == CHIP_RV635)) {
3742 training_cntl = RREG32_PCIE_P(PCIE_LC_TRAINING_CNTL);
3743 training_cntl &= ~LC_POINT_7_PLUS_EN;
3744 WREG32_PCIE_P(PCIE_LC_TRAINING_CNTL, training_cntl);
		}

		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
3747 speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
3748 WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
3751 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
3752 speed_cntl |= LC_GEN2_EN_STRAP;
3753 WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
3756 link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
3757 /* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
		if (1)
			link_width_cntl |= LC_UPCONFIGURE_DIS;
		else
			link_width_cntl &= ~LC_UPCONFIGURE_DIS;
		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
	}
}