/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_mode.h"
#include "r600d.h"
#include "atom.h"
#include "avivod.h"
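/* Expected microcode image sizes, in 32-bit words, used by the upload
 * loops when the PFP, PM4 (ME) and RLC firmware is written to the GPU.
 */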
#define PFP_UCODE_SIZE 576
#define PM4_UCODE_SIZE 1792
#define RLC_UCODE_SIZE 768
#define R700_PFP_UCODE_SIZE 848
#define R700_PM4_UCODE_SIZE 1360
#define R700_RLC_UCODE_SIZE 1024
#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376
#define EVERGREEN_RLC_UCODE_SIZE 768
#define CAYMAN_RLC_UCODE_SIZE 1024
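/* Firmware files this driver may request at runtime; MODULE_FIRMWARE()
 * records them in the module info so userspace can pre-install them.
 */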
MODULE_FIRMWARE("radeon/R600_pfp.bin");
MODULE_FIRMWARE("radeon/R600_me.bin");
MODULE_FIRMWARE("radeon/RV610_pfp.bin");
MODULE_FIRMWARE("radeon/RV610_me.bin");
MODULE_FIRMWARE("radeon/RV630_pfp.bin");
MODULE_FIRMWARE("radeon/RV630_me.bin");
MODULE_FIRMWARE("radeon/RV620_pfp.bin");
MODULE_FIRMWARE("radeon/RV620_me.bin");
MODULE_FIRMWARE("radeon/RV635_pfp.bin");
MODULE_FIRMWARE("radeon/RV635_me.bin");
MODULE_FIRMWARE("radeon/RV670_pfp.bin");
MODULE_FIRMWARE("radeon/RV670_me.bin");
MODULE_FIRMWARE("radeon/RS780_pfp.bin");
MODULE_FIRMWARE("radeon/RS780_me.bin");
MODULE_FIRMWARE("radeon/RV770_pfp.bin");
MODULE_FIRMWARE("radeon/RV770_me.bin");
MODULE_FIRMWARE("radeon/RV730_pfp.bin");
MODULE_FIRMWARE("radeon/RV730_me.bin");
MODULE_FIRMWARE("radeon/RV710_pfp.bin");
MODULE_FIRMWARE("radeon/RV710_me.bin");
MODULE_FIRMWARE("radeon/R600_rlc.bin");
MODULE_FIRMWARE("radeon/R700_rlc.bin");
MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
MODULE_FIRMWARE("radeon/CEDAR_me.bin");
MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
MODULE_FIRMWARE("radeon/PALM_pfp.bin");
MODULE_FIRMWARE("radeon/PALM_me.bin");
MODULE_FIRMWARE("radeon/SUMO_rlc.bin");
MODULE_FIRMWARE("radeon/SUMO_pfp.bin");
MODULE_FIRMWARE("radeon/SUMO_me.bin");
MODULE_FIRMWARE("radeon/SUMO2_pfp.bin");
MODULE_FIRMWARE("radeon/SUMO2_me.bin");
int r600_debugfs_mc_info_init(struct radeon_device *rdev);

/* This file gathers functions specific to:
 * r600,rv610,rv630,rv620,rv635,rv670
 */
int r600_mc_wait_for_idle(struct radeon_device *rdev);
void r600_gpu_init(struct radeon_device *rdev);
void r600_fini(struct radeon_device *rdev);
void r600_irq_disable(struct radeon_device *rdev);
static void r600_pcie_gen2_enable(struct radeon_device *rdev);
/* get temperature in millidegrees */
int rv6xx_get_temp(struct radeon_device *rdev)
{
	u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >>
		ASIC_T_SHIFT;
	int actual_temp = temp & 0xff;

	if (temp & 0x100)
		actual_temp -= 256;

	return actual_temp * 1000;
}
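/* Select the next power state/clock mode for dynamic power management.
 * The choice is only recorded in rdev->pm.requested_*; the caller applies
 * it afterwards.
 */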
void r600_pm_get_dynpm_state(struct radeon_device *rdev)
{
	int i;

	rdev->pm.dynpm_can_upclock = true;
	rdev->pm.dynpm_can_downclock = true;

	/* power state array is low to high, default is first */
	if ((rdev->flags & RADEON_IS_IGP) || (rdev->family == CHIP_R600)) {
		int min_power_state_index = 0;

		if (rdev->pm.num_power_states > 2)
			min_power_state_index = 1;

		switch (rdev->pm.dynpm_planned_action) {
		case DYNPM_ACTION_MINIMUM:
			rdev->pm.requested_power_state_index = min_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_downclock = false;
			break;
		case DYNPM_ACTION_DOWNCLOCK:
			if (rdev->pm.current_power_state_index == min_power_state_index) {
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.dynpm_can_downclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					for (i = 0; i < rdev->pm.num_power_states; i++) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i >= rdev->pm.current_power_state_index) {
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else {
					if (rdev->pm.current_power_state_index == 0)
						rdev->pm.requested_power_state_index =
							rdev->pm.num_power_states - 1;
					else
						rdev->pm.requested_power_state_index =
							rdev->pm.current_power_state_index - 1;
				}
			}
			rdev->pm.requested_clock_mode_index = 0;
			/* don't use the power state if crtcs are active and no display flag is set */
			if ((rdev->pm.active_crtc_count > 0) &&
			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
			     clock_info[rdev->pm.requested_clock_mode_index].flags &
			     RADEON_PM_MODE_NO_DISPLAY)) {
				rdev->pm.requested_power_state_index++;
			}
			break;
		case DYNPM_ACTION_UPCLOCK:
			if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.dynpm_can_upclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i <= rdev->pm.current_power_state_index) {
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else
					rdev->pm.requested_power_state_index =
						rdev->pm.current_power_state_index + 1;
			}
			rdev->pm.requested_clock_mode_index = 0;
			break;
		case DYNPM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_upclock = false;
			break;
		case DYNPM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for undefined action\n");
			return;
		}
	} else {
		/* XXX select a power state based on AC/DC, single/dualhead, etc. */
		/* for now just select the first power state and switch between clock modes */
		/* power state array is low to high, default is first (0) */
		if (rdev->pm.active_crtc_count > 1) {
			rdev->pm.requested_power_state_index = -1;
			/* start at 1 as we don't want the default mode */
			for (i = 1; i < rdev->pm.num_power_states; i++) {
				if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
					continue;
				else if ((rdev->pm.power_state[i].type == POWER_STATE_TYPE_PERFORMANCE) ||
					 (rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY)) {
					rdev->pm.requested_power_state_index = i;
					break;
				}
			}
			/* if nothing selected, grab the default state. */
			if (rdev->pm.requested_power_state_index == -1)
				rdev->pm.requested_power_state_index = 0;
		} else
			rdev->pm.requested_power_state_index = 1;

		switch (rdev->pm.dynpm_planned_action) {
		case DYNPM_ACTION_MINIMUM:
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_downclock = false;
			break;
		case DYNPM_ACTION_DOWNCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index == 0) {
					rdev->pm.requested_clock_mode_index = 0;
					rdev->pm.dynpm_can_downclock = false;
				} else
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index - 1;
			} else {
				rdev->pm.requested_clock_mode_index = 0;
				rdev->pm.dynpm_can_downclock = false;
			}
			/* don't use the power state if crtcs are active and no display flag is set */
			if ((rdev->pm.active_crtc_count > 0) &&
			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
			     clock_info[rdev->pm.requested_clock_mode_index].flags &
			     RADEON_PM_MODE_NO_DISPLAY)) {
				rdev->pm.requested_clock_mode_index++;
			}
			break;
		case DYNPM_ACTION_UPCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index ==
				    (rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1)) {
					rdev->pm.requested_clock_mode_index = rdev->pm.current_clock_mode_index;
					rdev->pm.dynpm_can_upclock = false;
				} else
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index + 1;
			} else {
				rdev->pm.requested_clock_mode_index =
					rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1;
				rdev->pm.dynpm_can_upclock = false;
			}
			break;
		case DYNPM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_upclock = false;
			break;
		case DYNPM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for undefined action\n");
			return;
		}
	}

	DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n",
		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
		  clock_info[rdev->pm.requested_clock_mode_index].sclk,
		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
		  clock_info[rdev->pm.requested_clock_mode_index].mclk,
		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
		  pcie_lanes);
}
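/* Fill the static power-profile table (dpms on/off power state and clock
 * mode indices for each profile) based on how many power states the BIOS
 * exposes.
 */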
void rs780_pm_init_profile(struct radeon_device *rdev)
{
	if (rdev->pm.num_power_states == 2) {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else if (rdev->pm.num_power_states == 3) {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 3;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 3;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	}
}
void r600_pm_init_profile(struct radeon_device *rdev)
{
	int idx;

	if (rdev->family == CHIP_R600) {
		/* XXX */
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else {
		if (rdev->pm.num_power_states < 4) {
			/* default */
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
			/* low sh */
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
			/* mid sh */
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
			/* high sh */
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
			/* low mh */
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
			/* mid mh */
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
			/* high mh */
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
		} else {
			/* default */
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
			/* low sh */
			if (rdev->flags & RADEON_IS_MOBILITY)
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
			else
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
			/* mid sh */
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
			/* high sh */
			idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
			/* low mh */
			if (rdev->flags & RADEON_IS_MOBILITY)
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
			else
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
			/* mid mh */
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
			/* high mh */
			idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
		}
	}
}
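/* Apply the side effects of the requested power state; on r6xx this means
 * programming the VDDC voltage through the atom tables.
 */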
void r600_pm_misc(struct radeon_device *rdev)
{
	int req_ps_idx = rdev->pm.requested_power_state_index;
	int req_cm_idx = rdev->pm.requested_clock_mode_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
	struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;

	if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
		/* 0xff01 is a flag rather than an actual voltage */
		if (voltage->voltage == 0xff01)
			return;
		if (voltage->voltage != rdev->pm.current_vddc) {
			radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
			rdev->pm.current_vddc = voltage->voltage;
			DRM_DEBUG_DRIVER("Setting: v: %d\n", voltage->voltage);
		}
	}
}
bool r600_gui_idle(struct radeon_device *rdev)
{
	if (RREG32(GRBM_STATUS) & GUI_ACTIVE)
		return false;
	else
		return true;
}
/* hpd for digital panel detect/disconnect */
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	bool connected = false;

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_4:
			if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
			/* DCE 3.2 */
		case RADEON_HPD_5:
			if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_6:
			if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	}
	return connected;
}
void r600_hpd_set_polarity(struct radeon_device *rdev,
			   enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = r600_hpd_sense(rdev, hpd);

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HPD1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HPD2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HPD3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD3_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_4:
			tmp = RREG32(DC_HPD4_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD4_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_5:
			tmp = RREG32(DC_HPD5_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
			break;
			/* DCE 3.2 */
		case RADEON_HPD_6:
			tmp = RREG32(DC_HPD6_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	}
}
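/* Enable the HPD pins and their interrupts so connector hot-plug events
 * are reported; polarity is set to match the current sense state.
 */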
void r600_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);

		if (ASIC_IS_DCE3(rdev)) {
			u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
			if (ASIC_IS_DCE32(rdev))
				tmp |= DC_HPDx_EN;

			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, tmp);
				rdev->irq.hpd[0] = true;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, tmp);
				rdev->irq.hpd[1] = true;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, tmp);
				rdev->irq.hpd[2] = true;
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, tmp);
				rdev->irq.hpd[3] = true;
				break;
				/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, tmp);
				rdev->irq.hpd[4] = true;
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, tmp);
				rdev->irq.hpd[5] = true;
				break;
			default:
				break;
			}
		} else {
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				rdev->irq.hpd[0] = true;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				rdev->irq.hpd[1] = true;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				rdev->irq.hpd[2] = true;
				break;
			default:
				break;
			}
		}
		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
	}
	if (rdev->irq.installed)
		r600_irq_set(rdev);
}
void r600_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	if (ASIC_IS_DCE3(rdev)) {
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, 0);
				rdev->irq.hpd[0] = false;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, 0);
				rdev->irq.hpd[1] = false;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, 0);
				rdev->irq.hpd[2] = false;
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, 0);
				rdev->irq.hpd[3] = false;
				break;
				/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, 0);
				rdev->irq.hpd[4] = false;
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, 0);
				rdev->irq.hpd[5] = false;
				break;
			default:
				break;
			}
		}
	} else {
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
				rdev->irq.hpd[0] = false;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
				rdev->irq.hpd[1] = false;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
				rdev->irq.hpd[2] = false;
				break;
			default:
				break;
			}
		}
	}
}
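/* Flush the HDP cache and invalidate the GART TLB for VM context 0,
 * polling VM_CONTEXT0_REQUEST_RESPONSE until the MC acknowledges.
 */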
void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	/* flush hdp cache so updates hit vram */
	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
	    !(rdev->flags & RADEON_IS_AGP)) {
		void __iomem *ptr = (void *)rdev->gart.ptr;
		u32 tmp;

		/* r7xx hw bug. write to HDP_DEBUG1 followed by fb read
		 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL.
		 * This seems to cause problems on some AGP cards. Just use the old
		 * method for them.
		 */
		WREG32(HDP_DEBUG1, 0);
		tmp = readl((void __iomem *)ptr);
	} else
		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
		if (tmp == 2) {
			printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
			return;
		}
		if (tmp) {
			return;
		}
		udelay(1);
	}
}
int r600_pcie_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.robj) {
		WARN(1, "R600 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r)
		return r;
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
	return radeon_gart_table_vram_alloc(rdev);
}
int r600_pcie_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r, i;

	if (rdev->gart.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(rdev->dummy_page.addr >> 12));
	for (i = 1; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	r600_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}
void r600_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Disable all tables */
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	/* Disable L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
			   EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup L1 TLB control */
	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
	      ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	radeon_gart_table_vram_unpin(rdev);
}
void r600_pcie_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	r600_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
}
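/* AGP path: program the L2/TLB the same way as the PCIE GART path, but
 * leave every VM context disabled; the AGP aperture is translated by the
 * MC itself.
 */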
void r600_agp_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
}
int r600_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(R_000E50_SRBM_STATUS) & 0x3F00;
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -1;
}
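/* Reprogram the MC apertures (VRAM, system, AGP) with the MC idle and
 * display/VGA accesses stopped, then restore the saved display state.
 */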
static void r600_mc_program(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Lockout access through VGA aperture (doesn't exist before R600) */
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	if (rdev->flags & RADEON_IS_AGP) {
		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
			/* VRAM before AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.vram_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.gtt_end >> 12);
		} else {
			/* VRAM after AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.gtt_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.vram_end >> 12);
		}
	} else {
		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
	}
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
	} else {
		WREG32(MC_VM_AGP_BASE, 0);
		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	}
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	rv515_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it overwriting our objects */
	rv515_vga_render_disable(rdev);
}
/**
 * r600_vram_gtt_location - try to find VRAM & GTT location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * This function tries to place VRAM at the same address as it appears in
 * the CPU (PCI) address space, as some GPUs have issues when it is
 * reprogrammed to a different address.
 *
 * If there is not enough space to fit the invisible VRAM after the
 * aperture, then we limit the VRAM size to the aperture.
 *
 * If we are using AGP, then place VRAM adjacent to the AGP aperture; the
 * GPU needs them to be contiguous from its point of view so that it can
 * catch accesses outside of them (weird GPU policy, see ??).
 *
 * This function never fails; in the worst case VRAM or GTT is limited.
 *
 * Note: GTT start, end, and size should be initialized before calling this
 * function on AGP platforms.
 */
static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_bf, size_af;

	if (mc->mc_vram_size > 0xE0000000) {
		/* leave room for at least 512M GTT */
		dev_warn(rdev->dev, "limiting VRAM\n");
		mc->real_vram_size = 0xE0000000;
		mc->mc_vram_size = 0xE0000000;
	}
	if (rdev->flags & RADEON_IS_AGP) {
		size_bf = mc->gtt_start;
		size_af = 0xFFFFFFFF - mc->gtt_end + 1;
		if (size_bf > size_af) {
			if (mc->mc_vram_size > size_bf) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_bf;
				mc->mc_vram_size = size_bf;
			}
			mc->vram_start = mc->gtt_start - mc->mc_vram_size;
		} else {
			if (mc->mc_vram_size > size_af) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_af;
				mc->mc_vram_size = size_af;
			}
			mc->vram_start = mc->gtt_end;
		}
		mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
		dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
				mc->mc_vram_size >> 20, mc->vram_start,
				mc->vram_end, mc->real_vram_size >> 20);
	} else {
		u64 base = 0;
		if (rdev->flags & RADEON_IS_IGP) {
			base = RREG32(MC_VM_FB_LOCATION) & 0xFFFF;
			base <<= 24;
		}
		radeon_vram_location(rdev, &rdev->mc, base);
		rdev->mc.gtt_base_align = 0;
		radeon_gtt_location(rdev, mc);
	}
}
int r600_mc_init(struct radeon_device *rdev)
{
	u32 tmp;
	int chansize, numchan;

	/* Get VRAM information */
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32(RAMCFG);
	if (tmp & CHANSIZE_OVERRIDE) {
		chansize = 16;
	} else if (tmp & CHANSIZE_MASK) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(CHMAP);
	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	}
	rdev->mc.vram_width = numchan * chansize;
	/* Could aper size report 0 ? */
	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
	/* Setup GPU memory space */
	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.visible_vram_size = rdev->mc.aper_size;
	r600_vram_gtt_location(rdev, &rdev->mc);

	if (rdev->flags & RADEON_IS_IGP) {
		rs690_pm_info(rdev);
		rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
	}
	radeon_update_bandwidth_info(rdev);
	return 0;
}
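/* Allocate and pin one page of VRAM used as the MC's default system
 * aperture address, so stray GPU accesses land somewhere harmless.
 */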
int r600_vram_scratch_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->vram_scratch.robj == NULL) {
		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
				     PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
				     &rdev->vram_scratch.robj);
		if (r) {
			return r;
		}
	}

	r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_pin(rdev->vram_scratch.robj,
			  RADEON_GEM_DOMAIN_VRAM, &rdev->vram_scratch.gpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->vram_scratch.robj);
		return r;
	}
	r = radeon_bo_kmap(rdev->vram_scratch.robj,
				(void **)&rdev->vram_scratch.ptr);
	if (r)
		radeon_bo_unpin(rdev->vram_scratch.robj);
	radeon_bo_unreserve(rdev->vram_scratch.robj);

	return r;
}
void r600_vram_scratch_fini(struct radeon_device *rdev)
{
	int r;

	if (rdev->vram_scratch.robj == NULL) {
		return;
	}
	r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
	if (likely(r == 0)) {
		radeon_bo_kunmap(rdev->vram_scratch.robj);
		radeon_bo_unpin(rdev->vram_scratch.robj);
		radeon_bo_unreserve(rdev->vram_scratch.robj);
	}
	radeon_bo_unref(&rdev->vram_scratch.robj);
}
/* We don't check whether the GPU really needs a reset; we simply do the
 * reset, it's up to the caller to determine if the GPU needs one. We
 * might add a helper function to check that.
 */
int r600_gpu_soft_reset(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) |
				S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) |
				S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) |
				S_008010_SH_BUSY(1) | S_008010_SPI03_BUSY(1) |
				S_008010_SMX_BUSY(1) | S_008010_SC_BUSY(1) |
				S_008010_PA_BUSY(1) | S_008010_DB03_BUSY(1) |
				S_008010_CR_BUSY(1) | S_008010_CB03_BUSY(1) |
				S_008010_GUI_ACTIVE(1);
	u32 grbm2_busy_mask = S_008014_SPI0_BUSY(1) | S_008014_SPI1_BUSY(1) |
				S_008014_SPI2_BUSY(1) | S_008014_SPI3_BUSY(1) |
				S_008014_TA0_BUSY(1) | S_008014_TA1_BUSY(1) |
				S_008014_TA2_BUSY(1) | S_008014_TA3_BUSY(1) |
				S_008014_DB0_BUSY(1) | S_008014_DB1_BUSY(1) |
				S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) |
				S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
				S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
	u32 tmp;

	if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
		return 0;

	dev_info(rdev->dev, "GPU softreset\n");
	dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n",
		RREG32(R_008010_GRBM_STATUS));
	dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n",
		RREG32(R_008014_GRBM_STATUS2));
	dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n",
		RREG32(R_000E50_SRBM_STATUS));
	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Disable CP parsing/prefetching */
	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
	/* Check if any of the rendering block is busy and reset it */
	if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
	    (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
		tmp = S_008020_SOFT_RESET_CR(1) |
			S_008020_SOFT_RESET_DB(1) |
			S_008020_SOFT_RESET_CB(1) |
			S_008020_SOFT_RESET_PA(1) |
			S_008020_SOFT_RESET_SC(1) |
			S_008020_SOFT_RESET_SMX(1) |
			S_008020_SOFT_RESET_SPI(1) |
			S_008020_SOFT_RESET_SX(1) |
			S_008020_SOFT_RESET_SH(1) |
			S_008020_SOFT_RESET_TC(1) |
			S_008020_SOFT_RESET_TA(1) |
			S_008020_SOFT_RESET_VC(1) |
			S_008020_SOFT_RESET_VGT(1);
		dev_info(rdev->dev, " R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(R_008020_GRBM_SOFT_RESET, tmp);
		RREG32(R_008020_GRBM_SOFT_RESET);
		mdelay(15);
		WREG32(R_008020_GRBM_SOFT_RESET, 0);
	}
	/* Reset CP (we always reset CP) */
	tmp = S_008020_SOFT_RESET_CP(1);
	dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
	WREG32(R_008020_GRBM_SOFT_RESET, tmp);
	RREG32(R_008020_GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(R_008020_GRBM_SOFT_RESET, 0);
	/* Wait a little for things to settle down */
	mdelay(1);
	dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n",
		RREG32(R_008010_GRBM_STATUS));
	dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n",
		RREG32(R_008014_GRBM_STATUS2));
	dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n",
		RREG32(R_000E50_SRBM_STATUS));
	rv515_mc_resume(rdev, &save);
	return 0;
}
bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 srbm_status;
	u32 grbm_status;
	u32 grbm_status2;
	struct r100_gpu_lockup *lockup;
	int r;

	if (rdev->family >= CHIP_RV770)
		lockup = &rdev->config.rv770.lockup;
	else
		lockup = &rdev->config.r600.lockup;

	srbm_status = RREG32(R_000E50_SRBM_STATUS);
	grbm_status = RREG32(R_008010_GRBM_STATUS);
	grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
	if (!G_008010_GUI_ACTIVE(grbm_status)) {
		r100_gpu_lockup_update(lockup, ring);
		return false;
	}
	/* force CP activities */
	r = radeon_ring_lock(rdev, ring, 2);
	if (!r) {
		/* PACKET2 NOP */
		radeon_ring_write(ring, 0x80000000);
		radeon_ring_write(ring, 0x80000000);
		radeon_ring_unlock_commit(rdev, ring);
	}
	ring->rptr = RREG32(ring->rptr_reg);
	return r100_gpu_cp_is_lockup(rdev, lockup, ring);
}
int r600_asic_reset(struct radeon_device *rdev)
{
	return r600_gpu_soft_reset(rdev);
}
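/* Build the 2-bits-per-pipe map that routes each tile pipe to an enabled
 * render backend, skipping backends named in backend_disable_mask.
 */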
static u32 r600_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
					     u32 num_backends,
					     u32 backend_disable_mask)
{
	u32 backend_map = 0;
	u32 enabled_backends_mask;
	u32 enabled_backends_count;
	u32 cur_pipe;
	u32 swizzle_pipe[R6XX_MAX_PIPES];
	u32 cur_backend;
	u32 i;

	if (num_tile_pipes > R6XX_MAX_PIPES)
		num_tile_pipes = R6XX_MAX_PIPES;
	if (num_tile_pipes < 1)
		num_tile_pipes = 1;
	if (num_backends > R6XX_MAX_BACKENDS)
		num_backends = R6XX_MAX_BACKENDS;
	if (num_backends < 1)
		num_backends = 1;

	enabled_backends_mask = 0;
	enabled_backends_count = 0;
	for (i = 0; i < R6XX_MAX_BACKENDS; ++i) {
		if (((backend_disable_mask >> i) & 1) == 0) {
			enabled_backends_mask |= (1 << i);
			++enabled_backends_count;
		}
		if (enabled_backends_count == num_backends)
			break;
	}

	if (enabled_backends_count == 0) {
		enabled_backends_mask = 1;
		enabled_backends_count = 1;
	}

	if (enabled_backends_count != num_backends)
		num_backends = enabled_backends_count;

	memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R6XX_MAX_PIPES);
	switch (num_tile_pipes) {
	case 1:
		swizzle_pipe[0] = 0;
		break;
	case 2:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		break;
	case 3:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		swizzle_pipe[2] = 2;
		break;
	case 4:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		swizzle_pipe[2] = 2;
		swizzle_pipe[3] = 3;
		break;
	case 5:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		swizzle_pipe[2] = 2;
		swizzle_pipe[3] = 3;
		swizzle_pipe[4] = 4;
		break;
	case 6:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 2;
		swizzle_pipe[2] = 4;
		swizzle_pipe[3] = 5;
		swizzle_pipe[4] = 1;
		swizzle_pipe[5] = 3;
		break;
	case 7:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 2;
		swizzle_pipe[2] = 4;
		swizzle_pipe[3] = 6;
		swizzle_pipe[4] = 1;
		swizzle_pipe[5] = 3;
		swizzle_pipe[6] = 5;
		break;
	case 8:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 2;
		swizzle_pipe[2] = 4;
		swizzle_pipe[3] = 6;
		swizzle_pipe[4] = 1;
		swizzle_pipe[5] = 3;
		swizzle_pipe[6] = 5;
		swizzle_pipe[7] = 7;
		break;
	}

	cur_backend = 0;
	for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
		while (((1 << cur_backend) & enabled_backends_mask) == 0)
			cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;

		backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2)));

		cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
	}

	return backend_map;
}
int r600_count_pipe_bits(uint32_t val)
{
	int i, ret = 0;

	for (i = 0; i < 32; i++) {
		ret += val & 1;
		val >>= 1;
	}
	return ret;
}
void r600_gpu_init(struct radeon_device *rdev)
{
	u32 tiling_config;
	u32 ramcfg;
	u32 backend_map;
	u32 cc_rb_backend_disable;
	u32 cc_gc_shader_pipe_config;
	u32 tmp;
	int i, j;
	u32 sq_config;
	u32 sq_gpr_resource_mgmt_1 = 0;
	u32 sq_gpr_resource_mgmt_2 = 0;
	u32 sq_thread_resource_mgmt = 0;
	u32 sq_stack_resource_mgmt_1 = 0;
	u32 sq_stack_resource_mgmt_2 = 0;

	/* FIXME: implement */
	switch (rdev->family) {
	case CHIP_R600:
		rdev->config.r600.max_pipes = 4;
		rdev->config.r600.max_tile_pipes = 8;
		rdev->config.r600.max_simds = 4;
		rdev->config.r600.max_backends = 4;
		rdev->config.r600.max_gprs = 256;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 256;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 16;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	case CHIP_RV630:
	case CHIP_RV635:
		rdev->config.r600.max_pipes = 2;
		rdev->config.r600.max_tile_pipes = 2;
		rdev->config.r600.max_simds = 3;
		rdev->config.r600.max_backends = 1;
		rdev->config.r600.max_gprs = 128;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 128;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 4;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		rdev->config.r600.max_pipes = 1;
		rdev->config.r600.max_tile_pipes = 1;
		rdev->config.r600.max_simds = 2;
		rdev->config.r600.max_backends = 1;
		rdev->config.r600.max_gprs = 128;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 128;
		rdev->config.r600.max_hw_contexts = 4;
		rdev->config.r600.max_gs_threads = 4;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 1;
		break;
	case CHIP_RV670:
		rdev->config.r600.max_pipes = 4;
		rdev->config.r600.max_tile_pipes = 4;
		rdev->config.r600.max_simds = 4;
		rdev->config.r600.max_backends = 4;
		rdev->config.r600.max_gprs = 192;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 256;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 16;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	default:
		break;
	}
	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}

	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));

	/* Setup tiling */
	tiling_config = 0;
	ramcfg = RREG32(RAMCFG);
	switch (rdev->config.r600.max_tile_pipes) {
	case 1:
		tiling_config |= PIPE_TILING(0);
		break;
	case 2:
		tiling_config |= PIPE_TILING(1);
		break;
	case 4:
		tiling_config |= PIPE_TILING(2);
		break;
	case 8:
		tiling_config |= PIPE_TILING(3);
		break;
	default:
		break;
	}
	rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
	rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
	tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
	tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
	if ((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT)
		rdev->config.r600.tiling_group_size = 512;
	else
		rdev->config.r600.tiling_group_size = 256;
	tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
	if (tmp > 3) {
		tiling_config |= ROW_TILING(3);
		tiling_config |= SAMPLE_SPLIT(3);
	} else {
		tiling_config |= ROW_TILING(tmp);
		tiling_config |= SAMPLE_SPLIT(tmp);
	}
	tiling_config |= BANK_SWAPS(1);

	cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
	cc_rb_backend_disable |=
		BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK);

	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
	cc_gc_shader_pipe_config |=
		INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK);
	cc_gc_shader_pipe_config |=
		INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK);

	backend_map = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes,
							(R6XX_MAX_BACKENDS -
							 r600_count_pipe_bits((cc_rb_backend_disable &
									       R6XX_MAX_BACKENDS_MASK) >> 16)),
							(cc_rb_backend_disable >> 16));
	rdev->config.r600.tile_config = tiling_config;
	rdev->config.r600.backend_map = backend_map;
	tiling_config |= BACKEND_MAP(backend_map);
	WREG32(GB_TILING_CONFIG, tiling_config);
	WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
	WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);

	/* Setup pipes */
	WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
	WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
	WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);

	tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
	WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
	WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);
	/* Setup some CP states */
	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | ROQ_IB2_START(0x2b)));
	WREG32(CP_MEQ_THRESHOLDS, (MEQ_END(0x40) | ROQ_END(0x40)));

	WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | SYNC_GRADIENT |
			     SYNC_WALKER | SYNC_ALIGNER));
	/* Setup various GPU states */
	if (rdev->family == CHIP_RV670)
		WREG32(ARB_GDEC_RD_CNTL, 0x00000021);

	tmp = RREG32(SX_DEBUG_1);
	tmp |= SMX_EVENT_RELEASE;
	if ((rdev->family > CHIP_R600))
		tmp |= ENABLE_NEW_SMX_ADDRESS;
	WREG32(SX_DEBUG_1, tmp);

	if (((rdev->family) == CHIP_R600) ||
	    ((rdev->family) == CHIP_RV630) ||
	    ((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE);
	} else {
		WREG32(DB_DEBUG, 0);
	}
	WREG32(DB_WATERMARKS, (DEPTH_FREE(4) | DEPTH_CACHELINE_FREE(16) |
			       DEPTH_FLUSH(16) | DEPTH_PENDING_FREE(4)));

	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
	WREG32(VGT_NUM_INSTANCES, 0);

	WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(0));

	tmp = RREG32(SQ_MS_FIFO_SIZES);
	if (((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		tmp = (CACHE_FIFO_SIZE(0xa) |
		       FETCH_FIFO_HIWATER(0xa) |
		       DONE_FIFO_HIWATER(0xe0) |
		       ALU_UPDATE_FIFO_HIWATER(0x8));
	} else if (((rdev->family) == CHIP_R600) ||
		   ((rdev->family) == CHIP_RV630)) {
		tmp &= ~DONE_FIFO_HIWATER(0xff);
		tmp |= DONE_FIFO_HIWATER(0x4);
	}
	WREG32(SQ_MS_FIFO_SIZES, tmp);
1720 /* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
1721 * should be adjusted as needed by the 2D/3D drivers. This just sets default values
1723 sq_config = RREG32(SQ_CONFIG);
1724 sq_config &= ~(PS_PRIO(3) |
1728 sq_config |= (DX9_CONSTS |
1735 if ((rdev->family) == CHIP_R600) {
1736 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(124) |
1738 NUM_CLAUSE_TEMP_GPRS(4));
1739 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(0) |
1741 sq_thread_resource_mgmt = (NUM_PS_THREADS(136) |
1742 NUM_VS_THREADS(48) |
1745 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(128) |
1746 NUM_VS_STACK_ENTRIES(128));
1747 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(0) |
1748 NUM_ES_STACK_ENTRIES(0));
	} else if (((rdev->family) == CHIP_RV610) ||
		   ((rdev->family) == CHIP_RV620) ||
		   ((rdev->family) == CHIP_RS780) ||
		   ((rdev->family) == CHIP_RS880)) {
		/* no vertex cache */
		sq_config &= ~VC_ENABLE;

		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
					  NUM_ES_GPRS(17));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
					    NUM_VS_STACK_ENTRIES(40));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
					    NUM_ES_STACK_ENTRIES(16));
	} else if (((rdev->family) == CHIP_RV630) ||
		   ((rdev->family) == CHIP_RV635)) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(18) |
					  NUM_ES_GPRS(18));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
					    NUM_VS_STACK_ENTRIES(40));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
					    NUM_ES_STACK_ENTRIES(16));
	} else if ((rdev->family) == CHIP_RV670) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
					  NUM_ES_GPRS(17));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(64) |
					    NUM_VS_STACK_ENTRIES(64));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(64) |
					    NUM_ES_STACK_ENTRIES(64));
	}
	WREG32(SQ_CONFIG, sq_config);
	WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
	WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
	WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
	WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
	WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
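	/*
	 * Aside (arithmetic, not from any register spec): the R600 split
	 * above appears to fill the 256-entry GPR file exactly if the
	 * clause-temp GPRs are accounted twice:
	 * 124 (PS) + 124 (VS) + 2 * 4 (clause temps) = 256.
	 */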
	if (((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY));
	} else {
		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC));
	}

	/* More default values. 2D/3D driver should adjust as needed */
	WREG32(PA_SC_AA_SAMPLE_LOCS_2S, (S0_X(0xc) | S0_Y(0x4) |
					 S1_X(0x4) | S1_Y(0xc)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_4S, (S0_X(0xe) | S0_Y(0xe) |
					 S1_X(0x2) | S1_Y(0x2) |
					 S2_X(0xa) | S2_Y(0x6) |
					 S3_X(0x6) | S3_Y(0xa)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD0, (S0_X(0xe) | S0_Y(0xb) |
					     S1_X(0x4) | S1_Y(0xc) |
					     S2_X(0x1) | S2_Y(0x6) |
					     S3_X(0xa) | S3_Y(0xe)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD1, (S4_X(0x6) | S4_Y(0x1) |
					     S5_X(0x0) | S5_Y(0x0) |
					     S6_X(0xb) | S6_Y(0x4) |
					     S7_X(0x7) | S7_Y(0x8)));
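	/*
	 * Our reading of the S*_X/S*_Y fields (not spelled out here): they
	 * are 4-bit subpixel coordinates in 1/16ths of a pixel, so in the
	 * 2-sample pattern above sample 0 sits at (0xc/16, 0x4/16) =
	 * (0.75, 0.25) and sample 1 mirrors it at (0.25, 0.75).
	 */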
	WREG32(VGT_STRMOUT_EN, 0);
	tmp = rdev->config.r600.max_pipes * 16;
	switch (rdev->family) {
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		tmp += 32;
		break;
	case CHIP_RV710:
		tmp += 128;
		break;
	default:
		break;
	}
	if (tmp > 256)
		tmp = 256;
	WREG32(VGT_ES_PER_GS, 128);
	WREG32(VGT_GS_PER_ES, tmp);
	WREG32(VGT_GS_PER_VS, 2);
	WREG32(VGT_GS_VERTEX_REUSE, 16);
	/* more default values. 2D/3D driver should adjust as needed */
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
	WREG32(VGT_STRMOUT_EN, 0);
	WREG32(SX_MISC, 0);
	WREG32(PA_SC_MODE_CNTL, 0);
	WREG32(PA_SC_AA_CONFIG, 0);
	WREG32(PA_SC_LINE_STIPPLE, 0);
	WREG32(SPI_INPUT_Z, 0);
	WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
	WREG32(CB_COLOR7_FRAG, 0);

	/* Clear render buffer base addresses */
	WREG32(CB_COLOR0_BASE, 0);
	WREG32(CB_COLOR1_BASE, 0);
	WREG32(CB_COLOR2_BASE, 0);
	WREG32(CB_COLOR3_BASE, 0);
	WREG32(CB_COLOR4_BASE, 0);
	WREG32(CB_COLOR5_BASE, 0);
	WREG32(CB_COLOR6_BASE, 0);
	WREG32(CB_COLOR7_BASE, 0);
	WREG32(CB_COLOR7_FRAG, 0);
	switch (rdev->family) {
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		tmp = TC_L2_SIZE(8);
		break;
	case CHIP_RV630:
	case CHIP_RV635:
		tmp = TC_L2_SIZE(4);
		break;
	case CHIP_R600:
		tmp = TC_L2_SIZE(0) | L2_DISABLE_LATE_HIT;
		break;
	default:
		tmp = TC_L2_SIZE(0);
		break;
	}
	WREG32(TC_CNTL, tmp);
	tmp = RREG32(HDP_HOST_PATH_CNTL);
	WREG32(HDP_HOST_PATH_CNTL, tmp);

	tmp = RREG32(ARB_POP);
	tmp |= ENABLE_TC128;
	WREG32(ARB_POP, tmp);

	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
	WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
			       NUM_CLIP_SEQ(3)));
	WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
}

/*
 * Indirect registers accessor
 */
u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
{
	u32 r;

	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
	(void)RREG32(PCIE_PORT_INDEX);
	r = RREG32(PCIE_PORT_DATA);
	return r;
}

void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
	(void)RREG32(PCIE_PORT_INDEX);
	WREG32(PCIE_PORT_DATA, (v));
	(void)RREG32(PCIE_PORT_DATA);
}
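
/*
 * Usage sketch for the accessors above (illustrative only; FOO and
 * FOO_ENABLE are hypothetical names, not real port registers):
 *
 *	u32 v = r600_pciep_rreg(rdev, FOO);
 *	r600_pciep_wreg(rdev, FOO, v | FOO_ENABLE);
 *
 * The throwaway RREG32(PCIE_PORT_INDEX) after each index write posts the
 * index before the data port is touched.
 */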

/*
 * CP & Ring
 */
void r600_cp_stop(struct radeon_device *rdev)
{
	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
	WREG32(SCRATCH_UMSK, 0);
}
int r600_init_microcode(struct radeon_device *rdev)
{
	struct platform_device *pdev;
	const char *chip_name;
	const char *rlc_chip_name;
	size_t pfp_req_size, me_req_size, rlc_req_size;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
	err = IS_ERR(pdev);
	if (err) {
		printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
		return -EINVAL;
	}
	switch (rdev->family) {
	case CHIP_R600:
		chip_name = "R600";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV610:
		chip_name = "RV610";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV630:
		chip_name = "RV630";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV620:
		chip_name = "RV620";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV635:
		chip_name = "RV635";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV670:
		chip_name = "RV670";
		rlc_chip_name = "R600";
		break;
	case CHIP_RS780:
	case CHIP_RS880:
		chip_name = "RS780";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV770:
		chip_name = "RV770";
		rlc_chip_name = "R700";
		break;
	case CHIP_RV730:
	case CHIP_RV740:
		chip_name = "RV730";
		rlc_chip_name = "R700";
		break;
	case CHIP_RV710:
		chip_name = "RV710";
		rlc_chip_name = "R700";
		break;
	case CHIP_CEDAR:
		chip_name = "CEDAR";
		rlc_chip_name = "CEDAR";
		break;
	case CHIP_REDWOOD:
		chip_name = "REDWOOD";
		rlc_chip_name = "REDWOOD";
		break;
	case CHIP_JUNIPER:
		chip_name = "JUNIPER";
		rlc_chip_name = "JUNIPER";
		break;
	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
		chip_name = "CYPRESS";
		rlc_chip_name = "CYPRESS";
		break;
	case CHIP_PALM:
		chip_name = "PALM";
		rlc_chip_name = "SUMO";
		break;
	case CHIP_SUMO:
		chip_name = "SUMO";
		rlc_chip_name = "SUMO";
		break;
	case CHIP_SUMO2:
		chip_name = "SUMO2";
		rlc_chip_name = "SUMO";
		break;
	default: BUG();
	}

	if (rdev->family >= CHIP_CEDAR) {
		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
	} else if (rdev->family >= CHIP_RV770) {
		pfp_req_size = R700_PFP_UCODE_SIZE * 4;
		me_req_size = R700_PM4_UCODE_SIZE * 4;
		rlc_req_size = R700_RLC_UCODE_SIZE * 4;
	} else {
		pfp_req_size = PFP_UCODE_SIZE * 4;
		me_req_size = PM4_UCODE_SIZE * 12;
		rlc_req_size = RLC_UCODE_SIZE * 4;
	}
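
	/*
	 * The *_UCODE_SIZE constants count 32-bit dwords while the firmware
	 * files are byte streams, hence "* 4".  The r6xx ME image holds
	 * PM4_UCODE_SIZE * 3 dwords (see the load loop in
	 * r600_cp_load_microcode()), hence "* 12" bytes.  For R600 that is:
	 * pfp 576 * 4 = 2304 bytes, me 1792 * 12 = 21504 bytes,
	 * rlc 768 * 4 = 3072 bytes.
	 */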

	DRM_INFO("Loading %s Microcode\n", chip_name);

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
	err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->pfp_fw->size != pfp_req_size) {
		printk(KERN_ERR
		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->pfp_fw->size, fw_name);
		err = -EINVAL;
		goto out;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
	err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->me_fw->size != me_req_size) {
		printk(KERN_ERR
		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->me_fw->size, fw_name);
		err = -EINVAL;
		goto out;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
	err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->rlc_fw->size != rlc_req_size) {
		printk(KERN_ERR
		       "r600_rlc: Bogus length %zu in firmware \"%s\"\n",
		       rdev->rlc_fw->size, fw_name);
		err = -EINVAL;
		goto out;
	}

out:
	platform_device_unregister(pdev);

	if (err) {
		if (err != -EINVAL)
			printk(KERN_ERR
			       "r600_cp: Failed to load firmware \"%s\"\n",
			       fw_name);
		release_firmware(rdev->pfp_fw);
		rdev->pfp_fw = NULL;
		release_firmware(rdev->me_fw);
		rdev->me_fw = NULL;
		release_firmware(rdev->rlc_fw);
		rdev->rlc_fw = NULL;
	}
	return err;
}
static int r600_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i;

	if (!rdev->me_fw || !rdev->pfp_fw)
		return -EINVAL;

	r600_cp_stop(rdev);

	WREG32(CP_RB_CNTL,
#ifdef __BIG_ENDIAN
	       BUF_SWAP_32BIT |
#endif
	       RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));

	/* Reset cp */
	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);

	WREG32(CP_ME_RAM_WADDR, 0);

	fw_data = (const __be32 *)rdev->me_fw->data;
	WREG32(CP_ME_RAM_WADDR, 0);
	for (i = 0; i < PM4_UCODE_SIZE * 3; i++)
		WREG32(CP_ME_RAM_DATA,
		       be32_to_cpup(fw_data++));

	fw_data = (const __be32 *)rdev->pfp_fw->data;
	WREG32(CP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < PFP_UCODE_SIZE; i++)
		WREG32(CP_PFP_UCODE_DATA,
		       be32_to_cpup(fw_data++));

	WREG32(CP_PFP_UCODE_ADDR, 0);
	WREG32(CP_ME_RAM_WADDR, 0);
	WREG32(CP_ME_RAM_RADDR, 0);
	return 0;
}
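
/*
 * Both upload loops above program the address register once and then
 * stream the whole image through a single data port, which only works
 * because the PFP/ME data ports auto-advance the address; the trailing
 * writes reset the PFP ucode address and ME RAM read/write pointers to 0
 * so the CP starts fetching from the top of the freshly loaded images.
 */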
int r600_cp_start(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r;
	uint32_t cp_me;

	r = radeon_ring_lock(rdev, ring, 7);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}
	radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
	radeon_ring_write(ring, 0x1);
	if (rdev->family >= CHIP_RV770) {
		radeon_ring_write(ring, 0x0);
		radeon_ring_write(ring, rdev->config.rv770.max_hw_contexts - 1);
	} else {
		radeon_ring_write(ring, 0x3);
		radeon_ring_write(ring, rdev->config.r600.max_hw_contexts - 1);
	}
	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 0);
	radeon_ring_unlock_commit(rdev, ring);

	cp_me = 0xff;
	WREG32(R_0086D8_CP_ME_CNTL, cp_me);
	return 0;
}
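
/*
 * Bring-up order in r600_cp_resume() below, summarized: soft-reset the
 * CP, program the ring buffer size and swap mode, zero both pointers
 * (with RB_RPTR_WR_ENA temporarily set so the rptr can be forced),
 * point the rptr and scratch writebacks at the WB buffer, then start
 * the ME via r600_cp_start() and verify with a ring test.
 */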
int r600_cp_resume(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	u32 tmp;
	u32 rb_bufsz;
	int r;

	/* Reset cp */
	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);

	/* Set ring buffer size */
	rb_bufsz = drm_order(ring->ring_size / 8);
	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB_CNTL, tmp);
	WREG32(CP_SEM_WAIT_TIMER, 0x0);

	/* Set the write pointer delay */
	WREG32(CP_RB_WPTR_DELAY, 0);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
	WREG32(CP_RB_RPTR_WR, 0);
	ring->wptr = 0;
	WREG32(CP_RB_WPTR, ring->wptr);

	/* set the wb address whether it's enabled or not */
	WREG32(CP_RB_RPTR_ADDR,
	       ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);

	if (rdev->wb.enabled)
		WREG32(SCRATCH_UMSK, 0xff);
	else {
		tmp |= RB_NO_UPDATE;
		WREG32(SCRATCH_UMSK, 0);
	}

	mdelay(1);
	WREG32(CP_RB_CNTL, tmp);

	WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));

	ring->rptr = RREG32(CP_RB_RPTR);

	r600_cp_start(rdev);
	ring->ready = true;
	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
	if (r) {
		ring->ready = false;
		return r;
	}
	return 0;
}
void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size)
{
	u32 rb_bufsz;

	/* Align ring size */
	rb_bufsz = drm_order(ring_size / 8);
	ring_size = (1 << (rb_bufsz + 1)) * 4;
	ring->ring_size = ring_size;
	ring->align_mask = 16 - 1;
}
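
/*
 * Worked example: r600_init() below asks for a 1 MB GFX ring.
 * drm_order(1048576 / 8) = 17, so ring_size becomes
 * (1 << (17 + 1)) * 4 = 1048576 again; the round trip only changes
 * sizes that are not already a power of two.
 */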
void r600_cp_fini(struct radeon_device *rdev)
{
	r600_cp_stop(rdev);
	radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
}


/*
 * GPU scratch registers helper functions.
 */
void r600_scratch_init(struct radeon_device *rdev)
{
	int i;

	rdev->scratch.num_reg = 7;
	rdev->scratch.reg_base = SCRATCH_REG0;
	for (i = 0; i < rdev->scratch.num_reg; i++) {
		rdev->scratch.free[i] = true;
		rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
	}
}
int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i, ridx = radeon_ring_index(rdev, ring);
	int r;

	r = radeon_scratch_get(rdev, &scratch);
	if (r) {
		DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = radeon_ring_lock(rdev, ring, 3);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ridx, r);
		radeon_scratch_free(rdev, scratch);
		return r;
	}
	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(ring, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
	radeon_ring_write(ring, 0xDEADBEEF);
	radeon_ring_unlock_commit(rdev, ring);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n", ridx, i);
	} else {
		DRM_ERROR("radeon: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
			  ridx, scratch, tmp);
		r = -EINVAL;
	}
	radeon_scratch_free(rdev, scratch);
	return r;
}
void r600_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];

	if (rdev->wb.use_event) {
		u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
		/* flush read cache over gart */
		radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
		radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
					PACKET3_VC_ACTION_ENA |
					PACKET3_SH_ACTION_ENA);
		radeon_ring_write(ring, 0xFFFFFFFF);
		radeon_ring_write(ring, 0);
		radeon_ring_write(ring, 10); /* poll interval */
		/* EVENT_WRITE_EOP - flush caches, send int */
		radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
		radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
		radeon_ring_write(ring, addr & 0xffffffff);
		radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
		radeon_ring_write(ring, fence->seq);
		radeon_ring_write(ring, 0);
	} else {
		/* flush read cache over gart */
		radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
		radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
					PACKET3_VC_ACTION_ENA |
					PACKET3_SH_ACTION_ENA);
		radeon_ring_write(ring, 0xFFFFFFFF);
		radeon_ring_write(ring, 0);
		radeon_ring_write(ring, 10); /* poll interval */
		radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
		radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
		/* wait for 3D idle clean */
		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
		radeon_ring_write(ring, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
		/* Emit fence sequence & fire IRQ */
		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		radeon_ring_write(ring, ((rdev->fence_drv[fence->ring].scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
		radeon_ring_write(ring, fence->seq);
		/* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
		radeon_ring_write(ring, PACKET0(CP_INT_STATUS, 0));
		radeon_ring_write(ring, RB_INT_STAT);
	}
}
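
/*
 * Of the two paths above, the wb.use_event one has the CP itself write
 * the fence value and raise the interrupt via EVENT_WRITE_EOP (DATA_SEL(1)
 * appears to select a 32-bit data write and INT_SEL(2) an interrupt on
 * write confirmation), while the legacy path writes the sequence to the
 * fence scratch register and pokes CP_INT_STATUS with a packet 0.
 */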
void r600_semaphore_ring_emit(struct radeon_device *rdev,
			      struct radeon_ring *ring,
			      struct radeon_semaphore *semaphore,
			      bool emit_wait)
{
	uint64_t addr = semaphore->gpu_addr;
	unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;

	if (rdev->family < CHIP_CAYMAN)
		sel |= PACKET3_SEM_WAIT_ON_SIGNAL;

	radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
	radeon_ring_write(ring, addr & 0xffffffff);
	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
}
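
/*
 * Typical pairing (sketch): the producer ring emits this packet with
 * emit_wait == false (SEM_SEL_SIGNAL) and the consumer ring emits it with
 * emit_wait == true (SEM_SEL_WAIT) on the same semaphore GPU address, so
 * the consumer stalls until the producer signals.
 */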
int r600_copy_blit(struct radeon_device *rdev,
		   uint64_t src_offset,
		   uint64_t dst_offset,
		   unsigned num_gpu_pages,
		   struct radeon_fence *fence)
{
	int r;

	mutex_lock(&rdev->r600_blit.mutex);
	rdev->r600_blit.vb_ib = NULL;
	r = r600_blit_prepare_copy(rdev, num_gpu_pages);
	if (r) {
		if (rdev->r600_blit.vb_ib)
			radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
		mutex_unlock(&rdev->r600_blit.mutex);
		return r;
	}
	r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages);
	r600_blit_done_copy(rdev, fence);
	mutex_unlock(&rdev->r600_blit.mutex);
	return 0;
}
void r600_blit_suspend(struct radeon_device *rdev)
{
	int r;

	/* unpin shaders bo */
	if (rdev->r600_blit.shader_obj) {
		r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
		if (!r) {
			radeon_bo_unpin(rdev->r600_blit.shader_obj);
			radeon_bo_unreserve(rdev->r600_blit.shader_obj);
		}
	}
}
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
			 uint32_t tiling_flags, uint32_t pitch,
			 uint32_t offset, uint32_t obj_size)
{
	/* FIXME: implement */
	return 0;
}

void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
{
	/* FIXME: implement */
}
int r600_startup(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r;

	/* enable pcie gen2 link */
	r600_pcie_gen2_enable(rdev);

	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
		r = r600_init_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load firmware!\n");
			return r;
		}
	}

	r = r600_vram_scratch_init(rdev);
	if (r)
		return r;

	r600_mc_program(rdev);
	if (rdev->flags & RADEON_IS_AGP) {
		r600_agp_enable(rdev);
	} else {
		r = r600_pcie_gart_enable(rdev);
		if (r)
			return r;
	}
	r600_gpu_init(rdev);
	r = r600_blit_init(rdev);
	if (r) {
		r600_blit_fini(rdev);
		rdev->asic->copy.copy = NULL;
		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
	}

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	/* Enable IRQ */
	r = r600_irq_init(rdev);
	if (r) {
		DRM_ERROR("radeon: IH init failed (%d).\n", r);
		radeon_irq_kms_fini(rdev);
		return r;
	}
	r600_irq_set(rdev);

	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
			     R600_CP_RB_RPTR, R600_CP_RB_WPTR,
			     0, 0xfffff, RADEON_CP_PACKET2);
	if (r)
		return r;
	r = r600_cp_load_microcode(rdev);
	if (r)
		return r;
	r = r600_cp_resume(rdev);
	if (r)
		return r;

	r = radeon_ib_pool_start(rdev);
	if (r)
		return r;

	r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
	if (r) {
		DRM_ERROR("radeon: failed testing IB (%d).\n", r);
		rdev->accel_working = false;
		return r;
	}

	return 0;
}
void r600_vga_set_state(struct radeon_device *rdev, bool state)
{
	uint32_t temp;

	temp = RREG32(CONFIG_CNTL);
	if (state == false) {
		temp &= ~(1<<0);
		temp |= (1<<1);
	} else {
		temp &= ~(1<<1);
	}
	WREG32(CONFIG_CNTL, temp);
}
int r600_resume(struct radeon_device *rdev)
{
	int r;

	/* Do not reset GPU before posting; on r600 hw, unlike on r500 hw,
	 * posting will perform the necessary tasks to bring the GPU back
	 * into good shape.
	 */
	/* post card */
	atom_asic_init(rdev->mode_info.atom_context);

	rdev->accel_working = true;
	r = r600_startup(rdev);
	if (r) {
		DRM_ERROR("r600 startup failed on resume\n");
		rdev->accel_working = false;
		return r;
	}

	r = r600_audio_init(rdev);
	if (r) {
		DRM_ERROR("radeon: audio resume failed\n");
		return r;
	}

	return r;
}
int r600_suspend(struct radeon_device *rdev)
{
	r600_audio_fini(rdev);
	radeon_ib_pool_suspend(rdev);
	r600_blit_suspend(rdev);
	/* FIXME: we should wait for ring to be empty */
	r600_cp_stop(rdev);
	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
	r600_irq_suspend(rdev);
	radeon_wb_disable(rdev);
	r600_pcie_gart_disable(rdev);

	return 0;
}
/* The plan is to move initialization into this function and use
 * helper functions so that radeon_device_init does little more than
 * call ASIC-specific functions. This should also allow us to remove
 * a bunch of callback functions like vram_info.
 */
int r600_init(struct radeon_device *rdev)
{
	int r;

	if (r600_debugfs_mc_info_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for mc !\n");
	}
	/* This doesn't do much */
	r = radeon_gem_init(rdev);
	if (r)
		return r;
	/* Read BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	/* Must be an ATOMBIOS */
	if (!rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
		return -EINVAL;
	}
	r = radeon_atombios_init(rdev);
	if (r)
		return r;
	/* Post card if necessary */
	if (!radeon_card_posted(rdev)) {
		if (!rdev->bios) {
			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
			return -EINVAL;
		}
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
	/* Initialize scratch registers */
	r600_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r)
			radeon_agp_disable(rdev);
	}
	r = r600_mc_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;

	r = radeon_irq_kms_init(rdev);
	if (r)
		return r;

	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
	r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);

	rdev->ih.ring_obj = NULL;
	r600_ih_ring_init(rdev, 64 * 1024);

	r = r600_pcie_gart_init(rdev);
	if (r)
		return r;

	r = radeon_ib_pool_init(rdev);
	rdev->accel_working = true;
	if (r) {
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
		rdev->accel_working = false;
	}

	r = r600_startup(rdev);
	if (r) {
		dev_err(rdev->dev, "disabling GPU acceleration\n");
		r600_cp_fini(rdev);
		r600_irq_fini(rdev);
		radeon_wb_fini(rdev);
		r100_ib_fini(rdev);
		radeon_irq_kms_fini(rdev);
		r600_pcie_gart_fini(rdev);
		rdev->accel_working = false;
	}

	r = r600_audio_init(rdev);
	if (r)
		return r; /* TODO error handling */
	return 0;
}
void r600_fini(struct radeon_device *rdev)
{
	r600_audio_fini(rdev);
	r600_blit_fini(rdev);
	r600_cp_fini(rdev);
	r600_irq_fini(rdev);
	radeon_wb_fini(rdev);
	r100_ib_fini(rdev);
	radeon_irq_kms_fini(rdev);
	r600_pcie_gart_fini(rdev);
	r600_vram_scratch_fini(rdev);
	radeon_agp_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_semaphore_driver_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}

/*
 * CS stuff
 */
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->fence->ring];

	/* FIXME: implement */
	radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
			  (2 << 0) |
#endif
			  (ib->gpu_addr & 0xFFFFFFFC));
	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
	radeon_ring_write(ring, ib->length_dw);
}
int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
	struct radeon_ib *ib;
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;
	int ring_index = radeon_ring_index(rdev, ring);

	r = radeon_scratch_get(rdev, &scratch);
	if (r) {
		DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = radeon_ib_get(rdev, ring_index, &ib, 256);
	if (r) {
		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
		return r;
	}
	ib->ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
	ib->ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
	ib->ptr[2] = 0xDEADBEEF;
	ib->length_dw = 3;
	r = radeon_ib_schedule(rdev, ib);
	if (r) {
		radeon_scratch_free(rdev, scratch);
		radeon_ib_free(rdev, &ib);
		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
		return r;
	}
	r = radeon_fence_wait(ib->fence, false);
	if (r) {
		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
		return r;
	}
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib->fence->ring, i);
	} else {
		DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}
	radeon_scratch_free(rdev, scratch);
	radeon_ib_free(rdev, &ib);
	return r;
}

/*
 * Interrupts
 *
 * Interrupts use a ring buffer on r6xx/r7xx hardware.  It works pretty
 * much the same as the CP ring buffer, but in reverse.  Rather than the
 * CPU writing to the ring and the GPU consuming, the GPU writes to the
 * ring and the host consumes.  As the host irq handler processes
 * interrupts, it increments the rptr.  When the rptr catches up with the
 * wptr, all the current interrupts have been processed.
 */
void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
{
	u32 rb_bufsz;

	/* Align ring size */
	rb_bufsz = drm_order(ring_size / 4);
	ring_size = (1 << rb_bufsz) * 4;
	rdev->ih.ring_size = ring_size;
	rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
	rdev->ih.rptr = 0;
}
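
/*
 * Worked example: r600_init() above requests a 64 KB IH ring.
 * drm_order(65536 / 4) = 14, so ring_size stays (1 << 14) * 4 = 65536
 * and ptr_mask becomes 0xffff, which is what lets r600_get_ih_wptr()
 * wrap byte offsets with a plain AND.
 */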
static int r600_ih_ring_alloc(struct radeon_device *rdev)
{
	int r;

	/* Allocate ring buffer */
	if (rdev->ih.ring_obj == NULL) {
		r = radeon_bo_create(rdev, rdev->ih.ring_size,
				     PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT,
				     &rdev->ih.ring_obj);
		if (r) {
			DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
			return r;
		}
		r = radeon_bo_reserve(rdev->ih.ring_obj, false);
		if (unlikely(r != 0))
			return r;
		r = radeon_bo_pin(rdev->ih.ring_obj,
				  RADEON_GEM_DOMAIN_GTT,
				  &rdev->ih.gpu_addr);
		if (r) {
			radeon_bo_unreserve(rdev->ih.ring_obj);
			DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r);
			return r;
		}
		r = radeon_bo_kmap(rdev->ih.ring_obj,
				   (void **)&rdev->ih.ring);
		radeon_bo_unreserve(rdev->ih.ring_obj);
		if (r) {
			DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r);
			return r;
		}
	}
	return 0;
}
static void r600_ih_ring_fini(struct radeon_device *rdev)
{
	int r;

	if (rdev->ih.ring_obj) {
		r = radeon_bo_reserve(rdev->ih.ring_obj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(rdev->ih.ring_obj);
			radeon_bo_unpin(rdev->ih.ring_obj);
			radeon_bo_unreserve(rdev->ih.ring_obj);
		}
		radeon_bo_unref(&rdev->ih.ring_obj);
		rdev->ih.ring = NULL;
		rdev->ih.ring_obj = NULL;
	}
}
void r600_rlc_stop(struct radeon_device *rdev)
{
	if ((rdev->family >= CHIP_RV770) &&
	    (rdev->family <= CHIP_RV740)) {
		/* r7xx asics need to soft reset RLC before halting */
		WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
		RREG32(SRBM_SOFT_RESET);
		udelay(15000);
		WREG32(SRBM_SOFT_RESET, 0);
		RREG32(SRBM_SOFT_RESET);
	}

	WREG32(RLC_CNTL, 0);
}

static void r600_rlc_start(struct radeon_device *rdev)
{
	WREG32(RLC_CNTL, RLC_ENABLE);
}
static int r600_rlc_init(struct radeon_device *rdev)
{
	u32 i;
	const __be32 *fw_data;

	if (!rdev->rlc_fw)
		return -EINVAL;

	r600_rlc_stop(rdev);

	WREG32(RLC_HB_BASE, 0);
	WREG32(RLC_HB_CNTL, 0);
	WREG32(RLC_HB_RPTR, 0);
	WREG32(RLC_HB_WPTR, 0);
	if (rdev->family <= CHIP_CAICOS) {
		WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
		WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
	}
	WREG32(RLC_MC_CNTL, 0);
	WREG32(RLC_UCODE_CNTL, 0);

	fw_data = (const __be32 *)rdev->rlc_fw->data;
	if (rdev->family >= CHIP_CAYMAN) {
		for (i = 0; i < CAYMAN_RLC_UCODE_SIZE; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
		}
	} else if (rdev->family >= CHIP_CEDAR) {
		for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
		}
	} else if (rdev->family >= CHIP_RV770) {
		for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
		}
	} else {
		for (i = 0; i < RLC_UCODE_SIZE; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
		}
	}
	WREG32(RLC_UCODE_ADDR, 0);

	r600_rlc_start(rdev);

	return 0;
}
static void r600_enable_interrupts(struct radeon_device *rdev)
{
	u32 ih_cntl = RREG32(IH_CNTL);
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);

	ih_cntl |= ENABLE_INTR;
	ih_rb_cntl |= IH_RB_ENABLE;
	WREG32(IH_CNTL, ih_cntl);
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	rdev->ih.enabled = true;
}

void r600_disable_interrupts(struct radeon_device *rdev)
{
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
	u32 ih_cntl = RREG32(IH_CNTL);

	ih_rb_cntl &= ~IH_RB_ENABLE;
	ih_cntl &= ~ENABLE_INTR;
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	WREG32(IH_CNTL, ih_cntl);
	/* set rptr, wptr to 0 */
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);
	rdev->ih.enabled = false;
	rdev->ih.wptr = 0;
	rdev->ih.rptr = 0;
}
static void r600_disable_interrupt_state(struct radeon_device *rdev)
{
	u32 tmp;

	WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
	WREG32(GRBM_INT_CNTL, 0);
	WREG32(DxMODE_INT_MASK, 0);
	WREG32(D1GRPH_INTERRUPT_CONTROL, 0);
	WREG32(D2GRPH_INTERRUPT_CONTROL, 0);
	if (ASIC_IS_DCE3(rdev)) {
		WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
		WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
		tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD1_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD2_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD3_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
		if (ASIC_IS_DCE32(rdev)) {
			tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
			tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
		}
	} else {
		WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
		WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
		tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
		tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
		tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
	}
}
int r600_irq_init(struct radeon_device *rdev)
{
	int ret = 0;
	int rb_bufsz;
	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;

	/* allocate ring */
	ret = r600_ih_ring_alloc(rdev);
	if (ret)
		return ret;

	/* disable irqs */
	r600_disable_interrupts(rdev);

	/* init rlc */
	ret = r600_rlc_init(rdev);
	if (ret) {
		r600_ih_ring_fini(rdev);
		return ret;
	}

	/* setup interrupt control */
	/* set dummy read address to ring address */
	WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
	interrupt_cntl = RREG32(INTERRUPT_CNTL);
	/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
	 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
	 */
	interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
	/* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
	interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
	WREG32(INTERRUPT_CNTL, interrupt_cntl);

	WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
	rb_bufsz = drm_order(rdev->ih.ring_size / 4);

	ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
		      IH_WPTR_OVERFLOW_CLEAR |
		      (rb_bufsz << 1));

	if (rdev->wb.enabled)
		ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;

	/* set the writeback address whether it's enabled or not */
	WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);

	WREG32(IH_RB_CNTL, ih_rb_cntl);

	/* set rptr, wptr to 0 */
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);

	/* Default settings for IH_CNTL (disabled at first) */
	ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10);
	/* RPTR_REARM only works if msi's are enabled */
	if (rdev->msi_enabled)
		ih_cntl |= RPTR_REARM;
	WREG32(IH_CNTL, ih_cntl);

	/* force the active interrupt state to all disabled */
	if (rdev->family >= CHIP_CEDAR)
		evergreen_disable_interrupt_state(rdev);
	else
		r600_disable_interrupt_state(rdev);

	/* enable irqs */
	r600_enable_interrupts(rdev);

	return ret;
}
void r600_irq_suspend(struct radeon_device *rdev)
{
	r600_irq_disable(rdev);
	r600_rlc_stop(rdev);
}

void r600_irq_fini(struct radeon_device *rdev)
{
	r600_irq_suspend(rdev);
	r600_ih_ring_fini(rdev);
}
int r600_irq_set(struct radeon_device *rdev)
{
	u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
	u32 mode_int = 0;
	u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
	u32 grbm_int_cntl = 0;
	u32 hdmi1, hdmi2;
	u32 d1grph = 0, d2grph = 0;

	if (!rdev->irq.installed) {
		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
		return -EINVAL;
	}
	/* don't enable anything if the ih is disabled */
	if (!rdev->ih.enabled) {
		r600_disable_interrupts(rdev);
		/* force the active interrupt state to all disabled */
		r600_disable_interrupt_state(rdev);
		return 0;
	}

	hdmi1 = RREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
	if (ASIC_IS_DCE3(rdev)) {
		hdmi2 = RREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
		hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
		if (ASIC_IS_DCE32(rdev)) {
			hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
			hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
		}
	} else {
		hdmi2 = RREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
		hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
	}

	if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
		DRM_DEBUG("r600_irq_set: sw int\n");
		cp_int_cntl |= RB_INT_ENABLE;
		cp_int_cntl |= TIME_STAMP_INT_ENABLE;
	}
	if (rdev->irq.crtc_vblank_int[0] ||
	    rdev->irq.pflip[0]) {
		DRM_DEBUG("r600_irq_set: vblank 0\n");
		mode_int |= D1MODE_VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[1] ||
	    rdev->irq.pflip[1]) {
		DRM_DEBUG("r600_irq_set: vblank 1\n");
		mode_int |= D2MODE_VBLANK_INT_MASK;
	}
	if (rdev->irq.hpd[0]) {
		DRM_DEBUG("r600_irq_set: hpd 1\n");
		hpd1 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[1]) {
		DRM_DEBUG("r600_irq_set: hpd 2\n");
		hpd2 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[2]) {
		DRM_DEBUG("r600_irq_set: hpd 3\n");
		hpd3 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[3]) {
		DRM_DEBUG("r600_irq_set: hpd 4\n");
		hpd4 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[4]) {
		DRM_DEBUG("r600_irq_set: hpd 5\n");
		hpd5 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[5]) {
		DRM_DEBUG("r600_irq_set: hpd 6\n");
		hpd6 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hdmi[0]) {
		DRM_DEBUG("r600_irq_set: hdmi 1\n");
		hdmi1 |= R600_HDMI_INT_EN;
	}
	if (rdev->irq.hdmi[1]) {
		DRM_DEBUG("r600_irq_set: hdmi 2\n");
		hdmi2 |= R600_HDMI_INT_EN;
	}
	if (rdev->irq.gui_idle) {
		DRM_DEBUG("gui idle\n");
		grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
	}

	WREG32(CP_INT_CNTL, cp_int_cntl);
	WREG32(DxMODE_INT_MASK, mode_int);
	WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph);
	WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph);
	WREG32(GRBM_INT_CNTL, grbm_int_cntl);
	WREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, hdmi1);
	if (ASIC_IS_DCE3(rdev)) {
		WREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, hdmi2);
		WREG32(DC_HPD1_INT_CONTROL, hpd1);
		WREG32(DC_HPD2_INT_CONTROL, hpd2);
		WREG32(DC_HPD3_INT_CONTROL, hpd3);
		WREG32(DC_HPD4_INT_CONTROL, hpd4);
		if (ASIC_IS_DCE32(rdev)) {
			WREG32(DC_HPD5_INT_CONTROL, hpd5);
			WREG32(DC_HPD6_INT_CONTROL, hpd6);
		}
	} else {
		WREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, hdmi2);
		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
	}

	return 0;
}
static void r600_irq_ack(struct radeon_device *rdev)
{
	u32 tmp;

	if (ASIC_IS_DCE3(rdev)) {
		rdev->irq.stat_regs.r600.disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
		rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
		rdev->irq.stat_regs.r600.disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
	} else {
		rdev->irq.stat_regs.r600.disp_int = RREG32(DISP_INTERRUPT_STATUS);
		rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
		rdev->irq.stat_regs.r600.disp_int_cont2 = 0;
	}
	rdev->irq.stat_regs.r600.d1grph_int = RREG32(D1GRPH_INTERRUPT_STATUS);
	rdev->irq.stat_regs.r600.d2grph_int = RREG32(D2GRPH_INTERRUPT_STATUS);

	if (rdev->irq.stat_regs.r600.d1grph_int & DxGRPH_PFLIP_INT_OCCURRED)
		WREG32(D1GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
	if (rdev->irq.stat_regs.r600.d2grph_int & DxGRPH_PFLIP_INT_OCCURRED)
		WREG32(D2GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
	if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT)
		WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
	if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT)
		WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
	if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT)
		WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
	if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT)
		WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
	if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD1_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD1_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
		}
	}
	if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD2_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD2_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
		}
	}
	if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD3_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD3_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
		}
	}
	if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
		tmp = RREG32(DC_HPD4_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
	}
	if (ASIC_IS_DCE32(rdev)) {
		if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
			tmp = RREG32(DC_HPD5_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
		}
		if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
			tmp = RREG32(DC_HPD6_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
		}
	}
	if (RREG32(R600_HDMI_BLOCK1 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
		WREG32_P(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
	}
	if (ASIC_IS_DCE3(rdev)) {
		if (RREG32(R600_HDMI_BLOCK3 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
			WREG32_P(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
		}
	} else {
		if (RREG32(R600_HDMI_BLOCK2 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
			WREG32_P(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
		}
	}
}
void r600_irq_disable(struct radeon_device *rdev)
{
	r600_disable_interrupts(rdev);
	/* Wait and acknowledge irq */
	mdelay(1);
	r600_irq_ack(rdev);
	r600_disable_interrupt_state(rdev);
}
static u32 r600_get_ih_wptr(struct radeon_device *rdev)
{
	u32 wptr, tmp;

	if (rdev->wb.enabled)
		wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
	else
		wptr = RREG32(IH_RB_WPTR);

	if (wptr & RB_OVERFLOW) {
		/* When a ring buffer overflow happens, start parsing
		 * interrupts from the last not-overwritten vector (wptr + 16).
		 * Hopefully this should allow us to catch up.
		 */
		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
			 wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask);
		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
		tmp = RREG32(IH_RB_CNTL);
		tmp |= IH_WPTR_OVERFLOW_CLEAR;
		WREG32(IH_RB_CNTL, tmp);
	}
	return (wptr & rdev->ih.ptr_mask);
}

/*        r600 IV Ring
 * Each IV ring entry is 128 bits:
 * [7:0]    - interrupt source id
 * [31:8]   - reserved
 * [59:32]  - interrupt source data
 * [127:60] - reserved
 *
 * The basic interrupt vector entries
 * are decoded as follows:
 * src_id  src_data  description
 *      1         0  D1 Vblank
 *      1         1  D1 Vline
 *      5         0  D2 Vblank
 *      5         1  D2 Vline
 *     19         0  FP Hot plug detection A
 *     19         1  FP Hot plug detection B
 *     19         2  DAC A auto-detection
 *     19         3  DAC B auto-detection
 *     21         4  HDMI A
 *     21         5  HDMI B
 *    176         -  CP_INT RB
 *    177         -  CP_INT IB1
 *    178         -  CP_INT IB2
 *    181         -  EOP Interrupt
 *    233         -  GUI Idle
 *
 * Note, these are based on r600 and may need to be
 * adjusted or added to on newer asics
 */
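
/*
 * Decode sketch: an IH entry whose first dword is 0x00000001 and whose
 * second dword is 0x00000000 is src_id 1 / src_data 0, a D1 vblank in
 * the table above; r600_irq_process() below extracts exactly those two
 * fields from each 16-byte vector.
 */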
int r600_irq_process(struct radeon_device *rdev)
{
	u32 wptr;
	u32 rptr;
	u32 src_id, src_data;
	u32 ring_index;
	unsigned long flags;
	bool queue_hotplug = false;

	if (!rdev->ih.enabled || rdev->shutdown)
		return IRQ_NONE;

	/* No MSIs, need a dummy read to flush PCI DMAs */
	if (!rdev->msi_enabled)
		RREG32(IH_RB_WPTR);

	wptr = r600_get_ih_wptr(rdev);
	rptr = rdev->ih.rptr;
	DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);

	spin_lock_irqsave(&rdev->ih.lock, flags);

	if (rptr == wptr) {
		spin_unlock_irqrestore(&rdev->ih.lock, flags);
		return IRQ_NONE;
	}

restart_ih:
	/* Order reading of wptr vs. reading of IH ring data */
	rmb();

	/* display interrupts */
	r600_irq_ack(rdev);

	rdev->ih.wptr = wptr;
	while (rptr != wptr) {
		/* wptr/rptr are in bytes! */
		ring_index = rptr / 4;
		src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
		src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;

		switch (src_id) {
		case 1: /* D1 vblank/vline */
			switch (src_data) {
			case 0: /* D1 vblank */
				if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[0]) {
						drm_handle_vblank(rdev->ddev, 0);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (rdev->irq.pflip[0])
						radeon_crtc_handle_flip(rdev, 0);
					rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D1 vblank\n");
				}
				break;
			case 1: /* D1 vline */
				if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D1 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 5: /* D2 vblank/vline */
			switch (src_data) {
			case 0: /* D2 vblank */
				if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[1]) {
						drm_handle_vblank(rdev->ddev, 1);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (rdev->irq.pflip[1])
						radeon_crtc_handle_flip(rdev, 1);
					rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D2 vblank\n");
				}
				break;
			case 1: /* D2 vline */
				if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D2 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 19: /* HPD/DAC hotplug */
			switch (src_data) {
			case 0:
				if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD1\n");
				}
				break;
			case 1:
				if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD2\n");
				}
				break;
			case 4:
				if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD3\n");
				}
				break;
			case 5:
				if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD4\n");
				}
				break;
			case 10:
				if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD5\n");
				}
				break;
			case 12:
				if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD6\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 21: /* HDMI */
			DRM_DEBUG("IH: HDMI: 0x%x\n", src_data);
			r600_audio_schedule_polling(rdev);
			break;
		case 176: /* CP_INT in ring buffer */
		case 177: /* CP_INT in IB1 */
		case 178: /* CP_INT in IB2 */
			DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
			break;
		case 181: /* CP EOP event */
			DRM_DEBUG("IH: CP EOP\n");
			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
			break;
		case 233: /* GUI IDLE */
			DRM_DEBUG("IH: GUI idle\n");
			rdev->pm.gui_idle = true;
			wake_up(&rdev->irq.idle_queue);
			break;
		default:
			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
			break;
		}

		/* wptr/rptr are in bytes! */
		rptr += 16;
		rptr &= rdev->ih.ptr_mask;
	}
	/* make sure wptr hasn't changed while processing */
	wptr = r600_get_ih_wptr(rdev);
	if (wptr != rdev->ih.wptr)
		goto restart_ih;
	if (queue_hotplug)
		schedule_work(&rdev->hotplug_work);
	rdev->ih.rptr = rptr;
	WREG32(IH_RB_RPTR, rdev->ih.rptr);
	spin_unlock_irqrestore(&rdev->ih.lock, flags);
	return IRQ_HANDLED;
}
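
/*
 * Note the wptr re-read after the drain loop above: if new vectors
 * arrived while processing, control jumps back to restart_ih instead of
 * returning with work still queued; only then are rptr and IH_RB_RPTR
 * updated and the lock dropped.
 */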
/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int r600_debugfs_mc_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	DREG32_SYS(m, rdev, R_000E50_SRBM_STATUS);
	DREG32_SYS(m, rdev, VM_L2_STATUS);
	return 0;
}

static struct drm_info_list r600_mc_info_list[] = {
	{"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
};
#endif

int r600_debugfs_mc_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, r600_mc_info_list, ARRAY_SIZE(r600_mc_info_list));
#else
	return 0;
#endif
}

/**
 * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
 * rdev: radeon device structure
 * bo: buffer object struct which userspace is waiting for idle
 *
 * Some R6XX/R7XX chips don't seem to take into account an HDP flush
 * performed through the ring buffer; this leads to rendering corruption,
 * see http://bugzilla.kernel.org/show_bug.cgi?id=15186. To avoid it we
 * perform the HDP flush directly by writing the register through MMIO.
 */
void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
{
	/* r7xx hw bug.  write to HDP_DEBUG1 followed by fb read
	 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL.
	 * This seems to cause problems on some AGP cards. Just use the old
	 * method for them.
	 */
	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
	    rdev->vram_scratch.ptr && !(rdev->flags & RADEON_IS_AGP)) {
		void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
		u32 tmp;

		WREG32(HDP_DEBUG1, 0);
		tmp = readl((void __iomem *)ptr);
	} else
		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
}
void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes)
{
	u32 link_width_cntl, mask, target_reg;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return;

	/* FIXME wait for idle */

	switch (lanes) {
	case 0:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
		break;
	case 1:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
		break;
	case 2:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
		break;
	case 4:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
		break;
	case 8:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
		break;
	case 12:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
		break;
	case 16:
	default:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
		break;
	}

	link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

	if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) ==
	    (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT))
		return;

	if (link_width_cntl & R600_PCIE_LC_UPCONFIGURE_DIS)
		return;

	link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK |
			     RADEON_PCIE_LC_RECONFIG_NOW |
			     R600_PCIE_LC_RENEGOTIATE_EN |
			     R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE);
	link_width_cntl |= mask;

	WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);

	/* some northbridges can renegotiate the link rather than requiring
	 * a complete re-config.
	 * e.g., AMD 780/790 northbridges (pci ids: 0x5956, 0x5957, 0x5958, etc.)
	 */
	if (link_width_cntl & R600_PCIE_LC_RENEGOTIATION_SUPPORT)
		link_width_cntl |= R600_PCIE_LC_RENEGOTIATE_EN | R600_PCIE_LC_UPCONFIGURE_SUPPORT;
	else
		link_width_cntl |= R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE;

	WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl |
						       RADEON_PCIE_LC_RECONFIG_NOW));

	if (rdev->family >= CHIP_RV770)
		target_reg = R700_TARGET_AND_CURRENT_PROFILE_INDEX;
	else
		target_reg = R600_TARGET_AND_CURRENT_PROFILE_INDEX;

	/* wait for lane set to complete */
	link_width_cntl = RREG32(target_reg);
	while (link_width_cntl == 0xffffffff)
		link_width_cntl = RREG32(target_reg);
}
int r600_get_pcie_lanes(struct radeon_device *rdev)
{
	u32 link_width_cntl;

	if (rdev->flags & RADEON_IS_IGP)
		return 0;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return 0;

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return 0;

	/* FIXME wait for idle */

	link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

	switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
	case RADEON_PCIE_LC_LINK_WIDTH_X0:
		return 0;
	case RADEON_PCIE_LC_LINK_WIDTH_X1:
		return 1;
	case RADEON_PCIE_LC_LINK_WIDTH_X2:
		return 2;
	case RADEON_PCIE_LC_LINK_WIDTH_X4:
		return 4;
	case RADEON_PCIE_LC_LINK_WIDTH_X8:
		return 8;
	case RADEON_PCIE_LC_LINK_WIDTH_X16:
	default:
		return 16;
	}
}
static void r600_pcie_gen2_enable(struct radeon_device *rdev)
{
	u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp;
	u16 link_cntl2;

	if (radeon_pcie_gen2 == 0)
		return;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return;

	/* only RV6xx+ chips are supported */
	if (rdev->family <= CHIP_R600)
		return;

	/* 55 nm r6xx asics */
	if ((rdev->family == CHIP_RV670) ||
	    (rdev->family == CHIP_RV620) ||
	    (rdev->family == CHIP_RV635)) {
		/* advertise upconfig capability */
		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
		link_width_cntl &= ~LC_UPCONFIGURE_DIS;
		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
		if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
			lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
			link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
					     LC_RECONFIG_ARC_MISSING_ESCAPE);
			link_width_cntl |= lanes | LC_RECONFIG_NOW | LC_RENEGOTIATE_EN;
			WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
		} else {
			link_width_cntl |= LC_UPCONFIGURE_DIS;
			WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
		}
	}

	speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
	if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
	    (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {

		/* 55 nm r6xx asics */
		if ((rdev->family == CHIP_RV670) ||
		    (rdev->family == CHIP_RV620) ||
		    (rdev->family == CHIP_RV635)) {
			WREG32(MM_CFGREGS_CNTL, 0x8);
			link_cntl2 = RREG32(0x4088);
			WREG32(MM_CFGREGS_CNTL, 0);
			/* not supported yet */
			if (link_cntl2 & SELECTABLE_DEEMPHASIS)
				return;
		}

		speed_cntl &= ~LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK;
		speed_cntl |= (0x3 << LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT);
		speed_cntl &= ~LC_VOLTAGE_TIMER_SEL_MASK;
		speed_cntl &= ~LC_FORCE_DIS_HW_SPEED_CHANGE;
		speed_cntl |= LC_FORCE_EN_HW_SPEED_CHANGE;
		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);

		tmp = RREG32(0x541c);
		WREG32(0x541c, tmp | 0x8);
		WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN);
		link_cntl2 = RREG16(0x4088);
		link_cntl2 &= ~TARGET_LINK_SPEED_MASK;
		link_cntl2 |= 0x2;
		WREG16(0x4088, link_cntl2);
		WREG32(MM_CFGREGS_CNTL, 0);

		if ((rdev->family == CHIP_RV670) ||
		    (rdev->family == CHIP_RV620) ||
		    (rdev->family == CHIP_RV635)) {
			training_cntl = RREG32_PCIE_P(PCIE_LC_TRAINING_CNTL);
			training_cntl &= ~LC_POINT_7_PLUS_EN;
			WREG32_PCIE_P(PCIE_LC_TRAINING_CNTL, training_cntl);
		} else {
			speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
			speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
			WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
		}

		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
		speed_cntl |= LC_GEN2_EN_STRAP;
		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);

	} else {
		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
		/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
		if (1)
			link_width_cntl |= LC_UPCONFIGURE_DIS;
		else
			link_width_cntl &= ~LC_UPCONFIGURE_DIS;
		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
	}
}