PM / x86: Save/restore MISC_ENABLE register
[pandora-kernel.git] / drivers / gpu / drm / radeon / r600.c
1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <linux/slab.h>
29 #include <linux/seq_file.h>
30 #include <linux/firmware.h>
31 #include <linux/platform_device.h>
32 #include "drmP.h"
33 #include "radeon_drm.h"
34 #include "radeon.h"
35 #include "radeon_asic.h"
36 #include "radeon_mode.h"
37 #include "r600d.h"
38 #include "atom.h"
39 #include "avivod.h"
40
41 #define PFP_UCODE_SIZE 576
42 #define PM4_UCODE_SIZE 1792
43 #define RLC_UCODE_SIZE 768
44 #define R700_PFP_UCODE_SIZE 848
45 #define R700_PM4_UCODE_SIZE 1360
46 #define R700_RLC_UCODE_SIZE 1024
47 #define EVERGREEN_PFP_UCODE_SIZE 1120
48 #define EVERGREEN_PM4_UCODE_SIZE 1376
49 #define EVERGREEN_RLC_UCODE_SIZE 768
50
51 /* Firmware Names */
52 MODULE_FIRMWARE("radeon/R600_pfp.bin");
53 MODULE_FIRMWARE("radeon/R600_me.bin");
54 MODULE_FIRMWARE("radeon/RV610_pfp.bin");
55 MODULE_FIRMWARE("radeon/RV610_me.bin");
56 MODULE_FIRMWARE("radeon/RV630_pfp.bin");
57 MODULE_FIRMWARE("radeon/RV630_me.bin");
58 MODULE_FIRMWARE("radeon/RV620_pfp.bin");
59 MODULE_FIRMWARE("radeon/RV620_me.bin");
60 MODULE_FIRMWARE("radeon/RV635_pfp.bin");
61 MODULE_FIRMWARE("radeon/RV635_me.bin");
62 MODULE_FIRMWARE("radeon/RV670_pfp.bin");
63 MODULE_FIRMWARE("radeon/RV670_me.bin");
64 MODULE_FIRMWARE("radeon/RS780_pfp.bin");
65 MODULE_FIRMWARE("radeon/RS780_me.bin");
66 MODULE_FIRMWARE("radeon/RV770_pfp.bin");
67 MODULE_FIRMWARE("radeon/RV770_me.bin");
68 MODULE_FIRMWARE("radeon/RV730_pfp.bin");
69 MODULE_FIRMWARE("radeon/RV730_me.bin");
70 MODULE_FIRMWARE("radeon/RV710_pfp.bin");
71 MODULE_FIRMWARE("radeon/RV710_me.bin");
72 MODULE_FIRMWARE("radeon/R600_rlc.bin");
73 MODULE_FIRMWARE("radeon/R700_rlc.bin");
74 MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
75 MODULE_FIRMWARE("radeon/CEDAR_me.bin");
76 MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
77 MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
78 MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
79 MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
80 MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
81 MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
82 MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
83 MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
84 MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
85 MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
86
87 int r600_debugfs_mc_info_init(struct radeon_device *rdev);
88
89 /* r600,rv610,rv630,rv620,rv635,rv670 */
90 int r600_mc_wait_for_idle(struct radeon_device *rdev);
91 void r600_gpu_init(struct radeon_device *rdev);
92 void r600_fini(struct radeon_device *rdev);
93 void r600_irq_disable(struct radeon_device *rdev);
94
/*
 * r600_pm_get_dynpm_state - pick the power/clock state for a dynpm action
 * @rdev: radeon device
 *
 * Translates rdev->pm.dynpm_planned_action into a target state by filling
 * rdev->pm.requested_power_state_index and
 * rdev->pm.requested_clock_mode_index.  Also clears dynpm_can_upclock /
 * dynpm_can_downclock when the current state is already at the top /
 * bottom, so the caller knows no further movement in that direction is
 * possible.  The power state array is ordered low to high.
 */
void r600_pm_get_dynpm_state(struct radeon_device *rdev)
{
	int i;

	rdev->pm.dynpm_can_upclock = true;
	rdev->pm.dynpm_can_downclock = true;

	/* power state array is low to high, default is first */
	if ((rdev->flags & RADEON_IS_IGP) || (rdev->family == CHIP_R600)) {
		/* IGP/R600 path: step between whole power states, always
		 * using clock mode 0 of the chosen state. */
		int min_power_state_index = 0;

		/* with more than two states, don't treat the default state
		 * (index 0) as the minimum */
		if (rdev->pm.num_power_states > 2)
			min_power_state_index = 1;

		switch (rdev->pm.dynpm_planned_action) {
		case DYNPM_ACTION_MINIMUM:
			rdev->pm.requested_power_state_index = min_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_downclock = false;
			break;
		case DYNPM_ACTION_DOWNCLOCK:
			if (rdev->pm.current_power_state_index == min_power_state_index) {
				/* already at the bottom; stay put */
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.dynpm_can_downclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					/* multi-head: take the first (lowest) state
					 * below the current one that is not marked
					 * single-display-only */
					for (i = 0; i < rdev->pm.num_power_states; i++) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i >= rdev->pm.current_power_state_index) {
							/* no lower multi-head state exists;
							 * keep the current one */
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else
					rdev->pm.requested_power_state_index =
						rdev->pm.current_power_state_index - 1;
			}
			rdev->pm.requested_clock_mode_index = 0;
			/* don't use the power state if crtcs are active and no display flag is set */
			if ((rdev->pm.active_crtc_count > 0) &&
			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
			     clock_info[rdev->pm.requested_clock_mode_index].flags &
			     RADEON_PM_MODE_NO_DISPLAY)) {
				/* NOTE(review): no bounds check here — assumes a
				 * usable state exists above the selected one */
				rdev->pm.requested_power_state_index++;
			}
			break;
		case DYNPM_ACTION_UPCLOCK:
			if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
				/* already at the top; stay put */
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.dynpm_can_upclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					/* multi-head: scan from the top for the highest
					 * state above the current one that is not marked
					 * single-display-only */
					for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i <= rdev->pm.current_power_state_index) {
							/* no higher multi-head state exists;
							 * keep the current one */
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else
					rdev->pm.requested_power_state_index =
						rdev->pm.current_power_state_index + 1;
			}
			rdev->pm.requested_clock_mode_index = 0;
			break;
		case DYNPM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_upclock = false;
			break;
		case DYNPM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for not defined action\n");
			return;
		}
	} else {
		/* non-IGP path: keep one power state and step between its
		 * clock modes instead */
		/* XXX select a power state based on AC/DC, single/dualhead, etc. */
		/* for now just select the first power state and switch between clock modes */
		/* power state array is low to high, default is first (0) */
		if (rdev->pm.active_crtc_count > 1) {
			rdev->pm.requested_power_state_index = -1;
			/* start at 1 as we don't want the default mode */
			for (i = 1; i < rdev->pm.num_power_states; i++) {
				if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
					continue;
				else if ((rdev->pm.power_state[i].type == POWER_STATE_TYPE_PERFORMANCE) ||
					 (rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY)) {
					rdev->pm.requested_power_state_index = i;
					break;
				}
			}
			/* if nothing selected, grab the default state. */
			if (rdev->pm.requested_power_state_index == -1)
				rdev->pm.requested_power_state_index = 0;
		} else
			rdev->pm.requested_power_state_index = 1;

		switch (rdev->pm.dynpm_planned_action) {
		case DYNPM_ACTION_MINIMUM:
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_downclock = false;
			break;
		case DYNPM_ACTION_DOWNCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index == 0) {
					/* already at the lowest clock mode */
					rdev->pm.requested_clock_mode_index = 0;
					rdev->pm.dynpm_can_downclock = false;
				} else
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index - 1;
			} else {
				/* switching power states: start at the bottom mode */
				rdev->pm.requested_clock_mode_index = 0;
				rdev->pm.dynpm_can_downclock = false;
			}
			/* don't use the power state if crtcs are active and no display flag is set */
			if ((rdev->pm.active_crtc_count > 0) &&
			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
			     clock_info[rdev->pm.requested_clock_mode_index].flags &
			     RADEON_PM_MODE_NO_DISPLAY)) {
				rdev->pm.requested_clock_mode_index++;
			}
			break;
		case DYNPM_ACTION_UPCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index ==
				    (rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1)) {
					/* already at the highest clock mode */
					rdev->pm.requested_clock_mode_index = rdev->pm.current_clock_mode_index;
					rdev->pm.dynpm_can_upclock = false;
				} else
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index + 1;
			} else {
				/* switching power states: jump to the top mode */
				rdev->pm.requested_clock_mode_index =
					rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1;
				rdev->pm.dynpm_can_upclock = false;
			}
			break;
		case DYNPM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_upclock = false;
			break;
		case DYNPM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for not defined action\n");
			return;
		}
	}

	DRM_DEBUG("Requested: e: %d m: %d p: %d\n",
		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
		  clock_info[rdev->pm.requested_clock_mode_index].sclk,
		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
		  clock_info[rdev->pm.requested_clock_mode_index].mclk,
		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
		  pcie_lanes);
}
262
263 static int r600_pm_get_type_index(struct radeon_device *rdev,
264                                   enum radeon_pm_state_type ps_type,
265                                   int instance)
266 {
267         int i;
268         int found_instance = -1;
269
270         for (i = 0; i < rdev->pm.num_power_states; i++) {
271                 if (rdev->pm.power_state[i].type == ps_type) {
272                         found_instance++;
273                         if (found_instance == instance)
274                                 return i;
275                 }
276         }
277         /* return default if no match */
278         return rdev->pm.default_power_state_index;
279 }
280
281 void rs780_pm_init_profile(struct radeon_device *rdev)
282 {
283         if (rdev->pm.num_power_states == 2) {
284                 /* default */
285                 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
286                 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
287                 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
288                 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
289                 /* low sh */
290                 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
291                 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
292                 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
293                 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
294                 /* high sh */
295                 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
296                 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
297                 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
298                 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
299                 /* low mh */
300                 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
301                 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
302                 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
303                 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
304                 /* high mh */
305                 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
306                 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 1;
307                 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
308                 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
309         } else if (rdev->pm.num_power_states == 3) {
310                 /* default */
311                 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
312                 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
313                 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
314                 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
315                 /* low sh */
316                 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
317                 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
318                 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
319                 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
320                 /* high sh */
321                 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
322                 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 2;
323                 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
324                 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
325                 /* low mh */
326                 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 1;
327                 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 1;
328                 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
329                 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
330                 /* high mh */
331                 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 1;
332                 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
333                 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
334                 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
335         } else {
336                 /* default */
337                 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
338                 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
339                 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
340                 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
341                 /* low sh */
342                 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 2;
343                 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 2;
344                 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
345                 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
346                 /* high sh */
347                 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 2;
348                 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 3;
349                 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
350                 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
351                 /* low mh */
352                 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
353                 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
354                 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
355                 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
356                 /* high mh */
357                 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
358                 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 3;
359                 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
360                 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
361         }
362 }
363
364 void r600_pm_init_profile(struct radeon_device *rdev)
365 {
366         if (rdev->family == CHIP_R600) {
367                 /* XXX */
368                 /* default */
369                 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
370                 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
371                 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
372                 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
373                 /* low sh */
374                 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
375                 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
376                 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
377                 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
378                 /* high sh */
379                 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
380                 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
381                 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
382                 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
383                 /* low mh */
384                 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
385                 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
386                 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
387                 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
388                 /* high mh */
389                 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
390                 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
391                 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
392                 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
393         } else {
394                 if (rdev->pm.num_power_states < 4) {
395                         /* default */
396                         rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
397                         rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
398                         rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
399                         rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
400                         /* low sh */
401                         rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
402                         rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
403                         rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
404                         rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 1;
405                         /* high sh */
406                         rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
407                         rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
408                         rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
409                         rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
410                         /* low mh */
411                         rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
412                         rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 2;
413                         rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
414                         rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 1;
415                         /* high mh */
416                         rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
417                         rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
418                         rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
419                         rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
420                 } else {
421                         /* default */
422                         rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
423                         rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
424                         rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
425                         rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
426                         /* low sh */
427                         if (rdev->flags & RADEON_IS_MOBILITY) {
428                                 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx =
429                                         r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
430                                 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
431                                         r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
432                                 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
433                                 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 1;
434                         } else {
435                                 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx =
436                                         r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
437                                 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
438                                         r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
439                                 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
440                                 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 1;
441                         }
442                         /* high sh */
443                         rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx =
444                                 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
445                         rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx =
446                                 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
447                         rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
448                         rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
449                         /* low mh */
450                         if (rdev->flags & RADEON_IS_MOBILITY) {
451                                 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx =
452                                         r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
453                                 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx =
454                                         r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
455                                 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
456                                 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 2;
457                         } else {
458                                 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx =
459                                         r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
460                                 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx =
461                                         r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
462                                 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
463                                 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 1;
464                         }
465                         /* high mh */
466                         rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx =
467                                 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
468                         rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx =
469                                 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
470                         rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
471                         rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
472                 }
473         }
474 }
475
476 void r600_pm_misc(struct radeon_device *rdev)
477 {
478         int requested_index = rdev->pm.requested_power_state_index;
479         struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
480         struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
481
482         if ((voltage->type == VOLTAGE_SW) && voltage->voltage)
483                 radeon_atom_set_voltage(rdev, voltage->voltage);
484
485 }
486
487 bool r600_gui_idle(struct radeon_device *rdev)
488 {
489         if (RREG32(GRBM_STATUS) & GUI_ACTIVE)
490                 return false;
491         else
492                 return true;
493 }
494
495 /* hpd for digital panel detect/disconnect */
/*
 * Sample the current hot-plug detect sense bit for the given HPD pad.
 * Returns true when the pad reports a connected digital panel.
 *
 * DCE3 parts expose the DC_HPDn_* register set (pads 5 and 6 exist on
 * DCE 3.2 only); pre-DCE3 parts have at most three pads, read through
 * the DC_HOT_PLUG_DETECTn_* registers.  Unknown pad ids report
 * "not connected".
 */
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	bool connected = false;

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_4:
			if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
			/* DCE 3.2 only: pads 5 and 6 */
		case RADEON_HPD_5:
			if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_6:
			if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	}
	return connected;
}
550
/*
 * Program the HPD interrupt polarity for @hpd so the next interrupt
 * fires on the opposite of the currently sensed state: if a panel is
 * connected we arm for disconnect (clear the polarity bit), otherwise
 * we arm for connect (set it).
 */
void r600_hpd_set_polarity(struct radeon_device *rdev,
			   enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = r600_hpd_sense(rdev, hpd);

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HPD1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HPD2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HPD3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD3_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_4:
			tmp = RREG32(DC_HPD4_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD4_INT_CONTROL, tmp);
			break;
			/* DCE 3.2 only: pads 5 and 6 */
		case RADEON_HPD_5:
			tmp = RREG32(DC_HPD5_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_6:
			tmp = RREG32(DC_HPD6_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	}
}
642
/*
 * Enable the HPD pad used by each connector and request the matching
 * HPD interrupt source (rdev->irq.hpd[n]).  The interrupt enables are
 * committed via r600_irq_set() at the end, but only once the IRQ
 * handler is actually installed.
 */
void r600_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	if (ASIC_IS_DCE3(rdev)) {
		/* 0x9c4 connection timer, 0xfa RX interrupt timer */
		u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
		/* NOTE(review): DC_HPDx_EN is only set for DCE 3.2 here —
		 * confirm DCE 3.0/3.1 pads are enabled by other means. */
		if (ASIC_IS_DCE32(rdev))
			tmp |= DC_HPDx_EN;

		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, tmp);
				rdev->irq.hpd[0] = true;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, tmp);
				rdev->irq.hpd[1] = true;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, tmp);
				rdev->irq.hpd[2] = true;
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, tmp);
				rdev->irq.hpd[3] = true;
				break;
				/* DCE 3.2 only: pads 5 and 6 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, tmp);
				rdev->irq.hpd[4] = true;
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, tmp);
				rdev->irq.hpd[5] = true;
				break;
			default:
				break;
			}
		}
	} else {
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				rdev->irq.hpd[0] = true;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				rdev->irq.hpd[1] = true;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				rdev->irq.hpd[2] = true;
				break;
			default:
				break;
			}
		}
	}
	/* commit the hpd interrupt enables only if IRQs are up */
	if (rdev->irq.installed)
		r600_irq_set(rdev);
}
709
/*
 * Mirror of r600_hpd_init(): disable each connector's HPD pad (write 0
 * to its control register) and drop the corresponding interrupt
 * request flag in rdev->irq.hpd[].
 */
void r600_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	if (ASIC_IS_DCE3(rdev)) {
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, 0);
				rdev->irq.hpd[0] = false;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, 0);
				rdev->irq.hpd[1] = false;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, 0);
				rdev->irq.hpd[2] = false;
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, 0);
				rdev->irq.hpd[3] = false;
				break;
				/* DCE 3.2 only: pads 5 and 6 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, 0);
				rdev->irq.hpd[4] = false;
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, 0);
				rdev->irq.hpd[5] = false;
				break;
			default:
				break;
			}
		}
	} else {
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
				rdev->irq.hpd[0] = false;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
				rdev->irq.hpd[1] = false;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
				rdev->irq.hpd[2] = false;
				break;
			default:
				break;
			}
		}
	}
}
770
771 /*
772  * R600 PCIE GART
773  */
/*
 * Invalidate the PCIE GART TLB for the whole GTT range and poll until
 * the hardware reports a response.  The HDP cache is flushed first so
 * that page-table entries written through HDP have reached VRAM before
 * the invalidation.
 *
 * NOTE(review): if the poll loop times out the function returns
 * silently with no warning — verify callers can tolerate that.
 */
void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	/* flush hdp cache so updates hit vram */
	WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
		/* response type 2 indicates the flush request failed */
		if (tmp == 2) {
			printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
			return;
		}
		/* any other non-zero response means the flush completed */
		if (tmp) {
			return;
		}
		udelay(1);
	}
}
799
800 int r600_pcie_gart_init(struct radeon_device *rdev)
801 {
802         int r;
803
804         if (rdev->gart.table.vram.robj) {
805                 WARN(1, "R600 PCIE GART already initialized.\n");
806                 return 0;
807         }
808         /* Initialize common gart structure */
809         r = radeon_gart_init(rdev);
810         if (r)
811                 return r;
812         rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
813         return radeon_gart_table_vram_alloc(rdev);
814 }
815
/*
 * Enable the PCIE GART: pin the page table in VRAM, restore its
 * entries, program the VM L2 cache and L1 TLB clients, point VM
 * context 0 at the GTT range and page table, and flush the TLB.
 * Returns 0 on success or a negative error code.
 */
int r600_pcie_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r, i;

	if (rdev->gart.table.vram.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	/* re-write all valid PTEs (table contents lost across suspend) */
	radeon_gart_restore(rdev);

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control: same base config for every MC client below */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	/* VM context 0 covers the GTT aperture and uses our page table */
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	/* faulting accesses are redirected to the dummy page */
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(rdev->dummy_page.addr >> 12));
	/* disable VM contexts 1..7 */
	for (i = 1; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	r600_pcie_gart_tlb_flush(rdev);
	rdev->gart.ready = true;
	return 0;
}
869
/*
 * Disable the PCIE GART: turn off all VM contexts, drop the L2 cache
 * enable, restore the L1 TLB clients to a minimal (TLB-disabled)
 * configuration, and unpin/unmap the page-table BO if present.
 */
void r600_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;
	int i, r;

	/* Disable all tables */
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	/* Disable L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup L1 TLB control: note ENABLE_L1_TLB is deliberately absent */
	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	/* release the pinned page-table BO, if GART was ever enabled */
	if (rdev->gart.table.vram.robj) {
		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(rdev->gart.table.vram.robj);
			radeon_bo_unpin(rdev->gart.table.vram.robj);
			radeon_bo_unreserve(rdev->gart.table.vram.robj);
		}
	}
}
909
/*
 * Tear down the PCIE GART completely: release the common GART
 * bookkeeping, disable the hardware paths, then free the page-table BO.
 */
void r600_pcie_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	r600_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
}
916
/*
 * Configure the VM L2 cache and L1 TLB clients for AGP operation.
 * Same client programming as r600_pcie_gart_enable(), but no page
 * table is installed and every VM context is left disabled, so
 * translation falls through to the system/AGP apertures.
 */
void r600_agp_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control: same base config for every MC client below */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	/* leave all VM contexts disabled */
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
}
950
951 int r600_mc_wait_for_idle(struct radeon_device *rdev)
952 {
953         unsigned i;
954         u32 tmp;
955
956         for (i = 0; i < rdev->usec_timeout; i++) {
957                 /* read MC_STATUS */
958                 tmp = RREG32(R_000E50_SRBM_STATUS) & 0x3F00;
959                 if (!tmp)
960                         return 0;
961                 udelay(1);
962         }
963         return -1;
964 }
965
/*
 * Program the memory controller's address apertures (system aperture,
 * FB location, HDP non-surface range and AGP window) to match the
 * software VRAM/GTT layout.  Display is stopped around the update via
 * rv515_mc_stop()/rv515_mc_resume() and the VGA renderer is disabled
 * afterwards so it cannot scribble over our objects.
 */
static void r600_mc_program(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}
	/* Lockout access through VGA aperture (doesn't exist before R600) */
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	if (rdev->flags & RADEON_IS_AGP) {
		/* system aperture must span both VRAM and the AGP window */
		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
			/* VRAM before AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.vram_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.gtt_end >> 12);
		} else {
			/* VRAM after AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.gtt_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.vram_end >> 12);
		}
	} else {
		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
	}
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
	/* FB_LOCATION packs start/end in units of 16MB (>> 24) */
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
	WREG32(HDP_NONSURFACE_SIZE, rdev->mc.mc_vram_size | 0x3FF);
	if (rdev->flags & RADEON_IS_AGP) {
		/* AGP window registers are in units of 4MB (>> 22) */
		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
	} else {
		/* BOT > TOP effectively disables the AGP window */
		WREG32(MC_VM_AGP_BASE, 0);
		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	}
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}
	rv515_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it overwriting our objects */
	rv515_vga_render_disable(rdev);
}
1031
1032 /**
1033  * r600_vram_gtt_location - try to find VRAM & GTT location
1034  * @rdev: radeon device structure holding all necessary informations
1035  * @mc: memory controller structure holding memory informations
1036  *
1037  * Function will place try to place VRAM at same place as in CPU (PCI)
1038  * address space as some GPU seems to have issue when we reprogram at
1039  * different address space.
1040  *
1041  * If there is not enough space to fit the unvisible VRAM after the
1042  * aperture then we limit the VRAM size to the aperture.
1043  *
1044  * If we are using AGP then place VRAM adjacent to AGP aperture are we need
1045  * them to be in one from GPU point of view so that we can program GPU to
1046  * catch access outside them (weird GPU policy see ??).
1047  *
1048  * This function will never fails, worst case are limiting VRAM or GTT.
1049  *
1050  * Note: GTT start, end, size should be initialized before calling this
1051  * function on AGP platform.
1052  */
void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_bf, size_af;

	if (mc->mc_vram_size > 0xE0000000) {
		/* leave room for at least 512M GTT */
		dev_warn(rdev->dev, "limiting VRAM\n");
		mc->real_vram_size = 0xE0000000;
		mc->mc_vram_size = 0xE0000000;
	}
	if (rdev->flags & RADEON_IS_AGP) {
		/* free space before (size_bf) and after (size_af) the AGP
		 * aperture within the 32-bit GPU address space */
		size_bf = mc->gtt_start;
		size_af = 0xFFFFFFFF - mc->gtt_end + 1;
		if (size_bf > size_af) {
			if (mc->mc_vram_size > size_bf) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_bf;
				mc->mc_vram_size = size_bf;
			}
			/* place VRAM immediately below the AGP aperture */
			mc->vram_start = mc->gtt_start - mc->mc_vram_size;
		} else {
			if (mc->mc_vram_size > size_af) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_af;
				mc->mc_vram_size = size_af;
			}
			/* place VRAM immediately above the AGP aperture.
			 * NOTE(review): assumes mc->gtt_end is exclusive
			 * (first address past GTT) — verify against the
			 * GTT setup convention used by this version. */
			mc->vram_start = mc->gtt_end;
		}
		mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
		dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
				mc->mc_vram_size >> 20, mc->vram_start,
				mc->vram_end, mc->real_vram_size >> 20);
	} else {
		u64 base = 0;
		/* on IGPs keep VRAM where the BIOS/MC already placed it */
		if (rdev->flags & RADEON_IS_IGP)
			base = (RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24;
		radeon_vram_location(rdev, &rdev->mc, base);
		radeon_gtt_location(rdev, mc);
	}
}
1093
/*
 * Query the memory controller configuration (bus width from channel
 * size and count, aperture, VRAM size), decide the VRAM/GTT layout via
 * r600_vram_gtt_location(), and refresh derived bandwidth info.
 * Always returns 0.
 */
int r600_mc_init(struct radeon_device *rdev)
{
	u32 tmp;
	int chansize, numchan;

	/* Get VRAM informations */
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32(RAMCFG);
	/* per-channel width in bits */
	if (tmp & CHANSIZE_OVERRIDE) {
		chansize = 16;
	} else if (tmp & CHANSIZE_MASK) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(CHMAP);
	/* number of memory channels, encoded as a power of two */
	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	}
	rdev->mc.vram_width = numchan * chansize;
	/* Could aper size report 0 ? */
	rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
	rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
	/* Setup GPU memory space */
	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.visible_vram_size = rdev->mc.aper_size;
	r600_vram_gtt_location(rdev, &rdev->mc);

	if (rdev->flags & RADEON_IS_IGP)
		rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
	radeon_update_bandwidth_info(rdev);
	return 0;
}
1140
/* We don't check whether the GPU really needs a reset; we simply do
 * the reset.  It's up to the caller to determine if the GPU needs one.
 * We might add a helper function to check that.
 */
/*
 * Soft-reset the graphics pipeline.  Display is stopped around the
 * reset, CP parsing/prefetch is halted, the rendering blocks are reset
 * only if any of them reports busy, and the CP is always reset.
 * Status registers are logged before and after for diagnosis.
 * Always returns 0 (the reset itself cannot fail here).
 */
int r600_gpu_soft_reset(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	/* busy bits covering every rendering block in GRBM_STATUS */
	u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) |
				S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) |
				S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) |
				S_008010_SH_BUSY(1) | S_008010_SPI03_BUSY(1) |
				S_008010_SMX_BUSY(1) | S_008010_SC_BUSY(1) |
				S_008010_PA_BUSY(1) | S_008010_DB03_BUSY(1) |
				S_008010_CR_BUSY(1) | S_008010_CB03_BUSY(1) |
				S_008010_GUI_ACTIVE(1);
	/* busy bits for the per-pipe blocks in GRBM_STATUS2 */
	u32 grbm2_busy_mask = S_008014_SPI0_BUSY(1) | S_008014_SPI1_BUSY(1) |
			S_008014_SPI2_BUSY(1) | S_008014_SPI3_BUSY(1) |
			S_008014_TA0_BUSY(1) | S_008014_TA1_BUSY(1) |
			S_008014_TA2_BUSY(1) | S_008014_TA3_BUSY(1) |
			S_008014_DB0_BUSY(1) | S_008014_DB1_BUSY(1) |
			S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) |
			S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
			S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
	u32 tmp;

	dev_info(rdev->dev, "GPU softreset \n");
	dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
		RREG32(R_008010_GRBM_STATUS));
	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2=0x%08X\n",
		RREG32(R_008014_GRBM_STATUS2));
	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS=0x%08X\n",
		RREG32(R_000E50_SRBM_STATUS));
	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}
	/* Disable CP parsing/prefetching */
	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
	/* Check if any of the rendering block is busy and reset it */
	if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
	    (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
		tmp = S_008020_SOFT_RESET_CR(1) |
			S_008020_SOFT_RESET_DB(1) |
			S_008020_SOFT_RESET_CB(1) |
			S_008020_SOFT_RESET_PA(1) |
			S_008020_SOFT_RESET_SC(1) |
			S_008020_SOFT_RESET_SMX(1) |
			S_008020_SOFT_RESET_SPI(1) |
			S_008020_SOFT_RESET_SX(1) |
			S_008020_SOFT_RESET_SH(1) |
			S_008020_SOFT_RESET_TC(1) |
			S_008020_SOFT_RESET_TA(1) |
			S_008020_SOFT_RESET_VC(1) |
			S_008020_SOFT_RESET_VGT(1);
		dev_info(rdev->dev, "  R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(R_008020_GRBM_SOFT_RESET, tmp);
		/* read back to post the write, then hold reset briefly */
		RREG32(R_008020_GRBM_SOFT_RESET);
		mdelay(15);
		WREG32(R_008020_GRBM_SOFT_RESET, 0);
	}
	/* Reset CP (we always reset CP) */
	tmp = S_008020_SOFT_RESET_CP(1);
	dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
	WREG32(R_008020_GRBM_SOFT_RESET, tmp);
	RREG32(R_008020_GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(R_008020_GRBM_SOFT_RESET, 0);
	/* Wait a little for things to settle down */
	mdelay(1);
	dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
		RREG32(R_008010_GRBM_STATUS));
	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2=0x%08X\n",
		RREG32(R_008014_GRBM_STATUS2));
	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS=0x%08X\n",
		RREG32(R_000E50_SRBM_STATUS));
	rv515_mc_resume(rdev, &save);
	return 0;
}
1219
/*
 * r600_gpu_is_lockup - check whether the GPU appears hung.
 * @rdev: radeon device
 *
 * Returns false immediately (and resets the lockup tracking state) when
 * the GUI engine is idle.  Otherwise it kicks the CP with two NOP
 * packets, refreshes the cached read pointer and lets the shared r100
 * helper decide whether the CP has made progress.
 *
 * NOTE(review): the lockup state lives in rdev->config.r300.lockup even
 * on r6xx parts — presumably rdev->config is a union and the r300 member
 * is borrowed for lockup tracking; confirm against radeon.h.
 */
bool r600_gpu_is_lockup(struct radeon_device *rdev)
{
	u32 srbm_status;
	u32 grbm_status;
	u32 grbm_status2;
	int r;

	/* srbm_status and grbm_status2 are read but only grbm_status is
	 * consulted below — TODO(review): confirm the extra reads are
	 * intentional (debug leftovers vs. required register accesses). */
	srbm_status = RREG32(R_000E50_SRBM_STATUS);
	grbm_status = RREG32(R_008010_GRBM_STATUS);
	grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
	if (!G_008010_GUI_ACTIVE(grbm_status)) {
		/* GPU idle: record current CP state as "not locked up". */
		r100_gpu_lockup_update(&rdev->config.r300.lockup, &rdev->cp);
		return false;
	}
	/* force CP activities */
	r = radeon_ring_lock(rdev, 2);
	if (!r) {
		/* PACKET2 NOP */
		radeon_ring_write(rdev, 0x80000000);
		radeon_ring_write(rdev, 0x80000000);
		radeon_ring_unlock_commit(rdev);
	}
	/* Refresh the cached read pointer before the progress check. */
	rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
	return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, &rdev->cp);
}
1245
/*
 * r600_asic_reset - ASIC-level reset entry point for r6xx.
 * @rdev: radeon device
 *
 * Thin wrapper delegating to the GPU soft reset; returns its status.
 */
int r600_asic_reset(struct radeon_device *rdev)
{
	return r600_gpu_soft_reset(rdev);
}
1250
1251 static u32 r600_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
1252                                              u32 num_backends,
1253                                              u32 backend_disable_mask)
1254 {
1255         u32 backend_map = 0;
1256         u32 enabled_backends_mask;
1257         u32 enabled_backends_count;
1258         u32 cur_pipe;
1259         u32 swizzle_pipe[R6XX_MAX_PIPES];
1260         u32 cur_backend;
1261         u32 i;
1262
1263         if (num_tile_pipes > R6XX_MAX_PIPES)
1264                 num_tile_pipes = R6XX_MAX_PIPES;
1265         if (num_tile_pipes < 1)
1266                 num_tile_pipes = 1;
1267         if (num_backends > R6XX_MAX_BACKENDS)
1268                 num_backends = R6XX_MAX_BACKENDS;
1269         if (num_backends < 1)
1270                 num_backends = 1;
1271
1272         enabled_backends_mask = 0;
1273         enabled_backends_count = 0;
1274         for (i = 0; i < R6XX_MAX_BACKENDS; ++i) {
1275                 if (((backend_disable_mask >> i) & 1) == 0) {
1276                         enabled_backends_mask |= (1 << i);
1277                         ++enabled_backends_count;
1278                 }
1279                 if (enabled_backends_count == num_backends)
1280                         break;
1281         }
1282
1283         if (enabled_backends_count == 0) {
1284                 enabled_backends_mask = 1;
1285                 enabled_backends_count = 1;
1286         }
1287
1288         if (enabled_backends_count != num_backends)
1289                 num_backends = enabled_backends_count;
1290
1291         memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R6XX_MAX_PIPES);
1292         switch (num_tile_pipes) {
1293         case 1:
1294                 swizzle_pipe[0] = 0;
1295                 break;
1296         case 2:
1297                 swizzle_pipe[0] = 0;
1298                 swizzle_pipe[1] = 1;
1299                 break;
1300         case 3:
1301                 swizzle_pipe[0] = 0;
1302                 swizzle_pipe[1] = 1;
1303                 swizzle_pipe[2] = 2;
1304                 break;
1305         case 4:
1306                 swizzle_pipe[0] = 0;
1307                 swizzle_pipe[1] = 1;
1308                 swizzle_pipe[2] = 2;
1309                 swizzle_pipe[3] = 3;
1310                 break;
1311         case 5:
1312                 swizzle_pipe[0] = 0;
1313                 swizzle_pipe[1] = 1;
1314                 swizzle_pipe[2] = 2;
1315                 swizzle_pipe[3] = 3;
1316                 swizzle_pipe[4] = 4;
1317                 break;
1318         case 6:
1319                 swizzle_pipe[0] = 0;
1320                 swizzle_pipe[1] = 2;
1321                 swizzle_pipe[2] = 4;
1322                 swizzle_pipe[3] = 5;
1323                 swizzle_pipe[4] = 1;
1324                 swizzle_pipe[5] = 3;
1325                 break;
1326         case 7:
1327                 swizzle_pipe[0] = 0;
1328                 swizzle_pipe[1] = 2;
1329                 swizzle_pipe[2] = 4;
1330                 swizzle_pipe[3] = 6;
1331                 swizzle_pipe[4] = 1;
1332                 swizzle_pipe[5] = 3;
1333                 swizzle_pipe[6] = 5;
1334                 break;
1335         case 8:
1336                 swizzle_pipe[0] = 0;
1337                 swizzle_pipe[1] = 2;
1338                 swizzle_pipe[2] = 4;
1339                 swizzle_pipe[3] = 6;
1340                 swizzle_pipe[4] = 1;
1341                 swizzle_pipe[5] = 3;
1342                 swizzle_pipe[6] = 5;
1343                 swizzle_pipe[7] = 7;
1344                 break;
1345         }
1346
1347         cur_backend = 0;
1348         for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
1349                 while (((1 << cur_backend) & enabled_backends_mask) == 0)
1350                         cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
1351
1352                 backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2)));
1353
1354                 cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
1355         }
1356
1357         return backend_map;
1358 }
1359
/*
 * r600_count_pipe_bits - population count helper.
 * @val: bitmask of pipes/backends.
 *
 * Returns the number of bits set in @val.
 */
int r600_count_pipe_bits(uint32_t val)
{
	int bits;

	/* Kernighan's trick: each iteration clears the lowest set bit. */
	for (bits = 0; val; bits++)
		val &= val - 1;
	return bits;
}
1370
/*
 * r600_gpu_init - one-time programming of the 3D engine for r6xx ASICs.
 * @rdev: radeon device
 *
 * Fills rdev->config.r600 with per-family hardware limits, then programs
 * tiling, pipe/backend routing, CP thresholds, SQ (shader queue) GPR/
 * thread/stack partitioning and assorted default render state.  Register
 * write order follows the hardware bring-up sequence; do not reorder.
 */
void r600_gpu_init(struct radeon_device *rdev)
{
	u32 tiling_config;
	u32 ramcfg;
	u32 backend_map;
	u32 cc_rb_backend_disable;
	u32 cc_gc_shader_pipe_config;
	u32 tmp;
	int i, j;
	u32 sq_config;
	u32 sq_gpr_resource_mgmt_1 = 0;
	u32 sq_gpr_resource_mgmt_2 = 0;
	u32 sq_thread_resource_mgmt = 0;
	u32 sq_stack_resource_mgmt_1 = 0;
	u32 sq_stack_resource_mgmt_2 = 0;

	/* Per-family capability table: pipes, SIMDs, backends, GPR/thread/
	 * stack limits and SX export sizes. */
	/* FIXME: implement */
	switch (rdev->family) {
	case CHIP_R600:
		rdev->config.r600.max_pipes = 4;
		rdev->config.r600.max_tile_pipes = 8;
		rdev->config.r600.max_simds = 4;
		rdev->config.r600.max_backends = 4;
		rdev->config.r600.max_gprs = 256;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 256;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 16;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	case CHIP_RV630:
	case CHIP_RV635:
		rdev->config.r600.max_pipes = 2;
		rdev->config.r600.max_tile_pipes = 2;
		rdev->config.r600.max_simds = 3;
		rdev->config.r600.max_backends = 1;
		rdev->config.r600.max_gprs = 128;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 128;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 4;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		rdev->config.r600.max_pipes = 1;
		rdev->config.r600.max_tile_pipes = 1;
		rdev->config.r600.max_simds = 2;
		rdev->config.r600.max_backends = 1;
		rdev->config.r600.max_gprs = 128;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 128;
		rdev->config.r600.max_hw_contexts = 4;
		rdev->config.r600.max_gs_threads = 4;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 1;
		break;
	case CHIP_RV670:
		rdev->config.r600.max_pipes = 4;
		rdev->config.r600.max_tile_pipes = 4;
		rdev->config.r600.max_simds = 4;
		rdev->config.r600.max_backends = 4;
		rdev->config.r600.max_gprs = 192;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 256;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 16;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	default:
		break;
	}

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}

	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));

	/* Setup tiling */
	tiling_config = 0;
	ramcfg = RREG32(RAMCFG);
	switch (rdev->config.r600.max_tile_pipes) {
	case 1:
		tiling_config |= PIPE_TILING(0);
		break;
	case 2:
		tiling_config |= PIPE_TILING(1);
		break;
	case 4:
		tiling_config |= PIPE_TILING(2);
		break;
	case 8:
		tiling_config |= PIPE_TILING(3);
		break;
	default:
		break;
	}
	rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
	rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
	tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
	tiling_config |= GROUP_SIZE(0);
	rdev->config.r600.tiling_group_size = 256;
	tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
	if (tmp > 3) {
		tiling_config |= ROW_TILING(3);
		tiling_config |= SAMPLE_SPLIT(3);
	} else {
		tiling_config |= ROW_TILING(tmp);
		tiling_config |= SAMPLE_SPLIT(tmp);
	}
	tiling_config |= BANK_SWAPS(1);

	/* Disable the backends beyond max_backends (mask lives in bits 16+). */
	cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
	cc_rb_backend_disable |=
		BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK);

	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
	cc_gc_shader_pipe_config |=
		INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK);
	cc_gc_shader_pipe_config |=
		INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK);

	backend_map = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes,
							(R6XX_MAX_BACKENDS -
							 r600_count_pipe_bits((cc_rb_backend_disable &
									       R6XX_MAX_BACKENDS_MASK) >> 16)),
							(cc_rb_backend_disable >> 16));

	tiling_config |= BACKEND_MAP(backend_map);
	WREG32(GB_TILING_CONFIG, tiling_config);
	WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
	WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);

	/* Setup pipes */
	WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
	WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
	WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);

	/* Scale VGT deallocation/reuse depth by the number of active pipes. */
	tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
	WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
	WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);

	/* Setup some CP states */
	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | ROQ_IB2_START(0x2b)));
	WREG32(CP_MEQ_THRESHOLDS, (MEQ_END(0x40) | ROQ_END(0x40)));

	WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | SYNC_GRADIENT |
			     SYNC_WALKER | SYNC_ALIGNER));
	/* Setup various GPU states */
	if (rdev->family == CHIP_RV670)
		WREG32(ARB_GDEC_RD_CNTL, 0x00000021);

	tmp = RREG32(SX_DEBUG_1);
	tmp |= SMX_EVENT_RELEASE;
	if ((rdev->family > CHIP_R600))
		tmp |= ENABLE_NEW_SMX_ADDRESS;
	WREG32(SX_DEBUG_1, tmp);

	if (((rdev->family) == CHIP_R600) ||
	    ((rdev->family) == CHIP_RV630) ||
	    ((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE);
	} else {
		WREG32(DB_DEBUG, 0);
	}
	WREG32(DB_WATERMARKS, (DEPTH_FREE(4) | DEPTH_CACHELINE_FREE(16) |
			       DEPTH_FLUSH(16) | DEPTH_PENDING_FREE(4)));

	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
	WREG32(VGT_NUM_INSTANCES, 0);

	WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(0));

	/* FIFO watermarks: full rewrite on small parts, DONE hiwater tweak
	 * on R600/RV630, untouched elsewhere. */
	tmp = RREG32(SQ_MS_FIFO_SIZES);
	if (((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		tmp = (CACHE_FIFO_SIZE(0xa) |
		       FETCH_FIFO_HIWATER(0xa) |
		       DONE_FIFO_HIWATER(0xe0) |
		       ALU_UPDATE_FIFO_HIWATER(0x8));
	} else if (((rdev->family) == CHIP_R600) ||
		   ((rdev->family) == CHIP_RV630)) {
		tmp &= ~DONE_FIFO_HIWATER(0xff);
		tmp |= DONE_FIFO_HIWATER(0x4);
	}
	WREG32(SQ_MS_FIFO_SIZES, tmp);

	/* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
	 * should be adjusted as needed by the 2D/3D drivers.  This just sets default values
	 */
	sq_config = RREG32(SQ_CONFIG);
	sq_config &= ~(PS_PRIO(3) |
		       VS_PRIO(3) |
		       GS_PRIO(3) |
		       ES_PRIO(3));
	sq_config |= (DX9_CONSTS |
		      VC_ENABLE |
		      PS_PRIO(0) |
		      VS_PRIO(1) |
		      GS_PRIO(2) |
		      ES_PRIO(3));

	/* Per-family GPR/thread/stack partitioning between the PS/VS/GS/ES
	 * shader stages. */
	if ((rdev->family) == CHIP_R600) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(124) |
					  NUM_VS_GPRS(124) |
					  NUM_CLAUSE_TEMP_GPRS(4));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(0) |
					  NUM_ES_GPRS(0));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(136) |
					   NUM_VS_THREADS(48) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(4));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(128) |
					    NUM_VS_STACK_ENTRIES(128));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(0) |
					    NUM_ES_STACK_ENTRIES(0));
	} else if (((rdev->family) == CHIP_RV610) ||
		   ((rdev->family) == CHIP_RV620) ||
		   ((rdev->family) == CHIP_RS780) ||
		   ((rdev->family) == CHIP_RS880)) {
		/* no vertex cache */
		sq_config &= ~VC_ENABLE;

		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
					  NUM_ES_GPRS(17));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
					    NUM_VS_STACK_ENTRIES(40));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
					    NUM_ES_STACK_ENTRIES(16));
	} else if (((rdev->family) == CHIP_RV630) ||
		   ((rdev->family) == CHIP_RV635)) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(18) |
					  NUM_ES_GPRS(18));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
					    NUM_VS_STACK_ENTRIES(40));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
					    NUM_ES_STACK_ENTRIES(16));
	} else if ((rdev->family) == CHIP_RV670) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
					  NUM_ES_GPRS(17));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(64) |
					    NUM_VS_STACK_ENTRIES(64));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(64) |
					    NUM_ES_STACK_ENTRIES(64));
	}

	WREG32(SQ_CONFIG, sq_config);
	WREG32(SQ_GPR_RESOURCE_MGMT_1,  sq_gpr_resource_mgmt_1);
	WREG32(SQ_GPR_RESOURCE_MGMT_2,  sq_gpr_resource_mgmt_2);
	WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
	WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
	WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);

	/* Parts without a vertex cache invalidate the texture cache only. */
	if (((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY));
	} else {
		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC));
	}

	/* More default values. 2D/3D driver should adjust as needed */
	WREG32(PA_SC_AA_SAMPLE_LOCS_2S, (S0_X(0xc) | S0_Y(0x4) |
					 S1_X(0x4) | S1_Y(0xc)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_4S, (S0_X(0xe) | S0_Y(0xe) |
					 S1_X(0x2) | S1_Y(0x2) |
					 S2_X(0xa) | S2_Y(0x6) |
					 S3_X(0x6) | S3_Y(0xa)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD0, (S0_X(0xe) | S0_Y(0xb) |
					     S1_X(0x4) | S1_Y(0xc) |
					     S2_X(0x1) | S2_Y(0x6) |
					     S3_X(0xa) | S3_Y(0xe)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD1, (S4_X(0x6) | S4_Y(0x1) |
					     S5_X(0x0) | S5_Y(0x0) |
					     S6_X(0xb) | S6_Y(0x4) |
					     S7_X(0x7) | S7_Y(0x8)));

	WREG32(VGT_STRMOUT_EN, 0);
	/* GS-per-ES budget: base of 16 per pipe plus a per-family bonus,
	 * capped at 256. */
	tmp = rdev->config.r600.max_pipes * 16;
	switch (rdev->family) {
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		tmp += 32;
		break;
	case CHIP_RV670:
		tmp += 128;
		break;
	default:
		break;
	}
	if (tmp > 256) {
		tmp = 256;
	}
	WREG32(VGT_ES_PER_GS, 128);
	WREG32(VGT_GS_PER_ES, tmp);
	WREG32(VGT_GS_PER_VS, 2);
	WREG32(VGT_GS_VERTEX_REUSE, 16);

	/* more default values. 2D/3D driver should adjust as needed */
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
	WREG32(VGT_STRMOUT_EN, 0);
	WREG32(SX_MISC, 0);
	WREG32(PA_SC_MODE_CNTL, 0);
	WREG32(PA_SC_AA_CONFIG, 0);
	WREG32(PA_SC_LINE_STIPPLE, 0);
	WREG32(SPI_INPUT_Z, 0);
	WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
	WREG32(CB_COLOR7_FRAG, 0);

	/* Clear render buffer base addresses */
	WREG32(CB_COLOR0_BASE, 0);
	WREG32(CB_COLOR1_BASE, 0);
	WREG32(CB_COLOR2_BASE, 0);
	WREG32(CB_COLOR3_BASE, 0);
	WREG32(CB_COLOR4_BASE, 0);
	WREG32(CB_COLOR5_BASE, 0);
	WREG32(CB_COLOR6_BASE, 0);
	WREG32(CB_COLOR7_BASE, 0);
	WREG32(CB_COLOR7_FRAG, 0);

	/* Texture cache L2 sizing per family. */
	switch (rdev->family) {
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		tmp = TC_L2_SIZE(8);
		break;
	case CHIP_RV630:
	case CHIP_RV635:
		tmp = TC_L2_SIZE(4);
		break;
	case CHIP_R600:
		tmp = TC_L2_SIZE(0) | L2_DISABLE_LATE_HIT;
		break;
	default:
		tmp = TC_L2_SIZE(0);
		break;
	}
	WREG32(TC_CNTL, tmp);

	/* NOTE(review): read back and rewritten unchanged — presumably a
	 * posting/latch quirk of HDP_HOST_PATH_CNTL; confirm before touching. */
	tmp = RREG32(HDP_HOST_PATH_CNTL);
	WREG32(HDP_HOST_PATH_CNTL, tmp);

	tmp = RREG32(ARB_POP);
	tmp |= ENABLE_TC128;
	WREG32(ARB_POP, tmp);

	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
	WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
			       NUM_CLIP_SEQ(3)));
	WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
}
1772
1773
1774 /*
1775  * Indirect registers accessor
1776  */
1777 u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
1778 {
1779         u32 r;
1780
1781         WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
1782         (void)RREG32(PCIE_PORT_INDEX);
1783         r = RREG32(PCIE_PORT_DATA);
1784         return r;
1785 }
1786
1787 void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
1788 {
1789         WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
1790         (void)RREG32(PCIE_PORT_INDEX);
1791         WREG32(PCIE_PORT_DATA, (v));
1792         (void)RREG32(PCIE_PORT_DATA);
1793 }
1794
1795 /*
1796  * CP & Ring
1797  */
/*
 * r600_cp_stop - halt the CP micro engine.
 * @rdev: radeon device
 *
 * Sets the ME_HALT bit so the command processor stops fetching and
 * parsing packets.
 */
void r600_cp_stop(struct radeon_device *rdev)
{
	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
}
1802
1803 int r600_init_microcode(struct radeon_device *rdev)
1804 {
1805         struct platform_device *pdev;
1806         const char *chip_name;
1807         const char *rlc_chip_name;
1808         size_t pfp_req_size, me_req_size, rlc_req_size;
1809         char fw_name[30];
1810         int err;
1811
1812         DRM_DEBUG("\n");
1813
1814         pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
1815         err = IS_ERR(pdev);
1816         if (err) {
1817                 printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
1818                 return -EINVAL;
1819         }
1820
1821         switch (rdev->family) {
1822         case CHIP_R600:
1823                 chip_name = "R600";
1824                 rlc_chip_name = "R600";
1825                 break;
1826         case CHIP_RV610:
1827                 chip_name = "RV610";
1828                 rlc_chip_name = "R600";
1829                 break;
1830         case CHIP_RV630:
1831                 chip_name = "RV630";
1832                 rlc_chip_name = "R600";
1833                 break;
1834         case CHIP_RV620:
1835                 chip_name = "RV620";
1836                 rlc_chip_name = "R600";
1837                 break;
1838         case CHIP_RV635:
1839                 chip_name = "RV635";
1840                 rlc_chip_name = "R600";
1841                 break;
1842         case CHIP_RV670:
1843                 chip_name = "RV670";
1844                 rlc_chip_name = "R600";
1845                 break;
1846         case CHIP_RS780:
1847         case CHIP_RS880:
1848                 chip_name = "RS780";
1849                 rlc_chip_name = "R600";
1850                 break;
1851         case CHIP_RV770:
1852                 chip_name = "RV770";
1853                 rlc_chip_name = "R700";
1854                 break;
1855         case CHIP_RV730:
1856         case CHIP_RV740:
1857                 chip_name = "RV730";
1858                 rlc_chip_name = "R700";
1859                 break;
1860         case CHIP_RV710:
1861                 chip_name = "RV710";
1862                 rlc_chip_name = "R700";
1863                 break;
1864         case CHIP_CEDAR:
1865                 chip_name = "CEDAR";
1866                 rlc_chip_name = "CEDAR";
1867                 break;
1868         case CHIP_REDWOOD:
1869                 chip_name = "REDWOOD";
1870                 rlc_chip_name = "REDWOOD";
1871                 break;
1872         case CHIP_JUNIPER:
1873                 chip_name = "JUNIPER";
1874                 rlc_chip_name = "JUNIPER";
1875                 break;
1876         case CHIP_CYPRESS:
1877         case CHIP_HEMLOCK:
1878                 chip_name = "CYPRESS";
1879                 rlc_chip_name = "CYPRESS";
1880                 break;
1881         default: BUG();
1882         }
1883
1884         if (rdev->family >= CHIP_CEDAR) {
1885                 pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
1886                 me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
1887                 rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
1888         } else if (rdev->family >= CHIP_RV770) {
1889                 pfp_req_size = R700_PFP_UCODE_SIZE * 4;
1890                 me_req_size = R700_PM4_UCODE_SIZE * 4;
1891                 rlc_req_size = R700_RLC_UCODE_SIZE * 4;
1892         } else {
1893                 pfp_req_size = PFP_UCODE_SIZE * 4;
1894                 me_req_size = PM4_UCODE_SIZE * 12;
1895                 rlc_req_size = RLC_UCODE_SIZE * 4;
1896         }
1897
1898         DRM_INFO("Loading %s Microcode\n", chip_name);
1899
1900         snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
1901         err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
1902         if (err)
1903                 goto out;
1904         if (rdev->pfp_fw->size != pfp_req_size) {
1905                 printk(KERN_ERR
1906                        "r600_cp: Bogus length %zu in firmware \"%s\"\n",
1907                        rdev->pfp_fw->size, fw_name);
1908                 err = -EINVAL;
1909                 goto out;
1910         }
1911
1912         snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
1913         err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
1914         if (err)
1915                 goto out;
1916         if (rdev->me_fw->size != me_req_size) {
1917                 printk(KERN_ERR
1918                        "r600_cp: Bogus length %zu in firmware \"%s\"\n",
1919                        rdev->me_fw->size, fw_name);
1920                 err = -EINVAL;
1921         }
1922
1923         snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
1924         err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
1925         if (err)
1926                 goto out;
1927         if (rdev->rlc_fw->size != rlc_req_size) {
1928                 printk(KERN_ERR
1929                        "r600_rlc: Bogus length %zu in firmware \"%s\"\n",
1930                        rdev->rlc_fw->size, fw_name);
1931                 err = -EINVAL;
1932         }
1933
1934 out:
1935         platform_device_unregister(pdev);
1936
1937         if (err) {
1938                 if (err != -EINVAL)
1939                         printk(KERN_ERR
1940                                "r600_cp: Failed to load firmware \"%s\"\n",
1941                                fw_name);
1942                 release_firmware(rdev->pfp_fw);
1943                 rdev->pfp_fw = NULL;
1944                 release_firmware(rdev->me_fw);
1945                 rdev->me_fw = NULL;
1946                 release_firmware(rdev->rlc_fw);
1947                 rdev->rlc_fw = NULL;
1948         }
1949         return err;
1950 }
1951
/*
 * Load the PFP and ME microcode images into the CP's internal RAMs.
 * Requires r600_init_microcode() to have fetched both firmware blobs
 * beforehand.  Halts and soft-resets the CP before programming it.
 *
 * Returns 0 on success, -EINVAL if either firmware image is missing.
 */
static int r600_cp_load_microcode(struct radeon_device *rdev)
{
        const __be32 *fw_data;
        int i;

        if (!rdev->me_fw || !rdev->pfp_fw)
                return -EINVAL;

        r600_cp_stop(rdev);

        WREG32(CP_RB_CNTL, RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));

        /* Reset cp */
        WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
        RREG32(GRBM_SOFT_RESET);
        mdelay(15);
        WREG32(GRBM_SOFT_RESET, 0);

        WREG32(CP_ME_RAM_WADDR, 0);

        /* firmware words are stored big-endian; ME ucode is written as
         * PM4_UCODE_SIZE * 3 dwords (3 dwords per ME instruction) */
        fw_data = (const __be32 *)rdev->me_fw->data;
        WREG32(CP_ME_RAM_WADDR, 0);
        for (i = 0; i < PM4_UCODE_SIZE * 3; i++)
                WREG32(CP_ME_RAM_DATA,
                       be32_to_cpup(fw_data++));

        fw_data = (const __be32 *)rdev->pfp_fw->data;
        WREG32(CP_PFP_UCODE_ADDR, 0);
        for (i = 0; i < PFP_UCODE_SIZE; i++)
                WREG32(CP_PFP_UCODE_DATA,
                       be32_to_cpup(fw_data++));

        /* rewind the ucode address/read/write pointers to the start */
        WREG32(CP_PFP_UCODE_ADDR, 0);
        WREG32(CP_ME_RAM_WADDR, 0);
        WREG32(CP_ME_RAM_RADDR, 0);
        return 0;
}
1989
/*
 * Emit the ME_INITIALIZE packet that configures and starts the CP
 * micro engine.  The init flags and hardware context count differ per
 * ASIC family (evergreen / rv770 / r600).
 *
 * Returns 0 on success, or the radeon_ring_lock() error code.
 */
int r600_cp_start(struct radeon_device *rdev)
{
        int r;
        uint32_t cp_me;

        /* 7 dwords: header + 6 payload dwords of ME_INITIALIZE */
        r = radeon_ring_lock(rdev, 7);
        if (r) {
                DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
                return r;
        }
        radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
        radeon_ring_write(rdev, 0x1);
        if (rdev->family >= CHIP_CEDAR) {
                radeon_ring_write(rdev, 0x0);
                radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
        } else if (rdev->family >= CHIP_RV770) {
                radeon_ring_write(rdev, 0x0);
                radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
        } else {
                radeon_ring_write(rdev, 0x3);
                radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
        }
        radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
        radeon_ring_write(rdev, 0);
        radeon_ring_write(rdev, 0);
        radeon_ring_unlock_commit(rdev);

        /* NOTE(review): writing 0xff to CP_ME_CNTL appears to release the
         * ME from halt - confirm against the register description */
        cp_me = 0xff;
        WREG32(R_0086D8_CP_ME_CNTL, cp_me);
        return 0;
}
2021
/*
 * Program the CP ring buffer registers and (re)start the CP.
 * The CP must already have its microcode loaded
 * (r600_cp_load_microcode()).  Finishes with a ring test to verify the
 * engine is actually fetching commands.
 *
 * Returns 0 on success or a negative error code.
 */
int r600_cp_resume(struct radeon_device *rdev)
{
        u32 tmp;
        u32 rb_bufsz;
        int r;

        /* Reset cp */
        WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
        RREG32(GRBM_SOFT_RESET);
        mdelay(15);
        WREG32(GRBM_SOFT_RESET, 0);

        /* Set ring buffer size */
        rb_bufsz = drm_order(rdev->cp.ring_size / 8);
        tmp = RB_NO_UPDATE | (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
        tmp |= BUF_SWAP_32BIT;
#endif
        WREG32(CP_RB_CNTL, tmp);
        WREG32(CP_SEM_WAIT_TIMER, 0x4);

        /* Set the write pointer delay */
        WREG32(CP_RB_WPTR_DELAY, 0);

        /* Initialize the ring buffer's read and write pointers */
        /* RB_RPTR_WR_ENA makes the hardware latch the forced rptr below */
        WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
        WREG32(CP_RB_RPTR_WR, 0);
        WREG32(CP_RB_WPTR, 0);
        WREG32(CP_RB_RPTR_ADDR, rdev->cp.gpu_addr & 0xFFFFFFFF);
        WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->cp.gpu_addr));
        mdelay(1);
        WREG32(CP_RB_CNTL, tmp);

        /* ring base is specified in units of 256 bytes */
        WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
        WREG32(CP_DEBUG, (1 << 27) | (1 << 28));

        /* mirror the hardware pointers into the driver's ring state */
        rdev->cp.rptr = RREG32(CP_RB_RPTR);
        rdev->cp.wptr = RREG32(CP_RB_WPTR);

        r600_cp_start(rdev);
        rdev->cp.ready = true;
        r = radeon_ring_test(rdev);
        if (r) {
                rdev->cp.ready = false;
                return r;
        }
        return 0;
}
2070
/*
 * Publish queued ring commands to the GPU by updating the CP write
 * pointer.  The read-back flushes the posted write so the CP sees it.
 */
void r600_cp_commit(struct radeon_device *rdev)
{
        WREG32(CP_RB_WPTR, rdev->cp.wptr);
        (void)RREG32(CP_RB_WPTR);
}
2076
2077 void r600_ring_init(struct radeon_device *rdev, unsigned ring_size)
2078 {
2079         u32 rb_bufsz;
2080
2081         /* Align ring size */
2082         rb_bufsz = drm_order(ring_size / 8);
2083         ring_size = (1 << (rb_bufsz + 1)) * 4;
2084         rdev->cp.ring_size = ring_size;
2085         rdev->cp.align_mask = 16 - 1;
2086 }
2087
/* Stop the CP, then release the ring buffer object (order matters). */
void r600_cp_fini(struct radeon_device *rdev)
{
        r600_cp_stop(rdev);
        radeon_ring_fini(rdev);
}
2093
2094
2095 /*
2096  * GPU scratch registers helpers function.
2097  */
2098 void r600_scratch_init(struct radeon_device *rdev)
2099 {
2100         int i;
2101
2102         rdev->scratch.num_reg = 7;
2103         for (i = 0; i < rdev->scratch.num_reg; i++) {
2104                 rdev->scratch.free[i] = true;
2105                 rdev->scratch.reg[i] = SCRATCH_REG0 + (i * 4);
2106         }
2107 }
2108
/*
 * Basic CP sanity test: emit a SET_CONFIG_REG packet that writes a
 * magic value to a scratch register, then poll until the value shows
 * up (or the usec timeout expires).
 *
 * Returns 0 on success, -EINVAL on timeout, or a ring/scratch error.
 */
int r600_ring_test(struct radeon_device *rdev)
{
        uint32_t scratch;
        uint32_t tmp = 0;
        unsigned i;
        int r;

        r = radeon_scratch_get(rdev, &scratch);
        if (r) {
                DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
                return r;
        }
        /* seed with a sentinel so we can detect the CP's write */
        WREG32(scratch, 0xCAFEDEAD);
        r = radeon_ring_lock(rdev, 3);
        if (r) {
                DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
                radeon_scratch_free(rdev, scratch);
                return r;
        }
        radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
        radeon_ring_write(rdev, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
        radeon_ring_write(rdev, 0xDEADBEEF);
        radeon_ring_unlock_commit(rdev);
        for (i = 0; i < rdev->usec_timeout; i++) {
                tmp = RREG32(scratch);
                if (tmp == 0xDEADBEEF)
                        break;
                DRM_UDELAY(1);
        }
        if (i < rdev->usec_timeout) {
                DRM_INFO("ring test succeeded in %d usecs\n", i);
        } else {
                DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
                          scratch, tmp);
                r = -EINVAL;
        }
        radeon_scratch_free(rdev, scratch);
        return r;
}
2148
/*
 * Disable scratch-register writeback and unpin the writeback buffer.
 * The buffer object itself is kept; r600_wb_fini() frees it.
 */
void r600_wb_disable(struct radeon_device *rdev)
{
        int r;

        /* mask off all scratch writeback before touching the buffer */
        WREG32(SCRATCH_UMSK, 0);
        if (rdev->wb.wb_obj) {
                r = radeon_bo_reserve(rdev->wb.wb_obj, false);
                if (unlikely(r != 0))
                        return;
                radeon_bo_kunmap(rdev->wb.wb_obj);
                radeon_bo_unpin(rdev->wb.wb_obj);
                radeon_bo_unreserve(rdev->wb.wb_obj);
        }
}
2163
/* Tear down writeback completely: disable it, then free the buffer. */
void r600_wb_fini(struct radeon_device *rdev)
{
        r600_wb_disable(rdev);
        if (rdev->wb.wb_obj) {
                radeon_bo_unref(&rdev->wb.wb_obj);
                rdev->wb.wb = NULL;
                rdev->wb.wb_obj = NULL;
        }
}
2173
/*
 * Enable GPU writeback: allocate (if needed), pin and map a one-page
 * GTT buffer, then point the scratch and CP rptr writeback addresses
 * at it.
 *
 * Returns 0 on success or a negative error code; on failure the
 * partially-initialized buffer is cleaned up via r600_wb_fini().
 */
int r600_wb_enable(struct radeon_device *rdev)
{
        int r;

        if (rdev->wb.wb_obj == NULL) {
                r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
                                RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
                if (r) {
                        dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
                        return r;
                }
                r = radeon_bo_reserve(rdev->wb.wb_obj, false);
                if (unlikely(r != 0)) {
                        r600_wb_fini(rdev);
                        return r;
                }
                r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
                                &rdev->wb.gpu_addr);
                if (r) {
                        radeon_bo_unreserve(rdev->wb.wb_obj);
                        dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
                        r600_wb_fini(rdev);
                        return r;
                }
                r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
                radeon_bo_unreserve(rdev->wb.wb_obj);
                if (r) {
                        dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
                        r600_wb_fini(rdev);
                        return r;
                }
        }
        /* scratch writeback base (256-byte units); CP rptr lives at +1024 */
        WREG32(SCRATCH_ADDR, (rdev->wb.gpu_addr >> 8) & 0xFFFFFFFF);
        WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + 1024) & 0xFFFFFFFC);
        WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + 1024) & 0xFF);
        /* enable writeback for all scratch registers */
        WREG32(SCRATCH_UMSK, 0xff);
        return 0;
}
2212
/*
 * Emit the command stream that signals @fence: flush caches, wait for
 * 3D idle, write the fence sequence number to the fence scratch
 * register, then poke the CP interrupt status to raise an IRQ.
 * Caller must hold the ring lock with enough dwords reserved.
 */
void r600_fence_ring_emit(struct radeon_device *rdev,
                          struct radeon_fence *fence)
{
        /* Also consider EVENT_WRITE_EOP.  it handles the interrupts + timestamps + events */

        radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
        radeon_ring_write(rdev, CACHE_FLUSH_AND_INV_EVENT);
        /* wait for 3D idle clean */
        radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
        radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
        radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
        /* Emit fence sequence & fire IRQ */
        radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
        radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
        radeon_ring_write(rdev, fence->seq);
        /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
        radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
        radeon_ring_write(rdev, RB_INT_STAT);
}
2232
/*
 * Copy @num_pages GPU pages from @src_offset to @dst_offset using the
 * blit shader path, signalling @fence when done.  Serialized by the
 * r600_blit mutex.
 *
 * Returns 0 on success or the r600_blit_prepare_copy() error code.
 */
int r600_copy_blit(struct radeon_device *rdev,
                   uint64_t src_offset, uint64_t dst_offset,
                   unsigned num_pages, struct radeon_fence *fence)
{
        int r;

        mutex_lock(&rdev->r600_blit.mutex);
        rdev->r600_blit.vb_ib = NULL;
        r = r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
        if (r) {
                /* prepare may have allocated a vertex-buffer IB; free it */
                if (rdev->r600_blit.vb_ib)
                        radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
                mutex_unlock(&rdev->r600_blit.mutex);
                return r;
        }
        r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
        r600_blit_done_copy(rdev, fence);
        mutex_unlock(&rdev->r600_blit.mutex);
        return 0;
}
2253
/* Program a surface register for tiling.  Not yet implemented on r600;
 * always reports success so callers proceed without surface setup. */
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
                         uint32_t tiling_flags, uint32_t pitch,
                         uint32_t offset, uint32_t obj_size)
{
        /* FIXME: implement */
        return 0;
}
2261
/* Clear a surface register.  Not yet implemented on r600; no-op. */
void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
{
        /* FIXME: implement */
}
2266
2267
2268 bool r600_card_posted(struct radeon_device *rdev)
2269 {
2270         uint32_t reg;
2271
2272         /* first check CRTCs */
2273         reg = RREG32(D1CRTC_CONTROL) |
2274                 RREG32(D2CRTC_CONTROL);
2275         if (reg & CRTC_EN)
2276                 return true;
2277
2278         /* then check MEM_SIZE, in case the crtcs are off */
2279         if (RREG32(CONFIG_MEMSIZE))
2280                 return true;
2281
2282         return false;
2283 }
2284
/*
 * Bring the GPU to an operational state: load microcode, program the
 * memory controller, enable GART (or AGP), initialize the blitter,
 * IRQs, the CP and writeback.  Called from r600_init() and
 * r600_resume().
 *
 * Returns 0 on success or a negative error code.
 */
int r600_startup(struct radeon_device *rdev)
{
        int r;

        /* fetch CP/ME/RLC firmware on first startup only */
        if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
                r = r600_init_microcode(rdev);
                if (r) {
                        DRM_ERROR("Failed to load firmware!\n");
                        return r;
                }
        }

        r600_mc_program(rdev);
        if (rdev->flags & RADEON_IS_AGP) {
                r600_agp_enable(rdev);
        } else {
                r = r600_pcie_gart_enable(rdev);
                if (r)
                        return r;
        }
        r600_gpu_init(rdev);
        /* blitter failure is non-fatal: fall back to CPU memcpy */
        r = r600_blit_init(rdev);
        if (r) {
                r600_blit_fini(rdev);
                rdev->asic->copy = NULL;
                dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
        }
        /* pin copy shader into vram */
        if (rdev->r600_blit.shader_obj) {
                r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
                if (unlikely(r != 0))
                        return r;
                r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
                                &rdev->r600_blit.shader_gpu_addr);
                radeon_bo_unreserve(rdev->r600_blit.shader_obj);
                if (r) {
                        dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
                        return r;
                }
        }
        /* Enable IRQ */
        r = r600_irq_init(rdev);
        if (r) {
                DRM_ERROR("radeon: IH init failed (%d).\n", r);
                radeon_irq_kms_fini(rdev);
                return r;
        }
        r600_irq_set(rdev);

        r = radeon_ring_init(rdev, rdev->cp.ring_size);
        if (r)
                return r;
        r = r600_cp_load_microcode(rdev);
        if (r)
                return r;
        r = r600_cp_resume(rdev);
        if (r)
                return r;
        /* write back buffer are not vital so don't worry about failure */
        r600_wb_enable(rdev);
        return 0;
}
2347
2348 void r600_vga_set_state(struct radeon_device *rdev, bool state)
2349 {
2350         uint32_t temp;
2351
2352         temp = RREG32(CONFIG_CNTL);
2353         if (state == false) {
2354                 temp &= ~(1<<0);
2355                 temp |= (1<<1);
2356         } else {
2357                 temp &= ~(1<<1);
2358         }
2359         WREG32(CONFIG_CNTL, temp);
2360 }
2361
2362 int r600_resume(struct radeon_device *rdev)
2363 {
2364         int r;
2365
2366         /* Do not reset GPU before posting, on r600 hw unlike on r500 hw,
2367          * posting will perform necessary task to bring back GPU into good
2368          * shape.
2369          */
2370         /* post card */
2371         atom_asic_init(rdev->mode_info.atom_context);
2372         /* Initialize clocks */
2373         r = radeon_clocks_init(rdev);
2374         if (r) {
2375                 return r;
2376         }
2377
2378         r = r600_startup(rdev);
2379         if (r) {
2380                 DRM_ERROR("r600 startup failed on resume\n");
2381                 return r;
2382         }
2383
2384         r = r600_ib_test(rdev);
2385         if (r) {
2386                 DRM_ERROR("radeon: failled testing IB (%d).\n", r);
2387                 return r;
2388         }
2389
2390         r = r600_audio_init(rdev);
2391         if (r) {
2392                 DRM_ERROR("radeon: audio resume failed\n");
2393                 return r;
2394         }
2395
2396         return r;
2397 }
2398
/*
 * Quiesce the GPU for suspend: stop audio, the CP, IRQs, writeback and
 * GART, and unpin the blit shader so VRAM contents may be discarded.
 * Always returns 0.
 */
int r600_suspend(struct radeon_device *rdev)
{
        int r;

        r600_audio_fini(rdev);
        /* FIXME: we should wait for ring to be empty */
        r600_cp_stop(rdev);
        rdev->cp.ready = false;
        r600_irq_suspend(rdev);
        r600_wb_disable(rdev);
        r600_pcie_gart_disable(rdev);
        /* unpin shaders bo */
        if (rdev->r600_blit.shader_obj) {
                r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
                if (!r) {
                        radeon_bo_unpin(rdev->r600_blit.shader_obj);
                        radeon_bo_unreserve(rdev->r600_blit.shader_obj);
                }
        }
        return 0;
}
2420
/* Plan is to move initialization into that function and use
 * helper functions so that radeon_device_init does pretty much
 * nothing more than calling asic-specific functions. This
 * should also allow removal of a bunch of callback functions
 * like vram_info.
 */
/*
 * One-time driver initialization for r600-class GPUs: BIOS discovery,
 * ATOM setup, POST if needed, clocks, fences, MC, memory manager,
 * IRQs, rings and GART, ending with a full startup and IB test.
 * Acceleration failures are non-fatal; rdev->accel_working records the
 * outcome.
 *
 * Returns 0 on success or a negative error code.
 */
int r600_init(struct radeon_device *rdev)
{
        int r;

        r = radeon_dummy_page_init(rdev);
        if (r)
                return r;
        if (r600_debugfs_mc_info_init(rdev)) {
                DRM_ERROR("Failed to register debugfs file for mc !\n");
        }
        /* This doesn't do much */
        r = radeon_gem_init(rdev);
        if (r)
                return r;
        /* Read BIOS */
        if (!radeon_get_bios(rdev)) {
                if (ASIC_IS_AVIVO(rdev))
                        return -EINVAL;
        }
        /* Must be an ATOMBIOS */
        if (!rdev->is_atom_bios) {
                dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
                return -EINVAL;
        }
        r = radeon_atombios_init(rdev);
        if (r)
                return r;
        /* Post card if necessary */
        if (!r600_card_posted(rdev)) {
                if (!rdev->bios) {
                        dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
                        return -EINVAL;
                }
                DRM_INFO("GPU not posted. posting now...\n");
                atom_asic_init(rdev->mode_info.atom_context);
        }
        /* Initialize scratch registers */
        r600_scratch_init(rdev);
        /* Initialize surface registers */
        radeon_surface_init(rdev);
        /* Initialize clocks */
        radeon_get_clock_info(rdev->ddev);
        r = radeon_clocks_init(rdev);
        if (r)
                return r;
        /* Fence driver */
        r = radeon_fence_driver_init(rdev);
        if (r)
                return r;
        /* AGP failure is non-fatal: fall back to PCIe GART */
        if (rdev->flags & RADEON_IS_AGP) {
                r = radeon_agp_init(rdev);
                if (r)
                        radeon_agp_disable(rdev);
        }
        r = r600_mc_init(rdev);
        if (r)
                return r;
        /* Memory manager */
        r = radeon_bo_init(rdev);
        if (r)
                return r;

        r = radeon_irq_kms_init(rdev);
        if (r)
                return r;

        rdev->cp.ring_obj = NULL;
        r600_ring_init(rdev, 1024 * 1024);

        rdev->ih.ring_obj = NULL;
        r600_ih_ring_init(rdev, 64 * 1024);

        r = r600_pcie_gart_init(rdev);
        if (r)
                return r;

        /* startup failure disables acceleration but keeps modesetting */
        rdev->accel_working = true;
        r = r600_startup(rdev);
        if (r) {
                dev_err(rdev->dev, "disabling GPU acceleration\n");
                r600_cp_fini(rdev);
                r600_wb_fini(rdev);
                r600_irq_fini(rdev);
                radeon_irq_kms_fini(rdev);
                r600_pcie_gart_fini(rdev);
                rdev->accel_working = false;
        }
        if (rdev->accel_working) {
                r = radeon_ib_pool_init(rdev);
                if (r) {
                        dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
                        rdev->accel_working = false;
                } else {
                        r = r600_ib_test(rdev);
                        if (r) {
                                dev_err(rdev->dev, "IB test failed (%d).\n", r);
                                rdev->accel_working = false;
                        }
                }
        }

        r = r600_audio_init(rdev);
        if (r)
                return r; /* TODO error handling */
        return 0;
}
2533
/*
 * Full driver teardown, reversing r600_init(): engines first, then
 * IRQs, GART, AGP, GEM/fence/clock state, memory manager, ATOM and
 * the BIOS copy.  Order mirrors the init sequence.
 */
void r600_fini(struct radeon_device *rdev)
{
        r600_audio_fini(rdev);
        r600_blit_fini(rdev);
        r600_cp_fini(rdev);
        r600_wb_fini(rdev);
        r600_irq_fini(rdev);
        radeon_irq_kms_fini(rdev);
        r600_pcie_gart_fini(rdev);
        radeon_agp_fini(rdev);
        radeon_gem_fini(rdev);
        radeon_fence_driver_fini(rdev);
        radeon_clocks_fini(rdev);
        radeon_bo_fini(rdev);
        radeon_atombios_fini(rdev);
        kfree(rdev->bios);
        rdev->bios = NULL;
        radeon_dummy_page_fini(rdev);
}
2553
2554
2555 /*
2556  * CS stuff
2557  */
/*
 * Emit an INDIRECT_BUFFER packet telling the CP to fetch and execute
 * the given IB.  Caller must hold the ring lock with 4 dwords free.
 */
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
        /* FIXME: implement */
        radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
        /* low 32 bits must be dword-aligned; high address limited to 8 bits */
        radeon_ring_write(rdev, ib->gpu_addr & 0xFFFFFFFC);
        radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
        radeon_ring_write(rdev, ib->length_dw);
}
2566
2567 int r600_ib_test(struct radeon_device *rdev)
2568 {
2569         struct radeon_ib *ib;
2570         uint32_t scratch;
2571         uint32_t tmp = 0;
2572         unsigned i;
2573         int r;
2574
2575         r = radeon_scratch_get(rdev, &scratch);
2576         if (r) {
2577                 DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
2578                 return r;
2579         }
2580         WREG32(scratch, 0xCAFEDEAD);
2581         r = radeon_ib_get(rdev, &ib);
2582         if (r) {
2583                 DRM_ERROR("radeon: failed to get ib (%d).\n", r);
2584                 return r;
2585         }
2586         ib->ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
2587         ib->ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
2588         ib->ptr[2] = 0xDEADBEEF;
2589         ib->ptr[3] = PACKET2(0);
2590         ib->ptr[4] = PACKET2(0);
2591         ib->ptr[5] = PACKET2(0);
2592         ib->ptr[6] = PACKET2(0);
2593         ib->ptr[7] = PACKET2(0);
2594         ib->ptr[8] = PACKET2(0);
2595         ib->ptr[9] = PACKET2(0);
2596         ib->ptr[10] = PACKET2(0);
2597         ib->ptr[11] = PACKET2(0);
2598         ib->ptr[12] = PACKET2(0);
2599         ib->ptr[13] = PACKET2(0);
2600         ib->ptr[14] = PACKET2(0);
2601         ib->ptr[15] = PACKET2(0);
2602         ib->length_dw = 16;
2603         r = radeon_ib_schedule(rdev, ib);
2604         if (r) {
2605                 radeon_scratch_free(rdev, scratch);
2606                 radeon_ib_free(rdev, &ib);
2607                 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
2608                 return r;
2609         }
2610         r = radeon_fence_wait(ib->fence, false);
2611         if (r) {
2612                 DRM_ERROR("radeon: fence wait failed (%d).\n", r);
2613                 return r;
2614         }
2615         for (i = 0; i < rdev->usec_timeout; i++) {
2616                 tmp = RREG32(scratch);
2617                 if (tmp == 0xDEADBEEF)
2618                         break;
2619                 DRM_UDELAY(1);
2620         }
2621         if (i < rdev->usec_timeout) {
2622                 DRM_INFO("ib test succeeded in %u usecs\n", i);
2623         } else {
2624                 DRM_ERROR("radeon: ib test failed (sracth(0x%04X)=0x%08X)\n",
2625                           scratch, tmp);
2626                 r = -EINVAL;
2627         }
2628         radeon_scratch_free(rdev, scratch);
2629         radeon_ib_free(rdev, &ib);
2630         return r;
2631 }
2632
2633 /*
2634  * Interrupts
2635  *
2636  * Interrupts use a ring buffer on r6xx/r7xx hardware.  It works pretty
2637  * the same as the CP ring buffer, but in reverse.  Rather than the CPU
2638  * writing to the ring and the GPU consuming, the GPU writes to the ring
2639  * and host consumes.  As the host irq handler processes interrupts, it
2640  * increments the rptr.  When the rptr catches up with the wptr, all the
2641  * current interrupts have been processed.
2642  */
2643
2644 void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
2645 {
2646         u32 rb_bufsz;
2647
2648         /* Align ring size */
2649         rb_bufsz = drm_order(ring_size / 4);
2650         ring_size = (1 << rb_bufsz) * 4;
2651         rdev->ih.ring_size = ring_size;
2652         rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
2653         rdev->ih.rptr = 0;
2654 }
2655
/*
 * Allocate, pin (in GTT) and CPU-map the IH ring buffer.  Idempotent:
 * does nothing if the buffer object already exists.
 *
 * Returns 0 on success or a negative error code.
 */
static int r600_ih_ring_alloc(struct radeon_device *rdev)
{
        int r;

        /* Allocate ring buffer */
        if (rdev->ih.ring_obj == NULL) {
                r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size,
                                     true,
                                     RADEON_GEM_DOMAIN_GTT,
                                     &rdev->ih.ring_obj);
                if (r) {
                        DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
                        return r;
                }
                r = radeon_bo_reserve(rdev->ih.ring_obj, false);
                if (unlikely(r != 0))
                        return r;
                r = radeon_bo_pin(rdev->ih.ring_obj,
                                  RADEON_GEM_DOMAIN_GTT,
                                  &rdev->ih.gpu_addr);
                if (r) {
                        radeon_bo_unreserve(rdev->ih.ring_obj);
                        DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r);
                        return r;
                }
                r = radeon_bo_kmap(rdev->ih.ring_obj,
                                   (void **)&rdev->ih.ring);
                radeon_bo_unreserve(rdev->ih.ring_obj);
                if (r) {
                        DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r);
                        return r;
                }
        }
        return 0;
}
2691
/* Unmap, unpin and free the IH ring buffer (reverse of _alloc). */
static void r600_ih_ring_fini(struct radeon_device *rdev)
{
        int r;
        if (rdev->ih.ring_obj) {
                r = radeon_bo_reserve(rdev->ih.ring_obj, false);
                if (likely(r == 0)) {
                        radeon_bo_kunmap(rdev->ih.ring_obj);
                        radeon_bo_unpin(rdev->ih.ring_obj);
                        radeon_bo_unreserve(rdev->ih.ring_obj);
                }
                radeon_bo_unref(&rdev->ih.ring_obj);
                rdev->ih.ring = NULL;
                rdev->ih.ring_obj = NULL;
        }
}
2707
2708 void r600_rlc_stop(struct radeon_device *rdev)
2709 {
2710
2711         if ((rdev->family >= CHIP_RV770) &&
2712             (rdev->family <= CHIP_RV740)) {
2713                 /* r7xx asics need to soft reset RLC before halting */
2714                 WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
2715                 RREG32(SRBM_SOFT_RESET);
2716                 udelay(15000);
2717                 WREG32(SRBM_SOFT_RESET, 0);
2718                 RREG32(SRBM_SOFT_RESET);
2719         }
2720
2721         WREG32(RLC_CNTL, 0);
2722 }
2723
/* Re-enable the RLC after its microcode has been loaded. */
static void r600_rlc_start(struct radeon_device *rdev)
{
        WREG32(RLC_CNTL, RLC_ENABLE);
}
2728
2729 static int r600_rlc_init(struct radeon_device *rdev)
2730 {
2731         u32 i;
2732         const __be32 *fw_data;
2733
2734         if (!rdev->rlc_fw)
2735                 return -EINVAL;
2736
2737         r600_rlc_stop(rdev);
2738
2739         WREG32(RLC_HB_BASE, 0);
2740         WREG32(RLC_HB_CNTL, 0);
2741         WREG32(RLC_HB_RPTR, 0);
2742         WREG32(RLC_HB_WPTR, 0);
2743         WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
2744         WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
2745         WREG32(RLC_MC_CNTL, 0);
2746         WREG32(RLC_UCODE_CNTL, 0);
2747
2748         fw_data = (const __be32 *)rdev->rlc_fw->data;
2749         if (rdev->family >= CHIP_CEDAR) {
2750                 for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) {
2751                         WREG32(RLC_UCODE_ADDR, i);
2752                         WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
2753                 }
2754         } else if (rdev->family >= CHIP_RV770) {
2755                 for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
2756                         WREG32(RLC_UCODE_ADDR, i);
2757                         WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
2758                 }
2759         } else {
2760                 for (i = 0; i < RLC_UCODE_SIZE; i++) {
2761                         WREG32(RLC_UCODE_ADDR, i);
2762                         WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
2763                 }
2764         }
2765         WREG32(RLC_UCODE_ADDR, 0);
2766
2767         r600_rlc_start(rdev);
2768
2769         return 0;
2770 }
2771
2772 static void r600_enable_interrupts(struct radeon_device *rdev)
2773 {
2774         u32 ih_cntl = RREG32(IH_CNTL);
2775         u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
2776
2777         ih_cntl |= ENABLE_INTR;
2778         ih_rb_cntl |= IH_RB_ENABLE;
2779         WREG32(IH_CNTL, ih_cntl);
2780         WREG32(IH_RB_CNTL, ih_rb_cntl);
2781         rdev->ih.enabled = true;
2782 }
2783
2784 void r600_disable_interrupts(struct radeon_device *rdev)
2785 {
2786         u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
2787         u32 ih_cntl = RREG32(IH_CNTL);
2788
2789         ih_rb_cntl &= ~IH_RB_ENABLE;
2790         ih_cntl &= ~ENABLE_INTR;
2791         WREG32(IH_RB_CNTL, ih_rb_cntl);
2792         WREG32(IH_CNTL, ih_cntl);
2793         /* set rptr, wptr to 0 */
2794         WREG32(IH_RB_RPTR, 0);
2795         WREG32(IH_RB_WPTR, 0);
2796         rdev->ih.enabled = false;
2797         rdev->ih.wptr = 0;
2798         rdev->ih.rptr = 0;
2799 }
2800
/*
 * Mask every interrupt source: CP, GRBM, display modes, DAC
 * autodetect and hot-plug detect.  HPD registers are read-modify-
 * written to preserve the polarity bit.  The register set differs
 * between DCE3+ and older display blocks.
 */
static void r600_disable_interrupt_state(struct radeon_device *rdev)
{
        u32 tmp;

        WREG32(CP_INT_CNTL, 0);
        WREG32(GRBM_INT_CNTL, 0);
        WREG32(DxMODE_INT_MASK, 0);
        if (ASIC_IS_DCE3(rdev)) {
                WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
                WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
                tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
                WREG32(DC_HPD1_INT_CONTROL, tmp);
                tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
                WREG32(DC_HPD2_INT_CONTROL, tmp);
                tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
                WREG32(DC_HPD3_INT_CONTROL, tmp);
                tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
                WREG32(DC_HPD4_INT_CONTROL, tmp);
                /* DCE3.2 parts have two extra HPD lines */
                if (ASIC_IS_DCE32(rdev)) {
                        tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
                        WREG32(DC_HPD5_INT_CONTROL, tmp);
                        tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
                        WREG32(DC_HPD6_INT_CONTROL, tmp);
                }
        } else {
                WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
                WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
                tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
                WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
                tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
                WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
                tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
                WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
        }
}
2836
/*
 * Bring up the interrupt handler (IH): allocate the IH ring, start
 * the RLC, program the IH ring buffer registers, mask all interrupt
 * sources and finally enable the IH.
 *
 * Returns 0 on success or a negative error code if the ring
 * allocation or RLC init fails (the ring is freed on RLC failure).
 */
int r600_irq_init(struct radeon_device *rdev)
{
	int ret = 0;
	int rb_bufsz;
	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;

	/* allocate ring */
	ret = r600_ih_ring_alloc(rdev);
	if (ret)
		return ret;

	/* disable irqs */
	r600_disable_interrupts(rdev);

	/* init rlc */
	ret = r600_rlc_init(rdev);
	if (ret) {
		r600_ih_ring_fini(rdev);
		return ret;
	}

	/* setup interrupt control */
	/* set dummy read address to ring address */
	WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
	interrupt_cntl = RREG32(INTERRUPT_CNTL);
	/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
	 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
	 */
	interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
	/* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
	interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
	WREG32(INTERRUPT_CNTL, interrupt_cntl);

	WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
	/* ring size is encoded as log2 of the dword count */
	rb_bufsz = drm_order(rdev->ih.ring_size / 4);

	ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
		      IH_WPTR_OVERFLOW_CLEAR |
		      (rb_bufsz << 1));
	/* WPTR writeback, not yet */
	/*ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;*/
	WREG32(IH_RB_WPTR_ADDR_LO, 0);
	WREG32(IH_RB_WPTR_ADDR_HI, 0);

	WREG32(IH_RB_CNTL, ih_rb_cntl);

	/* set rptr, wptr to 0 */
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);

	/* Default settings for IH_CNTL (disabled at first) */
	ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10);
	/* RPTR_REARM only works if msi's are enabled */
	if (rdev->msi_enabled)
		ih_cntl |= RPTR_REARM;

#ifdef __BIG_ENDIAN
	/* the IH ring entries are little-endian in memory */
	ih_cntl |= IH_MC_SWAP(IH_MC_SWAP_32BIT);
#endif
	WREG32(IH_CNTL, ih_cntl);

	/* force the active interrupt state to all disabled */
	if (rdev->family >= CHIP_CEDAR)
		evergreen_disable_interrupt_state(rdev);
	else
		r600_disable_interrupt_state(rdev);

	/* enable irqs */
	r600_enable_interrupts(rdev);

	return ret;
}
2909
/* Quiesce the interrupt handler for suspend: disable and ack all
 * interrupt sources, then stop the RLC. */
void r600_irq_suspend(struct radeon_device *rdev)
{
	r600_irq_disable(rdev);
	r600_rlc_stop(rdev);
}
2915
/* Tear down the interrupt handler: quiesce it as for suspend, then
 * free the IH ring buffer. */
void r600_irq_fini(struct radeon_device *rdev)
{
	r600_irq_suspend(rdev);
	r600_ih_ring_fini(rdev);
}
2921
/*
 * Program the hardware interrupt enables from the driver's requested
 * state in rdev->irq (software ints, vblank, hotplug, HDMI, GUI idle).
 * All enable masks are computed first, then written out in one pass.
 *
 * Returns 0 on success, -EINVAL if no IRQ handler is installed.
 */
int r600_irq_set(struct radeon_device *rdev)
{
	u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
	u32 mode_int = 0;
	/* hpd4-6 pre-zeroed: only assigned on DCE3/DCE3.2 paths below */
	u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
	u32 grbm_int_cntl = 0;
	u32 hdmi1, hdmi2;

	if (!rdev->irq.installed) {
		WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
		return -EINVAL;
	}
	/* don't enable anything if the ih is disabled */
	if (!rdev->ih.enabled) {
		r600_disable_interrupts(rdev);
		/* force the active interrupt state to all disabled */
		r600_disable_interrupt_state(rdev);
		return 0;
	}

	/* read current control regs with the enable bits cleared; the
	 * requested enables are OR-ed back in below */
	hdmi1 = RREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
	if (ASIC_IS_DCE3(rdev)) {
		hdmi2 = RREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
		hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
		if (ASIC_IS_DCE32(rdev)) {
			hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
			hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
		}
	} else {
		/* pre-DCE3: second HDMI block and only three HPD pins */
		hdmi2 = RREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
		hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
	}

	if (rdev->irq.sw_int) {
		DRM_DEBUG("r600_irq_set: sw int\n");
		cp_int_cntl |= RB_INT_ENABLE;
	}
	if (rdev->irq.crtc_vblank_int[0]) {
		DRM_DEBUG("r600_irq_set: vblank 0\n");
		mode_int |= D1MODE_VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[1]) {
		DRM_DEBUG("r600_irq_set: vblank 1\n");
		mode_int |= D2MODE_VBLANK_INT_MASK;
	}
	if (rdev->irq.hpd[0]) {
		DRM_DEBUG("r600_irq_set: hpd 1\n");
		hpd1 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[1]) {
		DRM_DEBUG("r600_irq_set: hpd 2\n");
		hpd2 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[2]) {
		DRM_DEBUG("r600_irq_set: hpd 3\n");
		hpd3 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[3]) {
		DRM_DEBUG("r600_irq_set: hpd 4\n");
		hpd4 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[4]) {
		DRM_DEBUG("r600_irq_set: hpd 5\n");
		hpd5 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[5]) {
		DRM_DEBUG("r600_irq_set: hpd 6\n");
		hpd6 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hdmi[0]) {
		DRM_DEBUG("r600_irq_set: hdmi 1\n");
		hdmi1 |= R600_HDMI_INT_EN;
	}
	if (rdev->irq.hdmi[1]) {
		DRM_DEBUG("r600_irq_set: hdmi 2\n");
		hdmi2 |= R600_HDMI_INT_EN;
	}
	if (rdev->irq.gui_idle) {
		DRM_DEBUG("gui idle\n");
		grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
	}

	/* write the computed masks back to the hardware */
	WREG32(CP_INT_CNTL, cp_int_cntl);
	WREG32(DxMODE_INT_MASK, mode_int);
	WREG32(GRBM_INT_CNTL, grbm_int_cntl);
	WREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, hdmi1);
	if (ASIC_IS_DCE3(rdev)) {
		WREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, hdmi2);
		WREG32(DC_HPD1_INT_CONTROL, hpd1);
		WREG32(DC_HPD2_INT_CONTROL, hpd2);
		WREG32(DC_HPD3_INT_CONTROL, hpd3);
		WREG32(DC_HPD4_INT_CONTROL, hpd4);
		if (ASIC_IS_DCE32(rdev)) {
			WREG32(DC_HPD5_INT_CONTROL, hpd5);
			WREG32(DC_HPD6_INT_CONTROL, hpd6);
		}
	} else {
		WREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, hdmi2);
		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
	}

	return 0;
}
3032
3033 static inline void r600_irq_ack(struct radeon_device *rdev,
3034                                 u32 *disp_int,
3035                                 u32 *disp_int_cont,
3036                                 u32 *disp_int_cont2)
3037 {
3038         u32 tmp;
3039
3040         if (ASIC_IS_DCE3(rdev)) {
3041                 *disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
3042                 *disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
3043                 *disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
3044         } else {
3045                 *disp_int = RREG32(DISP_INTERRUPT_STATUS);
3046                 *disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
3047                 *disp_int_cont2 = 0;
3048         }
3049
3050         if (*disp_int & LB_D1_VBLANK_INTERRUPT)
3051                 WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
3052         if (*disp_int & LB_D1_VLINE_INTERRUPT)
3053                 WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
3054         if (*disp_int & LB_D2_VBLANK_INTERRUPT)
3055                 WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
3056         if (*disp_int & LB_D2_VLINE_INTERRUPT)
3057                 WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
3058         if (*disp_int & DC_HPD1_INTERRUPT) {
3059                 if (ASIC_IS_DCE3(rdev)) {
3060                         tmp = RREG32(DC_HPD1_INT_CONTROL);
3061                         tmp |= DC_HPDx_INT_ACK;
3062                         WREG32(DC_HPD1_INT_CONTROL, tmp);
3063                 } else {
3064                         tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
3065                         tmp |= DC_HPDx_INT_ACK;
3066                         WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
3067                 }
3068         }
3069         if (*disp_int & DC_HPD2_INTERRUPT) {
3070                 if (ASIC_IS_DCE3(rdev)) {
3071                         tmp = RREG32(DC_HPD2_INT_CONTROL);
3072                         tmp |= DC_HPDx_INT_ACK;
3073                         WREG32(DC_HPD2_INT_CONTROL, tmp);
3074                 } else {
3075                         tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
3076                         tmp |= DC_HPDx_INT_ACK;
3077                         WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
3078                 }
3079         }
3080         if (*disp_int_cont & DC_HPD3_INTERRUPT) {
3081                 if (ASIC_IS_DCE3(rdev)) {
3082                         tmp = RREG32(DC_HPD3_INT_CONTROL);
3083                         tmp |= DC_HPDx_INT_ACK;
3084                         WREG32(DC_HPD3_INT_CONTROL, tmp);
3085                 } else {
3086                         tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
3087                         tmp |= DC_HPDx_INT_ACK;
3088                         WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
3089                 }
3090         }
3091         if (*disp_int_cont & DC_HPD4_INTERRUPT) {
3092                 tmp = RREG32(DC_HPD4_INT_CONTROL);
3093                 tmp |= DC_HPDx_INT_ACK;
3094                 WREG32(DC_HPD4_INT_CONTROL, tmp);
3095         }
3096         if (ASIC_IS_DCE32(rdev)) {
3097                 if (*disp_int_cont2 & DC_HPD5_INTERRUPT) {
3098                         tmp = RREG32(DC_HPD5_INT_CONTROL);
3099                         tmp |= DC_HPDx_INT_ACK;
3100                         WREG32(DC_HPD5_INT_CONTROL, tmp);
3101                 }
3102                 if (*disp_int_cont2 & DC_HPD6_INTERRUPT) {
3103                         tmp = RREG32(DC_HPD5_INT_CONTROL);
3104                         tmp |= DC_HPDx_INT_ACK;
3105                         WREG32(DC_HPD6_INT_CONTROL, tmp);
3106                 }
3107         }
3108         if (RREG32(R600_HDMI_BLOCK1 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
3109                 WREG32_P(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
3110         }
3111         if (ASIC_IS_DCE3(rdev)) {
3112                 if (RREG32(R600_HDMI_BLOCK3 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
3113                         WREG32_P(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
3114                 }
3115         } else {
3116                 if (RREG32(R600_HDMI_BLOCK2 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
3117                         WREG32_P(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
3118                 }
3119         }
3120 }
3121
/* Fully disable interrupts: turn off the IH, give in-flight interrupts
 * a moment to land, acknowledge anything still pending, then mask all
 * interrupt sources at the block level. */
void r600_irq_disable(struct radeon_device *rdev)
{
	u32 disp_int, disp_int_cont, disp_int_cont2;

	r600_disable_interrupts(rdev);
	/* Wait and acknowledge irq */
	mdelay(1);
	r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2);
	r600_disable_interrupt_state(rdev);
}
3132
3133 static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
3134 {
3135         u32 wptr, tmp;
3136
3137         /* XXX use writeback */
3138         wptr = RREG32(IH_RB_WPTR);
3139
3140         if (wptr & RB_OVERFLOW) {
3141                 /* When a ring buffer overflow happen start parsing interrupt
3142                  * from the last not overwritten vector (wptr + 16). Hopefully
3143                  * this should allow us to catchup.
3144                  */
3145                 dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
3146                         wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask);
3147                 rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
3148                 tmp = RREG32(IH_RB_CNTL);
3149                 tmp |= IH_WPTR_OVERFLOW_CLEAR;
3150                 WREG32(IH_RB_CNTL, tmp);
3151         }
3152         return (wptr & rdev->ih.ptr_mask);
3153 }
3154
3155 /*        r600 IV Ring
3156  * Each IV ring entry is 128 bits:
3157  * [7:0]    - interrupt source id
3158  * [31:8]   - reserved
3159  * [59:32]  - interrupt source data
3160  * [127:60]  - reserved
3161  *
3162  * The basic interrupt vector entries
3163  * are decoded as follows:
3164  * src_id  src_data  description
3165  *      1         0  D1 Vblank
3166  *      1         1  D1 Vline
3167  *      5         0  D2 Vblank
3168  *      5         1  D2 Vline
3169  *     19         0  FP Hot plug detection A
3170  *     19         1  FP Hot plug detection B
3171  *     19         2  DAC A auto-detection
3172  *     19         3  DAC B auto-detection
3173  *     21         4  HDMI block A
3174  *     21         5  HDMI block B
3175  *    176         -  CP_INT RB
3176  *    177         -  CP_INT IB1
3177  *    178         -  CP_INT IB2
3178  *    181         -  EOP Interrupt
3179  *    233         -  GUI Idle
3180  *
3181  * Note, these are based on r600 and may need to be
3182  * adjusted or added to on newer asics
3183  */
3184
3185 int r600_irq_process(struct radeon_device *rdev)
3186 {
3187         u32 wptr = r600_get_ih_wptr(rdev);
3188         u32 rptr = rdev->ih.rptr;
3189         u32 src_id, src_data;
3190         u32 ring_index, disp_int, disp_int_cont, disp_int_cont2;
3191         unsigned long flags;
3192         bool queue_hotplug = false;
3193
3194         DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
3195         if (!rdev->ih.enabled)
3196                 return IRQ_NONE;
3197
3198         spin_lock_irqsave(&rdev->ih.lock, flags);
3199
3200         if (rptr == wptr) {
3201                 spin_unlock_irqrestore(&rdev->ih.lock, flags);
3202                 return IRQ_NONE;
3203         }
3204         if (rdev->shutdown) {
3205                 spin_unlock_irqrestore(&rdev->ih.lock, flags);
3206                 return IRQ_NONE;
3207         }
3208
3209 restart_ih:
3210         /* display interrupts */
3211         r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2);
3212
3213         rdev->ih.wptr = wptr;
3214         while (rptr != wptr) {
3215                 /* wptr/rptr are in bytes! */
3216                 ring_index = rptr / 4;
3217                 src_id =  rdev->ih.ring[ring_index] & 0xff;
3218                 src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff;
3219
3220                 switch (src_id) {
3221                 case 1: /* D1 vblank/vline */
3222                         switch (src_data) {
3223                         case 0: /* D1 vblank */
3224                                 if (disp_int & LB_D1_VBLANK_INTERRUPT) {
3225                                         drm_handle_vblank(rdev->ddev, 0);
3226                                         rdev->pm.vblank_sync = true;
3227                                         wake_up(&rdev->irq.vblank_queue);
3228                                         disp_int &= ~LB_D1_VBLANK_INTERRUPT;
3229                                         DRM_DEBUG("IH: D1 vblank\n");
3230                                 }
3231                                 break;
3232                         case 1: /* D1 vline */
3233                                 if (disp_int & LB_D1_VLINE_INTERRUPT) {
3234                                         disp_int &= ~LB_D1_VLINE_INTERRUPT;
3235                                         DRM_DEBUG("IH: D1 vline\n");
3236                                 }
3237                                 break;
3238                         default:
3239                                 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3240                                 break;
3241                         }
3242                         break;
3243                 case 5: /* D2 vblank/vline */
3244                         switch (src_data) {
3245                         case 0: /* D2 vblank */
3246                                 if (disp_int & LB_D2_VBLANK_INTERRUPT) {
3247                                         drm_handle_vblank(rdev->ddev, 1);
3248                                         rdev->pm.vblank_sync = true;
3249                                         wake_up(&rdev->irq.vblank_queue);
3250                                         disp_int &= ~LB_D2_VBLANK_INTERRUPT;
3251                                         DRM_DEBUG("IH: D2 vblank\n");
3252                                 }
3253                                 break;
3254                         case 1: /* D1 vline */
3255                                 if (disp_int & LB_D2_VLINE_INTERRUPT) {
3256                                         disp_int &= ~LB_D2_VLINE_INTERRUPT;
3257                                         DRM_DEBUG("IH: D2 vline\n");
3258                                 }
3259                                 break;
3260                         default:
3261                                 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3262                                 break;
3263                         }
3264                         break;
3265                 case 19: /* HPD/DAC hotplug */
3266                         switch (src_data) {
3267                         case 0:
3268                                 if (disp_int & DC_HPD1_INTERRUPT) {
3269                                         disp_int &= ~DC_HPD1_INTERRUPT;
3270                                         queue_hotplug = true;
3271                                         DRM_DEBUG("IH: HPD1\n");
3272                                 }
3273                                 break;
3274                         case 1:
3275                                 if (disp_int & DC_HPD2_INTERRUPT) {
3276                                         disp_int &= ~DC_HPD2_INTERRUPT;
3277                                         queue_hotplug = true;
3278                                         DRM_DEBUG("IH: HPD2\n");
3279                                 }
3280                                 break;
3281                         case 4:
3282                                 if (disp_int_cont & DC_HPD3_INTERRUPT) {
3283                                         disp_int_cont &= ~DC_HPD3_INTERRUPT;
3284                                         queue_hotplug = true;
3285                                         DRM_DEBUG("IH: HPD3\n");
3286                                 }
3287                                 break;
3288                         case 5:
3289                                 if (disp_int_cont & DC_HPD4_INTERRUPT) {
3290                                         disp_int_cont &= ~DC_HPD4_INTERRUPT;
3291                                         queue_hotplug = true;
3292                                         DRM_DEBUG("IH: HPD4\n");
3293                                 }
3294                                 break;
3295                         case 10:
3296                                 if (disp_int_cont2 & DC_HPD5_INTERRUPT) {
3297                                         disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
3298                                         queue_hotplug = true;
3299                                         DRM_DEBUG("IH: HPD5\n");
3300                                 }
3301                                 break;
3302                         case 12:
3303                                 if (disp_int_cont2 & DC_HPD6_INTERRUPT) {
3304                                         disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
3305                                         queue_hotplug = true;
3306                                         DRM_DEBUG("IH: HPD6\n");
3307                                 }
3308                                 break;
3309                         default:
3310                                 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3311                                 break;
3312                         }
3313                         break;
3314                 case 21: /* HDMI */
3315                         DRM_DEBUG("IH: HDMI: 0x%x\n", src_data);
3316                         r600_audio_schedule_polling(rdev);
3317                         break;
3318                 case 176: /* CP_INT in ring buffer */
3319                 case 177: /* CP_INT in IB1 */
3320                 case 178: /* CP_INT in IB2 */
3321                         DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
3322                         radeon_fence_process(rdev);
3323                         break;
3324                 case 181: /* CP EOP event */
3325                         DRM_DEBUG("IH: CP EOP\n");
3326                         break;
3327                 case 233: /* GUI IDLE */
3328                         DRM_DEBUG("IH: CP EOP\n");
3329                         rdev->pm.gui_idle = true;
3330                         wake_up(&rdev->irq.idle_queue);
3331                         break;
3332                 default:
3333                         DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3334                         break;
3335                 }
3336
3337                 /* wptr/rptr are in bytes! */
3338                 rptr += 16;
3339                 rptr &= rdev->ih.ptr_mask;
3340         }
3341         /* make sure wptr hasn't changed while processing */
3342         wptr = r600_get_ih_wptr(rdev);
3343         if (wptr != rdev->ih.wptr)
3344                 goto restart_ih;
3345         if (queue_hotplug)
3346                 queue_work(rdev->wq, &rdev->hotplug_work);
3347         rdev->ih.rptr = rptr;
3348         WREG32(IH_RB_RPTR, rdev->ih.rptr);
3349         spin_unlock_irqrestore(&rdev->ih.lock, flags);
3350         return IRQ_HANDLED;
3351 }
3352
3353 /*
3354  * Debugfs info
3355  */
3356 #if defined(CONFIG_DEBUG_FS)
3357
/* debugfs: dump CP ring state (hw and driver pointers, free space)
 * and the ring contents from the driver's rptr onward. */
static int r600_debugfs_cp_ring_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	unsigned count, i, j;

	radeon_ring_free_size(rdev);
	count = (rdev->cp.ring_size / 4) - rdev->cp.ring_free_dw;
	seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT));
	seq_printf(m, "CP_RB_WPTR 0x%08x\n", RREG32(CP_RB_WPTR));
	seq_printf(m, "CP_RB_RPTR 0x%08x\n", RREG32(CP_RB_RPTR));
	seq_printf(m, "driver's copy of the CP_RB_WPTR 0x%08x\n", rdev->cp.wptr);
	seq_printf(m, "driver's copy of the CP_RB_RPTR 0x%08x\n", rdev->cp.rptr);
	seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
	seq_printf(m, "%u dwords in ring\n", count);
	i = rdev->cp.rptr;
	/* NOTE(review): "<=" prints count+1 dwords — presumably deliberate
	 * to include the slot at wptr; confirm before changing. */
	for (j = 0; j <= count; j++) {
		seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
		i = (i + 1) & rdev->cp.ptr_mask;
	}
	return 0;
}
3381
/* debugfs: dump a couple of MC/SRBM status registers. */
static int r600_debugfs_mc_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	DREG32_SYS(m, rdev, R_000E50_SRBM_STATUS);
	DREG32_SYS(m, rdev, VM_L2_STATUS);
	return 0;
}
3392
/* debugfs entries registered by r600_debugfs_mc_info_init() */
static struct drm_info_list r600_mc_info_list[] = {
	{"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
	{"r600_ring_info", r600_debugfs_cp_ring_info, 0, NULL},
};
3397 #endif
3398
/* Register the r600 debugfs files; a no-op when CONFIG_DEBUG_FS is off.
 * Returns 0 on success or the error from radeon_debugfs_add_files(). */
int r600_debugfs_mc_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, r600_mc_info_list, ARRAY_SIZE(r600_mc_info_list));
#else
	return 0;
#endif
}
3407
/**
 * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
 * rdev: radeon device structure
 * bo: buffer object struct which userspace is waiting for idle
 *     (currently unused; the flush is global)
 *
 * Some R6XX/R7XX doesn't seems to take into account HDP flush performed
 * through ring buffer, this leads to corruption in rendering, see
 * http://bugzilla.kernel.org/show_bug.cgi?id=15186 to avoid this we
 * directly perform HDP flush by writing register through MMIO.
 */
void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
{
	WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
}