Merge branch 'drm-fbdev-cleanup' into drm-core-next
[pandora-kernel.git] / drivers / gpu / drm / radeon / r600.c
1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <linux/slab.h>
29 #include <linux/seq_file.h>
30 #include <linux/firmware.h>
31 #include <linux/platform_device.h>
32 #include "drmP.h"
33 #include "radeon_drm.h"
34 #include "radeon.h"
35 #include "radeon_asic.h"
36 #include "radeon_mode.h"
37 #include "r600d.h"
38 #include "atom.h"
39 #include "avivod.h"
40
41 #define PFP_UCODE_SIZE 576
42 #define PM4_UCODE_SIZE 1792
43 #define RLC_UCODE_SIZE 768
44 #define R700_PFP_UCODE_SIZE 848
45 #define R700_PM4_UCODE_SIZE 1360
46 #define R700_RLC_UCODE_SIZE 1024
47
48 /* Firmware Names */
49 MODULE_FIRMWARE("radeon/R600_pfp.bin");
50 MODULE_FIRMWARE("radeon/R600_me.bin");
51 MODULE_FIRMWARE("radeon/RV610_pfp.bin");
52 MODULE_FIRMWARE("radeon/RV610_me.bin");
53 MODULE_FIRMWARE("radeon/RV630_pfp.bin");
54 MODULE_FIRMWARE("radeon/RV630_me.bin");
55 MODULE_FIRMWARE("radeon/RV620_pfp.bin");
56 MODULE_FIRMWARE("radeon/RV620_me.bin");
57 MODULE_FIRMWARE("radeon/RV635_pfp.bin");
58 MODULE_FIRMWARE("radeon/RV635_me.bin");
59 MODULE_FIRMWARE("radeon/RV670_pfp.bin");
60 MODULE_FIRMWARE("radeon/RV670_me.bin");
61 MODULE_FIRMWARE("radeon/RS780_pfp.bin");
62 MODULE_FIRMWARE("radeon/RS780_me.bin");
63 MODULE_FIRMWARE("radeon/RV770_pfp.bin");
64 MODULE_FIRMWARE("radeon/RV770_me.bin");
65 MODULE_FIRMWARE("radeon/RV730_pfp.bin");
66 MODULE_FIRMWARE("radeon/RV730_me.bin");
67 MODULE_FIRMWARE("radeon/RV710_pfp.bin");
68 MODULE_FIRMWARE("radeon/RV710_me.bin");
69 MODULE_FIRMWARE("radeon/R600_rlc.bin");
70 MODULE_FIRMWARE("radeon/R700_rlc.bin");
71
72 int r600_debugfs_mc_info_init(struct radeon_device *rdev);
73
74 /* r600,rv610,rv630,rv620,rv635,rv670 */
75 int r600_mc_wait_for_idle(struct radeon_device *rdev);
76 void r600_gpu_init(struct radeon_device *rdev);
77 void r600_fini(struct radeon_device *rdev);
78
79 /* hpd for digital panel detect/disconnect */
80 bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
81 {
82         bool connected = false;
83
84         if (ASIC_IS_DCE3(rdev)) {
85                 switch (hpd) {
86                 case RADEON_HPD_1:
87                         if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
88                                 connected = true;
89                         break;
90                 case RADEON_HPD_2:
91                         if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
92                                 connected = true;
93                         break;
94                 case RADEON_HPD_3:
95                         if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
96                                 connected = true;
97                         break;
98                 case RADEON_HPD_4:
99                         if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
100                                 connected = true;
101                         break;
102                         /* DCE 3.2 */
103                 case RADEON_HPD_5:
104                         if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
105                                 connected = true;
106                         break;
107                 case RADEON_HPD_6:
108                         if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
109                                 connected = true;
110                         break;
111                 default:
112                         break;
113                 }
114         } else {
115                 switch (hpd) {
116                 case RADEON_HPD_1:
117                         if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
118                                 connected = true;
119                         break;
120                 case RADEON_HPD_2:
121                         if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
122                                 connected = true;
123                         break;
124                 case RADEON_HPD_3:
125                         if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
126                                 connected = true;
127                         break;
128                 default:
129                         break;
130                 }
131         }
132         return connected;
133 }
134
/*
 * Program the HPD interrupt polarity for one pad so the next interrupt
 * fires on a state *change*: if the connector is currently sensed as
 * connected, arm for the disconnect edge (clear the polarity bit),
 * otherwise arm for the connect edge (set it).
 */
void r600_hpd_set_polarity(struct radeon_device *rdev,
			   enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = r600_hpd_sense(rdev, hpd);

	if (ASIC_IS_DCE3(rdev)) {
		/* DCE3+ uses the DC_HPDx register block */
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HPD1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HPD2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HPD3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD3_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_4:
			tmp = RREG32(DC_HPD4_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD4_INT_CONTROL, tmp);
			break;
		/* DCE 3.2 adds the HPD5/HPD6 pads */
		case RADEON_HPD_5:
			tmp = RREG32(DC_HPD5_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_6:
			tmp = RREG32(DC_HPD6_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	} else {
		/* pre-DCE3: only three hot-plug detect pads */
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	}
}
226
/*
 * Enable the hot-plug detect pads for every connector on this device
 * and mark the corresponding rdev->irq.hpd slots so the interrupt
 * handler knows which pads are live.  Finishes by reprogramming the
 * interrupt registers if the IRQ handler is already installed.
 */
void r600_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	if (ASIC_IS_DCE3(rdev)) {
		/* Connection timer 0x9c4, RX interrupt timer 0xfa; only
		 * DCE3.2 additionally needs the explicit enable bit. */
		u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
		if (ASIC_IS_DCE32(rdev))
			tmp |= DC_HPDx_EN;

		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, tmp);
				rdev->irq.hpd[0] = true;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, tmp);
				rdev->irq.hpd[1] = true;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, tmp);
				rdev->irq.hpd[2] = true;
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, tmp);
				rdev->irq.hpd[3] = true;
				break;
				/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, tmp);
				rdev->irq.hpd[4] = true;
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, tmp);
				rdev->irq.hpd[5] = true;
				break;
			default:
				break;
			}
		}
	} else {
		/* pre-DCE3 pads have a plain enable bit, no timers */
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				rdev->irq.hpd[0] = true;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				rdev->irq.hpd[1] = true;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				rdev->irq.hpd[2] = true;
				break;
			default:
				break;
			}
		}
	}
	/* push the new hpd[] state into the interrupt registers */
	if (rdev->irq.installed)
		r600_irq_set(rdev);
}
293
/*
 * Mirror of r600_hpd_init(): disable every connector's hot-plug detect
 * pad (write 0 to its control register) and clear the matching
 * rdev->irq.hpd slot so the interrupt handler ignores the pad.
 */
void r600_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	if (ASIC_IS_DCE3(rdev)) {
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, 0);
				rdev->irq.hpd[0] = false;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, 0);
				rdev->irq.hpd[1] = false;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, 0);
				rdev->irq.hpd[2] = false;
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, 0);
				rdev->irq.hpd[3] = false;
				break;
				/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, 0);
				rdev->irq.hpd[4] = false;
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, 0);
				rdev->irq.hpd[5] = false;
				break;
			default:
				break;
			}
		}
	} else {
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
				rdev->irq.hpd[0] = false;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
				rdev->irq.hpd[1] = false;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
				rdev->irq.hpd[2] = false;
				break;
			default:
				break;
			}
		}
	}
}
354
355 /*
356  * R600 PCIE GART
357  */
/*
 * Invalidate the VM context 0 TLB over the whole GTT range and wait for
 * the MC to acknowledge the request.  Also flushes the HDP cache first
 * so any CPU-written page-table entries are visible in VRAM before the
 * invalidation runs.
 */
void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	/* flush hdp cache so updates hit vram */
	WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	/* invalidate the full GTT aperture (addresses are in pages) */
	WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
	/* poll for a response, up to usec_timeout microseconds */
	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
		/* response type 2 indicates the flush failed */
		if (tmp == 2) {
			printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
			return;
		}
		/* any other non-zero response means the flush completed */
		if (tmp) {
			return;
		}
		udelay(1);
	}
}
383
384 int r600_pcie_gart_init(struct radeon_device *rdev)
385 {
386         int r;
387
388         if (rdev->gart.table.vram.robj) {
389                 WARN(1, "R600 PCIE GART already initialized.\n");
390                 return 0;
391         }
392         /* Initialize common gart structure */
393         r = radeon_gart_init(rdev);
394         if (r)
395                 return r;
396         rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
397         return radeon_gart_table_vram_alloc(rdev);
398 }
399
/*
 * Enable PCIE GART translation: pin the page table in VRAM, program the
 * L2/L1 TLB caches, point VM context 0 at the GTT range and the table,
 * and flush the TLB.  The register sequence below follows the hardware
 * programming order and must not be reordered.
 * Returns 0 on success, negative error code on failure.
 */
int r600_pcie_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r, i;

	if (rdev->gart.table.vram.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	/* re-write any PTEs that were set before the table was pinned */
	radeon_gart_restore(rdev);

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	/* HDP reads additionally require strict ordering */
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	/* semaphore clients run in semaphore mode */
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	/* context 0 covers the GTT range; addresses programmed in pages */
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	/* faults outside the range redirect to the dummy page */
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(rdev->dummy_page.addr >> 12));
	/* disable VM contexts 1-7 */
	for (i = 1; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	r600_pcie_gart_tlb_flush(rdev);
	rdev->gart.ready = true;
	return 0;
}
453
/*
 * Disable GART translation: turn off all VM contexts, disable the L2
 * cache and L1 TLBs, then unmap and unpin the page-table BO so it can
 * be moved or freed.
 */
void r600_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;
	int i, r;

	/* Disable all tables */
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	/* Disable L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup L1 TLB control: keep sizes but drop ENABLE_L1_TLB */
	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	if (rdev->gart.table.vram.robj) {
		/* best-effort: if the reserve fails the BO stays pinned */
		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(rdev->gart.table.vram.robj);
			radeon_bo_unpin(rdev->gart.table.vram.robj);
			radeon_bo_unreserve(rdev->gart.table.vram.robj);
		}
	}
}
493
/*
 * Full GART teardown: release the common gart structures, disable the
 * hardware translation (which also unpins the table BO), then free the
 * table BO itself.  The order matters — the BO must be unpinned by
 * r600_pcie_gart_disable() before it can be freed.
 */
void r600_pcie_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	r600_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
}
500
/*
 * Program the MC caches/TLBs for AGP operation.  Same L2/L1 setup as
 * r600_pcie_gart_enable() but with all VM contexts left disabled —
 * AGP translation is handled by the bridge, not the GPU's VM.
 */
void r600_agp_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	/* HDP reads additionally require strict ordering */
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	/* semaphore clients run in semaphore mode */
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	/* leave all VM contexts disabled */
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
}
534
535 int r600_mc_wait_for_idle(struct radeon_device *rdev)
536 {
537         unsigned i;
538         u32 tmp;
539
540         for (i = 0; i < rdev->usec_timeout; i++) {
541                 /* read MC_STATUS */
542                 tmp = RREG32(R_000E50_SRBM_STATUS) & 0x3F00;
543                 if (!tmp)
544                         return 0;
545                 udelay(1);
546         }
547         return -1;
548 }
549
/*
 * Program the memory controller apertures (system aperture, FB
 * location, AGP window).  The MC must be stopped (rv515_mc_stop) while
 * the apertures are reprogrammed and resumed afterwards; the sequence
 * below must not be reordered.
 */
static void r600_mc_program(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	/* stop the MC and display requests before touching apertures */
	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}
	/* Lockout access through VGA aperture (doesn't exist before R600) */
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration: the system aperture must cover both the
	 * VRAM and (on AGP) the GTT range, whichever order they are in */
	if (rdev->flags & RADEON_IS_AGP) {
		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
			/* VRAM before AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.vram_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.gtt_end >> 12);
		} else {
			/* VRAM after AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.gtt_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.vram_end >> 12);
		}
	} else {
		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
	}
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
	/* FB_LOCATION packs start/end in 16MB units: end in [31:16],
	 * start in [15:0] */
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
	WREG32(HDP_NONSURFACE_SIZE, rdev->mc.mc_vram_size | 0x3FF);
	if (rdev->flags & RADEON_IS_AGP) {
		/* AGP window registers use 4MB granularity */
		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
	} else {
		/* BOT > TOP effectively disables the AGP window */
		WREG32(MC_VM_AGP_BASE, 0);
		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	}
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}
	rv515_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it overwriting our objects */
	rv515_vga_render_disable(rdev);
}
615
616 /**
617  * r600_vram_gtt_location - try to find VRAM & GTT location
618  * @rdev: radeon device structure holding all necessary informations
619  * @mc: memory controller structure holding memory informations
620  *
 * Function will try to place VRAM at the same place as in CPU (PCI)
622  * address space as some GPU seems to have issue when we reprogram at
623  * different address space.
624  *
 * If there is not enough space to fit the invisible VRAM after the
626  * aperture then we limit the VRAM size to the aperture.
627  *
 * If we are using AGP then place VRAM adjacent to the AGP aperture, as we
 * need them to form one range from the GPU's point of view so that we can
 * program the GPU to catch accesses outside them (weird GPU policy see ??).
631  *
 * This function never fails; the worst case is limiting VRAM or GTT.
633  *
634  * Note: GTT start, end, size should be initialized before calling this
635  * function on AGP platform.
636  */
637 void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
638 {
639         u64 size_bf, size_af;
640
641         if (mc->mc_vram_size > 0xE0000000) {
642                 /* leave room for at least 512M GTT */
643                 dev_warn(rdev->dev, "limiting VRAM\n");
644                 mc->real_vram_size = 0xE0000000;
645                 mc->mc_vram_size = 0xE0000000;
646         }
647         if (rdev->flags & RADEON_IS_AGP) {
648                 size_bf = mc->gtt_start;
649                 size_af = 0xFFFFFFFF - mc->gtt_end + 1;
650                 if (size_bf > size_af) {
651                         if (mc->mc_vram_size > size_bf) {
652                                 dev_warn(rdev->dev, "limiting VRAM\n");
653                                 mc->real_vram_size = size_bf;
654                                 mc->mc_vram_size = size_bf;
655                         }
656                         mc->vram_start = mc->gtt_start - mc->mc_vram_size;
657                 } else {
658                         if (mc->mc_vram_size > size_af) {
659                                 dev_warn(rdev->dev, "limiting VRAM\n");
660                                 mc->real_vram_size = size_af;
661                                 mc->mc_vram_size = size_af;
662                         }
663                         mc->vram_start = mc->gtt_end;
664                 }
665                 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
666                 dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
667                                 mc->mc_vram_size >> 20, mc->vram_start,
668                                 mc->vram_end, mc->real_vram_size >> 20);
669         } else {
670                 u64 base = 0;
671                 if (rdev->flags & RADEON_IS_IGP)
672                         base = (RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24;
673                 radeon_vram_location(rdev, &rdev->mc, base);
674                 radeon_gtt_location(rdev, mc);
675         }
676 }
677
/*
 * Query the memory configuration from the hardware (channel size and
 * count, aperture, VRAM size) and place the VRAM/GTT ranges in the GPU
 * address space.  Always returns 0.
 */
int r600_mc_init(struct radeon_device *rdev)
{
	u32 tmp;
	int chansize, numchan;

	/* Get VRAM informations */
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32(RAMCFG);
	/* per-channel width in bits, from RAMCFG */
	if (tmp & CHANSIZE_OVERRIDE) {
		chansize = 16;
	} else if (tmp & CHANSIZE_MASK) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(CHMAP);
	/* number of memory channels, encoded in CHMAP */
	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	}
	rdev->mc.vram_width = numchan * chansize;
	/* Could aper size report 0 ? */
	rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
	rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
	/* Setup GPU memory space */
	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.visible_vram_size = rdev->mc.aper_size;
	/* FIXME remove this once we support unmappable VRAM */
	if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
		rdev->mc.mc_vram_size = rdev->mc.aper_size;
		rdev->mc.real_vram_size = rdev->mc.aper_size;
	}
	r600_vram_gtt_location(rdev, &rdev->mc);

	/* IGPs may route memory through a sideport */
	if (rdev->flags & RADEON_IS_IGP)
		rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
	radeon_update_bandwidth_info(rdev);
	return 0;
}
729
/* We don't check whether the GPU really needs a reset; we simply do the
 * reset. It's up to the caller to determine if the GPU needs one. We
 * might add a helper function to check that.
 */
/*
 * r600_gpu_soft_reset - soft-reset the 3D render blocks and the CP
 *
 * Stops the MC around the reset (rv515_mc_stop/resume), halts CP
 * parsing, soft-resets any busy rendering block, and always resets the
 * CP itself.  GRBM/SRBM status registers are dumped before and after
 * the reset to aid debugging.  Always returns 0.
 */
int r600_gpu_soft_reset(struct radeon_device *rdev)
{
        struct rv515_mc_save save;
        /* "any render block busy" mask over GRBM_STATUS */
        u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) |
                                S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) |
                                S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) |
                                S_008010_SH_BUSY(1) | S_008010_SPI03_BUSY(1) |
                                S_008010_SMX_BUSY(1) | S_008010_SC_BUSY(1) |
                                S_008010_PA_BUSY(1) | S_008010_DB03_BUSY(1) |
                                S_008010_CR_BUSY(1) | S_008010_CB03_BUSY(1) |
                                S_008010_GUI_ACTIVE(1);
        /* per-unit busy mask over GRBM_STATUS2 */
        u32 grbm2_busy_mask = S_008014_SPI0_BUSY(1) | S_008014_SPI1_BUSY(1) |
                        S_008014_SPI2_BUSY(1) | S_008014_SPI3_BUSY(1) |
                        S_008014_TA0_BUSY(1) | S_008014_TA1_BUSY(1) |
                        S_008014_TA2_BUSY(1) | S_008014_TA3_BUSY(1) |
                        S_008014_DB0_BUSY(1) | S_008014_DB1_BUSY(1) |
                        S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) |
                        S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
                        S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
        u32 tmp;

        dev_info(rdev->dev, "GPU softreset \n");
        dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
                RREG32(R_008010_GRBM_STATUS));
        dev_info(rdev->dev, "  R_008014_GRBM_STATUS2=0x%08X\n",
                RREG32(R_008014_GRBM_STATUS2));
        dev_info(rdev->dev, "  R_000E50_SRBM_STATUS=0x%08X\n",
                RREG32(R_000E50_SRBM_STATUS));
        rv515_mc_stop(rdev, &save);
        if (r600_mc_wait_for_idle(rdev)) {
                dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
        }
        /* Disable CP parsing/prefetching */
        WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
        /* Check if any of the rendering block is busy and reset it */
        if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
            (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
                tmp = S_008020_SOFT_RESET_CR(1) |
                        S_008020_SOFT_RESET_DB(1) |
                        S_008020_SOFT_RESET_CB(1) |
                        S_008020_SOFT_RESET_PA(1) |
                        S_008020_SOFT_RESET_SC(1) |
                        S_008020_SOFT_RESET_SMX(1) |
                        S_008020_SOFT_RESET_SPI(1) |
                        S_008020_SOFT_RESET_SX(1) |
                        S_008020_SOFT_RESET_SH(1) |
                        S_008020_SOFT_RESET_TC(1) |
                        S_008020_SOFT_RESET_TA(1) |
                        S_008020_SOFT_RESET_VC(1) |
                        S_008020_SOFT_RESET_VGT(1);
                dev_info(rdev->dev, "  R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
                WREG32(R_008020_GRBM_SOFT_RESET, tmp);
                /* read back to post the write, then hold reset briefly */
                RREG32(R_008020_GRBM_SOFT_RESET);
                mdelay(15);
                WREG32(R_008020_GRBM_SOFT_RESET, 0);
        }
        /* Reset CP (we always reset CP) */
        tmp = S_008020_SOFT_RESET_CP(1);
        dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
        WREG32(R_008020_GRBM_SOFT_RESET, tmp);
        RREG32(R_008020_GRBM_SOFT_RESET);
        mdelay(15);
        WREG32(R_008020_GRBM_SOFT_RESET, 0);
        /* Wait a little for things to settle down */
        mdelay(1);
        dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
                RREG32(R_008010_GRBM_STATUS));
        dev_info(rdev->dev, "  R_008014_GRBM_STATUS2=0x%08X\n",
                RREG32(R_008014_GRBM_STATUS2));
        dev_info(rdev->dev, "  R_000E50_SRBM_STATUS=0x%08X\n",
                RREG32(R_000E50_SRBM_STATUS));
        rv515_mc_resume(rdev, &save);
        return 0;
}
808
/*
 * r600_gpu_is_lockup - detect a CP lockup
 *
 * If the GUI engine is idle, the lockup tracker is refreshed and false
 * is returned.  Otherwise two NOP packets are queued to force CP
 * activity and the shared r100 helper decides, based on read-pointer
 * progress, whether the CP is locked up.
 */
bool r600_gpu_is_lockup(struct radeon_device *rdev)
{
        u32 srbm_status;
        u32 grbm_status;
        u32 grbm_status2;
        int r;

        /* srbm_status and grbm_status2 are read but currently unused */
        srbm_status = RREG32(R_000E50_SRBM_STATUS);
        grbm_status = RREG32(R_008010_GRBM_STATUS);
        grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
        if (!G_008010_GUI_ACTIVE(grbm_status)) {
                /* NOTE(review): this reads the r300 member of the per-family
                 * config union on an r600-class chip — presumably the lockup
                 * state sits at a compatible offset in every union member;
                 * verify against struct radeon_device. */
                r100_gpu_lockup_update(&rdev->config.r300.lockup, &rdev->cp);
                return false;
        }
        /* force CP activities */
        r = radeon_ring_lock(rdev, 2);
        if (!r) {
                /* PACKET2 NOP */
                radeon_ring_write(rdev, 0x80000000);
                radeon_ring_write(rdev, 0x80000000);
                radeon_ring_unlock_commit(rdev);
        }
        rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
        return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, &rdev->cp);
}
834
/* ASIC-reset entry point for r600-class chips: just a soft reset. */
int r600_asic_reset(struct radeon_device *rdev)
{
        return r600_gpu_soft_reset(rdev);
}
839
840 static u32 r600_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
841                                              u32 num_backends,
842                                              u32 backend_disable_mask)
843 {
844         u32 backend_map = 0;
845         u32 enabled_backends_mask;
846         u32 enabled_backends_count;
847         u32 cur_pipe;
848         u32 swizzle_pipe[R6XX_MAX_PIPES];
849         u32 cur_backend;
850         u32 i;
851
852         if (num_tile_pipes > R6XX_MAX_PIPES)
853                 num_tile_pipes = R6XX_MAX_PIPES;
854         if (num_tile_pipes < 1)
855                 num_tile_pipes = 1;
856         if (num_backends > R6XX_MAX_BACKENDS)
857                 num_backends = R6XX_MAX_BACKENDS;
858         if (num_backends < 1)
859                 num_backends = 1;
860
861         enabled_backends_mask = 0;
862         enabled_backends_count = 0;
863         for (i = 0; i < R6XX_MAX_BACKENDS; ++i) {
864                 if (((backend_disable_mask >> i) & 1) == 0) {
865                         enabled_backends_mask |= (1 << i);
866                         ++enabled_backends_count;
867                 }
868                 if (enabled_backends_count == num_backends)
869                         break;
870         }
871
872         if (enabled_backends_count == 0) {
873                 enabled_backends_mask = 1;
874                 enabled_backends_count = 1;
875         }
876
877         if (enabled_backends_count != num_backends)
878                 num_backends = enabled_backends_count;
879
880         memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R6XX_MAX_PIPES);
881         switch (num_tile_pipes) {
882         case 1:
883                 swizzle_pipe[0] = 0;
884                 break;
885         case 2:
886                 swizzle_pipe[0] = 0;
887                 swizzle_pipe[1] = 1;
888                 break;
889         case 3:
890                 swizzle_pipe[0] = 0;
891                 swizzle_pipe[1] = 1;
892                 swizzle_pipe[2] = 2;
893                 break;
894         case 4:
895                 swizzle_pipe[0] = 0;
896                 swizzle_pipe[1] = 1;
897                 swizzle_pipe[2] = 2;
898                 swizzle_pipe[3] = 3;
899                 break;
900         case 5:
901                 swizzle_pipe[0] = 0;
902                 swizzle_pipe[1] = 1;
903                 swizzle_pipe[2] = 2;
904                 swizzle_pipe[3] = 3;
905                 swizzle_pipe[4] = 4;
906                 break;
907         case 6:
908                 swizzle_pipe[0] = 0;
909                 swizzle_pipe[1] = 2;
910                 swizzle_pipe[2] = 4;
911                 swizzle_pipe[3] = 5;
912                 swizzle_pipe[4] = 1;
913                 swizzle_pipe[5] = 3;
914                 break;
915         case 7:
916                 swizzle_pipe[0] = 0;
917                 swizzle_pipe[1] = 2;
918                 swizzle_pipe[2] = 4;
919                 swizzle_pipe[3] = 6;
920                 swizzle_pipe[4] = 1;
921                 swizzle_pipe[5] = 3;
922                 swizzle_pipe[6] = 5;
923                 break;
924         case 8:
925                 swizzle_pipe[0] = 0;
926                 swizzle_pipe[1] = 2;
927                 swizzle_pipe[2] = 4;
928                 swizzle_pipe[3] = 6;
929                 swizzle_pipe[4] = 1;
930                 swizzle_pipe[5] = 3;
931                 swizzle_pipe[6] = 5;
932                 swizzle_pipe[7] = 7;
933                 break;
934         }
935
936         cur_backend = 0;
937         for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
938                 while (((1 << cur_backend) & enabled_backends_mask) == 0)
939                         cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
940
941                 backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2)));
942
943                 cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
944         }
945
946         return backend_map;
947 }
948
/*
 * r600_count_pipe_bits - count the set bits in a 32-bit pipe/backend mask
 *
 * Returns the population count of @val.
 */
int r600_count_pipe_bits(uint32_t val)
{
        int count = 0;

        /* Kernighan's trick: each iteration clears the lowest set bit,
         * so the loop runs once per set bit. */
        while (val) {
                val &= val - 1;
                ++count;
        }
        return count;
}
959
/*
 * r600_gpu_init - program the static 3D-engine configuration
 *
 * Records the per-family hardware limits in rdev->config.r600, then
 * programs tiling, render-backend mapping, pipe configuration, the SQ
 * (shader queue) GPR/thread/stack resource split and a set of default
 * render-state registers.  Many of these are defaults that the 2D/3D
 * drivers adjust at runtime.
 */
void r600_gpu_init(struct radeon_device *rdev)
{
        u32 tiling_config;
        u32 ramcfg;
        u32 backend_map;
        u32 cc_rb_backend_disable;
        u32 cc_gc_shader_pipe_config;
        u32 tmp;
        int i, j;
        u32 sq_config;
        u32 sq_gpr_resource_mgmt_1 = 0;
        u32 sq_gpr_resource_mgmt_2 = 0;
        u32 sq_thread_resource_mgmt = 0;
        u32 sq_stack_resource_mgmt_1 = 0;
        u32 sq_stack_resource_mgmt_2 = 0;

        /* FIXME: implement */
        /* Per-family hardware limits (pipes, SIMDs, backends, GPRs, ...). */
        switch (rdev->family) {
        case CHIP_R600:
                rdev->config.r600.max_pipes = 4;
                rdev->config.r600.max_tile_pipes = 8;
                rdev->config.r600.max_simds = 4;
                rdev->config.r600.max_backends = 4;
                rdev->config.r600.max_gprs = 256;
                rdev->config.r600.max_threads = 192;
                rdev->config.r600.max_stack_entries = 256;
                rdev->config.r600.max_hw_contexts = 8;
                rdev->config.r600.max_gs_threads = 16;
                rdev->config.r600.sx_max_export_size = 128;
                rdev->config.r600.sx_max_export_pos_size = 16;
                rdev->config.r600.sx_max_export_smx_size = 128;
                rdev->config.r600.sq_num_cf_insts = 2;
                break;
        case CHIP_RV630:
        case CHIP_RV635:
                rdev->config.r600.max_pipes = 2;
                rdev->config.r600.max_tile_pipes = 2;
                rdev->config.r600.max_simds = 3;
                rdev->config.r600.max_backends = 1;
                rdev->config.r600.max_gprs = 128;
                rdev->config.r600.max_threads = 192;
                rdev->config.r600.max_stack_entries = 128;
                rdev->config.r600.max_hw_contexts = 8;
                rdev->config.r600.max_gs_threads = 4;
                rdev->config.r600.sx_max_export_size = 128;
                rdev->config.r600.sx_max_export_pos_size = 16;
                rdev->config.r600.sx_max_export_smx_size = 128;
                rdev->config.r600.sq_num_cf_insts = 2;
                break;
        case CHIP_RV610:
        case CHIP_RV620:
        case CHIP_RS780:
        case CHIP_RS880:
                rdev->config.r600.max_pipes = 1;
                rdev->config.r600.max_tile_pipes = 1;
                rdev->config.r600.max_simds = 2;
                rdev->config.r600.max_backends = 1;
                rdev->config.r600.max_gprs = 128;
                rdev->config.r600.max_threads = 192;
                rdev->config.r600.max_stack_entries = 128;
                rdev->config.r600.max_hw_contexts = 4;
                rdev->config.r600.max_gs_threads = 4;
                rdev->config.r600.sx_max_export_size = 128;
                rdev->config.r600.sx_max_export_pos_size = 16;
                rdev->config.r600.sx_max_export_smx_size = 128;
                rdev->config.r600.sq_num_cf_insts = 1;
                break;
        case CHIP_RV670:
                rdev->config.r600.max_pipes = 4;
                rdev->config.r600.max_tile_pipes = 4;
                rdev->config.r600.max_simds = 4;
                rdev->config.r600.max_backends = 4;
                rdev->config.r600.max_gprs = 192;
                rdev->config.r600.max_threads = 192;
                rdev->config.r600.max_stack_entries = 256;
                rdev->config.r600.max_hw_contexts = 8;
                rdev->config.r600.max_gs_threads = 16;
                rdev->config.r600.sx_max_export_size = 128;
                rdev->config.r600.sx_max_export_pos_size = 16;
                rdev->config.r600.sx_max_export_smx_size = 128;
                rdev->config.r600.sq_num_cf_insts = 2;
                break;
        default:
                break;
        }

        /* Initialize HDP */
        for (i = 0, j = 0; i < 32; i++, j += 0x18) {
                WREG32((0x2c14 + j), 0x00000000);
                WREG32((0x2c18 + j), 0x00000000);
                WREG32((0x2c1c + j), 0x00000000);
                WREG32((0x2c20 + j), 0x00000000);
                WREG32((0x2c24 + j), 0x00000000);
        }

        WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));

        /* Setup tiling */
        tiling_config = 0;
        ramcfg = RREG32(RAMCFG);
        switch (rdev->config.r600.max_tile_pipes) {
        case 1:
                tiling_config |= PIPE_TILING(0);
                break;
        case 2:
                tiling_config |= PIPE_TILING(1);
                break;
        case 4:
                tiling_config |= PIPE_TILING(2);
                break;
        case 8:
                tiling_config |= PIPE_TILING(3);
                break;
        default:
                break;
        }
        rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
        rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
        tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
        tiling_config |= GROUP_SIZE(0);
        rdev->config.r600.tiling_group_size = 256;
        /* row tiling / sample split capped at 3 */
        tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
        if (tmp > 3) {
                tiling_config |= ROW_TILING(3);
                tiling_config |= SAMPLE_SPLIT(3);
        } else {
                tiling_config |= ROW_TILING(tmp);
                tiling_config |= SAMPLE_SPLIT(tmp);
        }
        tiling_config |= BANK_SWAPS(1);

        /* Disable the backends beyond max_backends, keep reserved bits. */
        cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
        cc_rb_backend_disable |=
                BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK);

        /* Disable the pipes/SIMDs beyond the per-family maximums. */
        cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
        cc_gc_shader_pipe_config |=
                INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK);
        cc_gc_shader_pipe_config |=
                INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK);

        backend_map = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes,
                                                        (R6XX_MAX_BACKENDS -
                                                         r600_count_pipe_bits((cc_rb_backend_disable &
                                                                               R6XX_MAX_BACKENDS_MASK) >> 16)),
                                                        (cc_rb_backend_disable >> 16));

        tiling_config |= BACKEND_MAP(backend_map);
        WREG32(GB_TILING_CONFIG, tiling_config);
        WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
        WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);

        /* Setup pipes */
        WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
        WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
        WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);

        /* VGT dealloc/reuse depths scale with the number of active pipes. */
        tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
        WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
        WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);

        /* Setup some CP states */
        WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | ROQ_IB2_START(0x2b)));
        WREG32(CP_MEQ_THRESHOLDS, (MEQ_END(0x40) | ROQ_END(0x40)));

        WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | SYNC_GRADIENT |
                             SYNC_WALKER | SYNC_ALIGNER));
        /* Setup various GPU states */
        if (rdev->family == CHIP_RV670)
                WREG32(ARB_GDEC_RD_CNTL, 0x00000021);

        tmp = RREG32(SX_DEBUG_1);
        tmp |= SMX_EVENT_RELEASE;
        if ((rdev->family > CHIP_R600))
                tmp |= ENABLE_NEW_SMX_ADDRESS;
        WREG32(SX_DEBUG_1, tmp);

        if (((rdev->family) == CHIP_R600) ||
            ((rdev->family) == CHIP_RV630) ||
            ((rdev->family) == CHIP_RV610) ||
            ((rdev->family) == CHIP_RV620) ||
            ((rdev->family) == CHIP_RS780) ||
            ((rdev->family) == CHIP_RS880)) {
                WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE);
        } else {
                WREG32(DB_DEBUG, 0);
        }
        WREG32(DB_WATERMARKS, (DEPTH_FREE(4) | DEPTH_CACHELINE_FREE(16) |
                               DEPTH_FLUSH(16) | DEPTH_PENDING_FREE(4)));

        WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
        WREG32(VGT_NUM_INSTANCES, 0);

        WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
        WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(0));

        /* SQ FIFO sizing differs for the small (no-VC) parts. */
        tmp = RREG32(SQ_MS_FIFO_SIZES);
        if (((rdev->family) == CHIP_RV610) ||
            ((rdev->family) == CHIP_RV620) ||
            ((rdev->family) == CHIP_RS780) ||
            ((rdev->family) == CHIP_RS880)) {
                tmp = (CACHE_FIFO_SIZE(0xa) |
                       FETCH_FIFO_HIWATER(0xa) |
                       DONE_FIFO_HIWATER(0xe0) |
                       ALU_UPDATE_FIFO_HIWATER(0x8));
        } else if (((rdev->family) == CHIP_R600) ||
                   ((rdev->family) == CHIP_RV630)) {
                tmp &= ~DONE_FIFO_HIWATER(0xff);
                tmp |= DONE_FIFO_HIWATER(0x4);
        }
        WREG32(SQ_MS_FIFO_SIZES, tmp);

        /* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
         * should be adjusted as needed by the 2D/3D drivers.  This just sets default values
         */
        sq_config = RREG32(SQ_CONFIG);
        sq_config &= ~(PS_PRIO(3) |
                       VS_PRIO(3) |
                       GS_PRIO(3) |
                       ES_PRIO(3));
        sq_config |= (DX9_CONSTS |
                      VC_ENABLE |
                      PS_PRIO(0) |
                      VS_PRIO(1) |
                      GS_PRIO(2) |
                      ES_PRIO(3));

        /* Per-family SQ resource split between PS/VS/GS/ES. */
        if ((rdev->family) == CHIP_R600) {
                sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(124) |
                                          NUM_VS_GPRS(124) |
                                          NUM_CLAUSE_TEMP_GPRS(4));
                sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(0) |
                                          NUM_ES_GPRS(0));
                sq_thread_resource_mgmt = (NUM_PS_THREADS(136) |
                                           NUM_VS_THREADS(48) |
                                           NUM_GS_THREADS(4) |
                                           NUM_ES_THREADS(4));
                sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(128) |
                                            NUM_VS_STACK_ENTRIES(128));
                sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(0) |
                                            NUM_ES_STACK_ENTRIES(0));
        } else if (((rdev->family) == CHIP_RV610) ||
                   ((rdev->family) == CHIP_RV620) ||
                   ((rdev->family) == CHIP_RS780) ||
                   ((rdev->family) == CHIP_RS880)) {
                /* no vertex cache */
                sq_config &= ~VC_ENABLE;

                sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
                                          NUM_VS_GPRS(44) |
                                          NUM_CLAUSE_TEMP_GPRS(2));
                sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
                                          NUM_ES_GPRS(17));
                sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
                                           NUM_VS_THREADS(78) |
                                           NUM_GS_THREADS(4) |
                                           NUM_ES_THREADS(31));
                sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
                                            NUM_VS_STACK_ENTRIES(40));
                sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
                                            NUM_ES_STACK_ENTRIES(16));
        } else if (((rdev->family) == CHIP_RV630) ||
                   ((rdev->family) == CHIP_RV635)) {
                sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
                                          NUM_VS_GPRS(44) |
                                          NUM_CLAUSE_TEMP_GPRS(2));
                sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(18) |
                                          NUM_ES_GPRS(18));
                sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
                                           NUM_VS_THREADS(78) |
                                           NUM_GS_THREADS(4) |
                                           NUM_ES_THREADS(31));
                sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
                                            NUM_VS_STACK_ENTRIES(40));
                sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
                                            NUM_ES_STACK_ENTRIES(16));
        } else if ((rdev->family) == CHIP_RV670) {
                sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
                                          NUM_VS_GPRS(44) |
                                          NUM_CLAUSE_TEMP_GPRS(2));
                sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
                                          NUM_ES_GPRS(17));
                sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
                                           NUM_VS_THREADS(78) |
                                           NUM_GS_THREADS(4) |
                                           NUM_ES_THREADS(31));
                sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(64) |
                                            NUM_VS_STACK_ENTRIES(64));
                sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(64) |
                                            NUM_ES_STACK_ENTRIES(64));
        }

        WREG32(SQ_CONFIG, sq_config);
        WREG32(SQ_GPR_RESOURCE_MGMT_1,  sq_gpr_resource_mgmt_1);
        WREG32(SQ_GPR_RESOURCE_MGMT_2,  sq_gpr_resource_mgmt_2);
        WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
        WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
        WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);

        /* Parts without a vertex cache only invalidate the texture cache. */
        if (((rdev->family) == CHIP_RV610) ||
            ((rdev->family) == CHIP_RV620) ||
            ((rdev->family) == CHIP_RS780) ||
            ((rdev->family) == CHIP_RS880)) {
                WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY));
        } else {
                WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC));
        }

        /* More default values. 2D/3D driver should adjust as needed */
        WREG32(PA_SC_AA_SAMPLE_LOCS_2S, (S0_X(0xc) | S0_Y(0x4) |
                                         S1_X(0x4) | S1_Y(0xc)));
        WREG32(PA_SC_AA_SAMPLE_LOCS_4S, (S0_X(0xe) | S0_Y(0xe) |
                                         S1_X(0x2) | S1_Y(0x2) |
                                         S2_X(0xa) | S2_Y(0x6) |
                                         S3_X(0x6) | S3_Y(0xa)));
        WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD0, (S0_X(0xe) | S0_Y(0xb) |
                                             S1_X(0x4) | S1_Y(0xc) |
                                             S2_X(0x1) | S2_Y(0x6) |
                                             S3_X(0xa) | S3_Y(0xe)));
        WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD1, (S4_X(0x6) | S4_Y(0x1) |
                                             S5_X(0x0) | S5_Y(0x0) |
                                             S6_X(0xb) | S6_Y(0x4) |
                                             S7_X(0x7) | S7_Y(0x8)));

        WREG32(VGT_STRMOUT_EN, 0);
        /* GS-per-ES limit scales with pipe count, capped at 256. */
        tmp = rdev->config.r600.max_pipes * 16;
        switch (rdev->family) {
        case CHIP_RV610:
        case CHIP_RV620:
        case CHIP_RS780:
        case CHIP_RS880:
                tmp += 32;
                break;
        case CHIP_RV670:
                tmp += 128;
                break;
        default:
                break;
        }
        if (tmp > 256) {
                tmp = 256;
        }
        WREG32(VGT_ES_PER_GS, 128);
        WREG32(VGT_GS_PER_ES, tmp);
        WREG32(VGT_GS_PER_VS, 2);
        WREG32(VGT_GS_VERTEX_REUSE, 16);

        /* more default values. 2D/3D driver should adjust as needed */
        WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
        WREG32(VGT_STRMOUT_EN, 0);
        WREG32(SX_MISC, 0);
        WREG32(PA_SC_MODE_CNTL, 0);
        WREG32(PA_SC_AA_CONFIG, 0);
        WREG32(PA_SC_LINE_STIPPLE, 0);
        WREG32(SPI_INPUT_Z, 0);
        WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
        WREG32(CB_COLOR7_FRAG, 0);

        /* Clear render buffer base addresses */
        WREG32(CB_COLOR0_BASE, 0);
        WREG32(CB_COLOR1_BASE, 0);
        WREG32(CB_COLOR2_BASE, 0);
        WREG32(CB_COLOR3_BASE, 0);
        WREG32(CB_COLOR4_BASE, 0);
        WREG32(CB_COLOR5_BASE, 0);
        WREG32(CB_COLOR6_BASE, 0);
        WREG32(CB_COLOR7_BASE, 0);
        /* NOTE(review): CB_COLOR7_FRAG is written twice (also above);
         * presumably harmless — confirm whether one write is redundant. */
        WREG32(CB_COLOR7_FRAG, 0);

        /* Texture cache L2 sizing per family. */
        switch (rdev->family) {
        case CHIP_RV610:
        case CHIP_RV620:
        case CHIP_RS780:
        case CHIP_RS880:
                tmp = TC_L2_SIZE(8);
                break;
        case CHIP_RV630:
        case CHIP_RV635:
                tmp = TC_L2_SIZE(4);
                break;
        case CHIP_R600:
                tmp = TC_L2_SIZE(0) | L2_DISABLE_LATE_HIT;
                break;
        default:
                tmp = TC_L2_SIZE(0);
                break;
        }
        WREG32(TC_CNTL, tmp);

        /* read-modify-write with no modification: posts/refreshes the value */
        tmp = RREG32(HDP_HOST_PATH_CNTL);
        WREG32(HDP_HOST_PATH_CNTL, tmp);

        tmp = RREG32(ARB_POP);
        tmp |= ENABLE_TC128;
        WREG32(ARB_POP, tmp);

        WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
        WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
                               NUM_CLIP_SEQ(3)));
        WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
}
1361
1362
1363 /*
 * Indirect register accessors
1365  */
1366 u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
1367 {
1368         u32 r;
1369
1370         WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
1371         (void)RREG32(PCIE_PORT_INDEX);
1372         r = RREG32(PCIE_PORT_DATA);
1373         return r;
1374 }
1375
/*
 * r600_pciep_wreg - write an indirect PCIE-port register
 *
 * Selects the register via PCIE_PORT_INDEX, writes the value to
 * PCIE_PORT_DATA, and flushes each write with a read-back so the
 * index/data pair is posted in order.
 */
void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
        WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
        (void)RREG32(PCIE_PORT_INDEX);
        WREG32(PCIE_PORT_DATA, (v));
        (void)RREG32(PCIE_PORT_DATA);
}
1383
1384 /*
1385  * CP & Ring
1386  */
/* Halt the command processor's micro engine (stops fetch/parse). */
void r600_cp_stop(struct radeon_device *rdev)
{
        WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
}
1391
1392 int r600_init_microcode(struct radeon_device *rdev)
1393 {
1394         struct platform_device *pdev;
1395         const char *chip_name;
1396         const char *rlc_chip_name;
1397         size_t pfp_req_size, me_req_size, rlc_req_size;
1398         char fw_name[30];
1399         int err;
1400
1401         DRM_DEBUG("\n");
1402
1403         pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
1404         err = IS_ERR(pdev);
1405         if (err) {
1406                 printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
1407                 return -EINVAL;
1408         }
1409
1410         switch (rdev->family) {
1411         case CHIP_R600:
1412                 chip_name = "R600";
1413                 rlc_chip_name = "R600";
1414                 break;
1415         case CHIP_RV610:
1416                 chip_name = "RV610";
1417                 rlc_chip_name = "R600";
1418                 break;
1419         case CHIP_RV630:
1420                 chip_name = "RV630";
1421                 rlc_chip_name = "R600";
1422                 break;
1423         case CHIP_RV620:
1424                 chip_name = "RV620";
1425                 rlc_chip_name = "R600";
1426                 break;
1427         case CHIP_RV635:
1428                 chip_name = "RV635";
1429                 rlc_chip_name = "R600";
1430                 break;
1431         case CHIP_RV670:
1432                 chip_name = "RV670";
1433                 rlc_chip_name = "R600";
1434                 break;
1435         case CHIP_RS780:
1436         case CHIP_RS880:
1437                 chip_name = "RS780";
1438                 rlc_chip_name = "R600";
1439                 break;
1440         case CHIP_RV770:
1441                 chip_name = "RV770";
1442                 rlc_chip_name = "R700";
1443                 break;
1444         case CHIP_RV730:
1445         case CHIP_RV740:
1446                 chip_name = "RV730";
1447                 rlc_chip_name = "R700";
1448                 break;
1449         case CHIP_RV710:
1450                 chip_name = "RV710";
1451                 rlc_chip_name = "R700";
1452                 break;
1453         default: BUG();
1454         }
1455
1456         if (rdev->family >= CHIP_RV770) {
1457                 pfp_req_size = R700_PFP_UCODE_SIZE * 4;
1458                 me_req_size = R700_PM4_UCODE_SIZE * 4;
1459                 rlc_req_size = R700_RLC_UCODE_SIZE * 4;
1460         } else {
1461                 pfp_req_size = PFP_UCODE_SIZE * 4;
1462                 me_req_size = PM4_UCODE_SIZE * 12;
1463                 rlc_req_size = RLC_UCODE_SIZE * 4;
1464         }
1465
1466         DRM_INFO("Loading %s Microcode\n", chip_name);
1467
1468         snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
1469         err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
1470         if (err)
1471                 goto out;
1472         if (rdev->pfp_fw->size != pfp_req_size) {
1473                 printk(KERN_ERR
1474                        "r600_cp: Bogus length %zu in firmware \"%s\"\n",
1475                        rdev->pfp_fw->size, fw_name);
1476                 err = -EINVAL;
1477                 goto out;
1478         }
1479
1480         snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
1481         err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
1482         if (err)
1483                 goto out;
1484         if (rdev->me_fw->size != me_req_size) {
1485                 printk(KERN_ERR
1486                        "r600_cp: Bogus length %zu in firmware \"%s\"\n",
1487                        rdev->me_fw->size, fw_name);
1488                 err = -EINVAL;
1489         }
1490
1491         snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
1492         err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
1493         if (err)
1494                 goto out;
1495         if (rdev->rlc_fw->size != rlc_req_size) {
1496                 printk(KERN_ERR
1497                        "r600_rlc: Bogus length %zu in firmware \"%s\"\n",
1498                        rdev->rlc_fw->size, fw_name);
1499                 err = -EINVAL;
1500         }
1501
1502 out:
1503         platform_device_unregister(pdev);
1504
1505         if (err) {
1506                 if (err != -EINVAL)
1507                         printk(KERN_ERR
1508                                "r600_cp: Failed to load firmware \"%s\"\n",
1509                                fw_name);
1510                 release_firmware(rdev->pfp_fw);
1511                 rdev->pfp_fw = NULL;
1512                 release_firmware(rdev->me_fw);
1513                 rdev->me_fw = NULL;
1514                 release_firmware(rdev->rlc_fw);
1515                 rdev->rlc_fw = NULL;
1516         }
1517         return err;
1518 }
1519
/* Upload the PFP and ME (PM4) microcode images into the CP's internal
 * RAM.  The CP must be halted and soft-reset before the upload; the
 * firmware blobs are big-endian streams of 32-bit words. */
static int r600_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i;

	if (!rdev->me_fw || !rdev->pfp_fw)
		return -EINVAL;

	r600_cp_stop(rdev);

	WREG32(CP_RB_CNTL, RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));

	/* Reset cp */
	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
	RREG32(GRBM_SOFT_RESET);	/* read back to post the reset */
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);

	WREG32(CP_ME_RAM_WADDR, 0);

	/* ME ucode: 3 words per entry, auto-incrementing write port. */
	fw_data = (const __be32 *)rdev->me_fw->data;
	WREG32(CP_ME_RAM_WADDR, 0);
	for (i = 0; i < PM4_UCODE_SIZE * 3; i++)
		WREG32(CP_ME_RAM_DATA,
		       be32_to_cpup(fw_data++));

	/* PFP ucode: 1 word per entry. */
	fw_data = (const __be32 *)rdev->pfp_fw->data;
	WREG32(CP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < PFP_UCODE_SIZE; i++)
		WREG32(CP_PFP_UCODE_DATA,
		       be32_to_cpup(fw_data++));

	/* Rewind the address registers for the engines' own fetches. */
	WREG32(CP_PFP_UCODE_ADDR, 0);
	WREG32(CP_ME_RAM_WADDR, 0);
	WREG32(CP_ME_RAM_RADDR, 0);
	return 0;
}
1557
/* Emit the ME_INITIALIZE packet that brings the CP online, then
 * un-halt the micro engine.  The packet payload differs between r6xx
 * and r7xx (context count field layout). */
int r600_cp_start(struct radeon_device *rdev)
{
	int r;
	uint32_t cp_me;

	r = radeon_ring_lock(rdev, 7);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}
	radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
	radeon_ring_write(rdev, 0x1);
	if (rdev->family < CHIP_RV770) {
		radeon_ring_write(rdev, 0x3);
		radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
	} else {
		radeon_ring_write(rdev, 0x0);
		radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
	}
	radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, 0);
	radeon_ring_unlock_commit(rdev);

	/* Clear all CP_ME halt bits - let the engine run. */
	cp_me = 0xff;
	WREG32(R_0086D8_CP_ME_CNTL, cp_me);
	return 0;
}
1586
/* Bring the CP ring buffer back up: soft-reset the CP, program ring
 * size/base and read/write pointers, start the engine and run a ring
 * test.  Returns 0 on success or the ring-test error. */
int r600_cp_resume(struct radeon_device *rdev)
{
	u32 tmp;
	u32 rb_bufsz;
	int r;

	/* Reset cp */
	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
	RREG32(GRBM_SOFT_RESET);	/* read back to post the reset */
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);

	/* Set ring buffer size */
	rb_bufsz = drm_order(rdev->cp.ring_size / 8);
	tmp = RB_NO_UPDATE | (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB_CNTL, tmp);
	WREG32(CP_SEM_WAIT_TIMER, 0x4);

	/* Set the write pointer delay */
	WREG32(CP_RB_WPTR_DELAY, 0);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);	/* allow rptr override */
	WREG32(CP_RB_RPTR_WR, 0);
	WREG32(CP_RB_WPTR, 0);
	WREG32(CP_RB_RPTR_ADDR, rdev->cp.gpu_addr & 0xFFFFFFFF);
	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->cp.gpu_addr));
	mdelay(1);
	WREG32(CP_RB_CNTL, tmp);	/* drop the override again */

	WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));

	/* Mirror the hw pointers into the driver's ring state. */
	rdev->cp.rptr = RREG32(CP_RB_RPTR);
	rdev->cp.wptr = RREG32(CP_RB_WPTR);

	r600_cp_start(rdev);
	rdev->cp.ready = true;
	r = radeon_ring_test(rdev);
	if (r) {
		rdev->cp.ready = false;
		return r;
	}
	return 0;
}
1635
/* Publish the driver's write pointer to the CP; the read-back flushes
 * the posted write so the CP sees the new wptr promptly. */
void r600_cp_commit(struct radeon_device *rdev)
{
	WREG32(CP_RB_WPTR, rdev->cp.wptr);
	(void)RREG32(CP_RB_WPTR);
}
1641
1642 void r600_ring_init(struct radeon_device *rdev, unsigned ring_size)
1643 {
1644         u32 rb_bufsz;
1645
1646         /* Align ring size */
1647         rb_bufsz = drm_order(ring_size / 8);
1648         ring_size = (1 << (rb_bufsz + 1)) * 4;
1649         rdev->cp.ring_size = ring_size;
1650         rdev->cp.align_mask = 16 - 1;
1651 }
1652
/* Tear down the CP: halt the engine, then release the ring buffer. */
void r600_cp_fini(struct radeon_device *rdev)
{
	r600_cp_stop(rdev);
	radeon_ring_fini(rdev);
}
1658
1659
1660 /*
1661  * GPU scratch registers helpers function.
1662  */
1663 void r600_scratch_init(struct radeon_device *rdev)
1664 {
1665         int i;
1666
1667         rdev->scratch.num_reg = 7;
1668         for (i = 0; i < rdev->scratch.num_reg; i++) {
1669                 rdev->scratch.free[i] = true;
1670                 rdev->scratch.reg[i] = SCRATCH_REG0 + (i * 4);
1671         }
1672 }
1673
/* Smoke-test the CP ring: emit a SET_CONFIG_REG packet that writes
 * 0xDEADBEEF to a scratch register, then poll the register until the
 * value appears or the usec timeout expires. */
int r600_ring_test(struct radeon_device *rdev)
{
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = radeon_scratch_get(rdev, &scratch);
	if (r) {
		DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
		return r;
	}
	/* Seed with a sentinel so a stale 0xDEADBEEF can't fake success. */
	WREG32(scratch, 0xCAFEDEAD);
	r = radeon_ring_lock(rdev, 3);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		radeon_scratch_free(rdev, scratch);
		return r;
	}
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(rdev, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
	radeon_ring_write(rdev, 0xDEADBEEF);
	radeon_ring_unlock_commit(rdev);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test succeeded in %d usecs\n", i);
	} else {
		DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}
	radeon_scratch_free(rdev, scratch);
	return r;
}
1713
/* Disable scratch writeback and unpin/unmap the writeback BO (the BO
 * itself is kept; r600_wb_fini() releases it). */
void r600_wb_disable(struct radeon_device *rdev)
{
	int r;

	WREG32(SCRATCH_UMSK, 0);	/* mask all scratch writebacks */
	if (rdev->wb.wb_obj) {
		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
		if (unlikely(r != 0))
			return;
		radeon_bo_kunmap(rdev->wb.wb_obj);
		radeon_bo_unpin(rdev->wb.wb_obj);
		radeon_bo_unreserve(rdev->wb.wb_obj);
	}
}
1728
1729 void r600_wb_fini(struct radeon_device *rdev)
1730 {
1731         r600_wb_disable(rdev);
1732         if (rdev->wb.wb_obj) {
1733                 radeon_bo_unref(&rdev->wb.wb_obj);
1734                 rdev->wb.wb = NULL;
1735                 rdev->wb.wb_obj = NULL;
1736         }
1737 }
1738
/* Allocate (on first call), pin and map the writeback page in GTT, then
 * point SCRATCH/CP_RB_RPTR writeback at it and unmask scratch writes.
 * On any setup failure the partially-built state is torn down via
 * r600_wb_fini(). */
int r600_wb_enable(struct radeon_device *rdev)
{
	int r;

	if (rdev->wb.wb_obj == NULL) {
		r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
				RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}
		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
		if (unlikely(r != 0)) {
			r600_wb_fini(rdev);
			return r;
		}
		r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
				&rdev->wb.gpu_addr);
		if (r) {
			radeon_bo_unreserve(rdev->wb.wb_obj);
			dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
			r600_wb_fini(rdev);
			return r;
		}
		r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
		radeon_bo_unreserve(rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
			r600_wb_fini(rdev);
			return r;
		}
	}
	/* Scratch writeback lands at the page base; CP rptr at +1024. */
	WREG32(SCRATCH_ADDR, (rdev->wb.gpu_addr >> 8) & 0xFFFFFFFF);
	WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + 1024) & 0xFFFFFFFC);
	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + 1024) & 0xFF);
	WREG32(SCRATCH_UMSK, 0xff);	/* enable all scratch writebacks */
	return 0;
}
1777
/* Emit the packet sequence that signals @fence on the CP ring: flush
 * caches, wait for 3D idle, write the fence sequence number to the
 * fence scratch register, then raise a CP interrupt. */
void r600_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence)
{
	/* Also consider EVENT_WRITE_EOP.  it handles the interrupts + timestamps + events */

	radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
	radeon_ring_write(rdev, CACHE_FLUSH_AND_INV_EVENT);
	/* wait for 3D idle clean */
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
	/* Emit fence sequence & fire IRQ */
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
	radeon_ring_write(rdev, fence->seq);
	/* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
	radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
	radeon_ring_write(rdev, RB_INT_STAT);
}
1797
/* GPU-accelerated copy of @num_pages pages from @src_offset to
 * @dst_offset using the blit shader.  Serialized by the r600_blit
 * mutex; on prepare failure the vertex-buffer IB (if any) is released
 * before returning. */
int r600_copy_blit(struct radeon_device *rdev,
		   uint64_t src_offset, uint64_t dst_offset,
		   unsigned num_pages, struct radeon_fence *fence)
{
	int r;

	mutex_lock(&rdev->r600_blit.mutex);
	rdev->r600_blit.vb_ib = NULL;
	r = r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
	if (r) {
		if (rdev->r600_blit.vb_ib)
			radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
		mutex_unlock(&rdev->r600_blit.mutex);
		return r;
	}
	r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
	r600_blit_done_copy(rdev, fence);
	mutex_unlock(&rdev->r600_blit.mutex);
	return 0;
}
1818
/* Program a surface register for tiled access.  Not yet implemented on
 * r6xx; returns 0 so callers treat it as a no-op success. */
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
			 uint32_t tiling_flags, uint32_t pitch,
			 uint32_t offset, uint32_t obj_size)
{
	/* FIXME: implement */
	return 0;
}
1826
/* Clear a surface register.  Not yet implemented on r6xx; no-op. */
void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
{
	/* FIXME: implement */
}
1831
1832
1833 bool r600_card_posted(struct radeon_device *rdev)
1834 {
1835         uint32_t reg;
1836
1837         /* first check CRTCs */
1838         reg = RREG32(D1CRTC_CONTROL) |
1839                 RREG32(D2CRTC_CONTROL);
1840         if (reg & CRTC_EN)
1841                 return true;
1842
1843         /* then check MEM_SIZE, in case the crtcs are off */
1844         if (RREG32(CONFIG_MEMSIZE))
1845                 return true;
1846
1847         return false;
1848 }
1849
/* Common bring-up path shared by init and resume: load microcode if
 * needed, program the MC, enable GART (or AGP), init the GPU core and
 * blitter, pin the blit shader, set up interrupts, then start the CP
 * and enable writeback.  Order matters: MC before GART, CP microcode
 * before CP resume. */
int r600_startup(struct radeon_device *rdev)
{
	int r;

	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
		r = r600_init_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load firmware!\n");
			return r;
		}
	}

	r600_mc_program(rdev);
	if (rdev->flags & RADEON_IS_AGP) {
		r600_agp_enable(rdev);
	} else {
		r = r600_pcie_gart_enable(rdev);
		if (r)
			return r;
	}
	r600_gpu_init(rdev);
	/* Blitter failure is non-fatal: fall back to CPU memcpy. */
	r = r600_blit_init(rdev);
	if (r) {
		r600_blit_fini(rdev);
		rdev->asic->copy = NULL;
		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
	}
	/* pin copy shader into vram */
	if (rdev->r600_blit.shader_obj) {
		r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
		if (unlikely(r != 0))
			return r;
		r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
				&rdev->r600_blit.shader_gpu_addr);
		radeon_bo_unreserve(rdev->r600_blit.shader_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
			return r;
		}
	}
	/* Enable IRQ */
	r = r600_irq_init(rdev);
	if (r) {
		DRM_ERROR("radeon: IH init failed (%d).\n", r);
		radeon_irq_kms_fini(rdev);
		return r;
	}
	r600_irq_set(rdev);

	r = radeon_ring_init(rdev, rdev->cp.ring_size);
	if (r)
		return r;
	r = r600_cp_load_microcode(rdev);
	if (r)
		return r;
	r = r600_cp_resume(rdev);
	if (r)
		return r;
	/* write back buffer are not vital so don't worry about failure */
	r600_wb_enable(rdev);
	return 0;
}
1912
1913 void r600_vga_set_state(struct radeon_device *rdev, bool state)
1914 {
1915         uint32_t temp;
1916
1917         temp = RREG32(CONFIG_CNTL);
1918         if (state == false) {
1919                 temp &= ~(1<<0);
1920                 temp |= (1<<1);
1921         } else {
1922                 temp &= ~(1<<1);
1923         }
1924         WREG32(CONFIG_CNTL, temp);
1925 }
1926
1927 int r600_resume(struct radeon_device *rdev)
1928 {
1929         int r;
1930
1931         /* Do not reset GPU before posting, on r600 hw unlike on r500 hw,
1932          * posting will perform necessary task to bring back GPU into good
1933          * shape.
1934          */
1935         /* post card */
1936         atom_asic_init(rdev->mode_info.atom_context);
1937         /* Initialize clocks */
1938         r = radeon_clocks_init(rdev);
1939         if (r) {
1940                 return r;
1941         }
1942
1943         r = r600_startup(rdev);
1944         if (r) {
1945                 DRM_ERROR("r600 startup failed on resume\n");
1946                 return r;
1947         }
1948
1949         r = r600_ib_test(rdev);
1950         if (r) {
1951                 DRM_ERROR("radeon: failled testing IB (%d).\n", r);
1952                 return r;
1953         }
1954
1955         r = r600_audio_init(rdev);
1956         if (r) {
1957                 DRM_ERROR("radeon: audio resume failed\n");
1958                 return r;
1959         }
1960
1961         return r;
1962 }
1963
/* Suspend path: quiesce audio, CP, interrupts, writeback and GART,
 * and unpin the blit shader BO so VRAM contents may be discarded. */
int r600_suspend(struct radeon_device *rdev)
{
	int r;

	r600_audio_fini(rdev);
	/* FIXME: we should wait for ring to be empty */
	r600_cp_stop(rdev);
	rdev->cp.ready = false;
	r600_irq_suspend(rdev);
	r600_wb_disable(rdev);
	r600_pcie_gart_disable(rdev);
	/* unpin shaders bo */
	if (rdev->r600_blit.shader_obj) {
		r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
		if (!r) {
			radeon_bo_unpin(rdev->r600_blit.shader_obj);
			radeon_bo_unreserve(rdev->r600_blit.shader_obj);
		}
	}
	return 0;
}
1985
1986 /* Plan is to move initialization in that function and use
1987  * helper function so that radeon_device_init pretty much
1988  * do nothing more than calling asic specific function. This
1989  * should also allow to remove a bunch of callback function
1990  * like vram_info.
1991  */
/* One-time device initialization for r6xx/r7xx: BIOS discovery and
 * POST, scratch/surface/clock/fence/MC/memory-manager setup, ring and
 * IH allocation, then the common startup path.  Acceleration failures
 * are non-fatal: the device comes up with accel_working = false. */
int r600_init(struct radeon_device *rdev)
{
	int r;

	r = radeon_dummy_page_init(rdev);
	if (r)
		return r;
	if (r600_debugfs_mc_info_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for mc !\n");
	}
	/* This don't do much */
	r = radeon_gem_init(rdev);
	if (r)
		return r;
	/* Read BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	/* Must be an ATOMBIOS */
	if (!rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
		return -EINVAL;
	}
	r = radeon_atombios_init(rdev);
	if (r)
		return r;
	/* Post card if necessary */
	if (!r600_card_posted(rdev)) {
		if (!rdev->bios) {
			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
			return -EINVAL;
		}
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
	/* Initialize scratch registers */
	r600_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	r = radeon_clocks_init(rdev);
	if (r)
		return r;
	/* Initialize power management */
	radeon_pm_init(rdev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	/* AGP init failure degrades to PCI(e) operation rather than failing. */
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r)
			radeon_agp_disable(rdev);
	}
	r = r600_mc_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;

	r = radeon_irq_kms_init(rdev);
	if (r)
		return r;

	rdev->cp.ring_obj = NULL;
	r600_ring_init(rdev, 1024 * 1024);

	rdev->ih.ring_obj = NULL;
	r600_ih_ring_init(rdev, 64 * 1024);

	r = r600_pcie_gart_init(rdev);
	if (r)
		return r;

	rdev->accel_working = true;
	r = r600_startup(rdev);
	if (r) {
		/* Startup failed: tear down accel state but keep modesetting. */
		dev_err(rdev->dev, "disabling GPU acceleration\n");
		r600_cp_fini(rdev);
		r600_wb_fini(rdev);
		r600_irq_fini(rdev);
		radeon_irq_kms_fini(rdev);
		r600_pcie_gart_fini(rdev);
		rdev->accel_working = false;
	}
	if (rdev->accel_working) {
		r = radeon_ib_pool_init(rdev);
		if (r) {
			dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
			rdev->accel_working = false;
		} else {
			r = r600_ib_test(rdev);
			if (r) {
				dev_err(rdev->dev, "IB test failed (%d).\n", r);
				rdev->accel_working = false;
			}
		}
	}

	r = r600_audio_init(rdev);
	if (r)
		return r; /* TODO error handling */
	return 0;
}
2100
/* Full device teardown; roughly the reverse order of r600_init(). */
void r600_fini(struct radeon_device *rdev)
{
	radeon_pm_fini(rdev);
	r600_audio_fini(rdev);
	r600_blit_fini(rdev);
	r600_cp_fini(rdev);
	r600_wb_fini(rdev);
	r600_irq_fini(rdev);
	radeon_irq_kms_fini(rdev);
	r600_pcie_gart_fini(rdev);
	radeon_agp_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_clocks_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
	radeon_dummy_page_fini(rdev);
}
2121
2122
2123 /*
2124  * CS stuff
2125  */
/* Emit an INDIRECT_BUFFER packet that makes the CP fetch and execute
 * @ib.  The IB GPU address must be 4-byte aligned (low 2 bits masked). */
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	/* FIXME: implement */
	radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	radeon_ring_write(rdev, ib->gpu_addr & 0xFFFFFFFC);
	radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
	radeon_ring_write(rdev, ib->length_dw);
}
2134
2135 int r600_ib_test(struct radeon_device *rdev)
2136 {
2137         struct radeon_ib *ib;
2138         uint32_t scratch;
2139         uint32_t tmp = 0;
2140         unsigned i;
2141         int r;
2142
2143         r = radeon_scratch_get(rdev, &scratch);
2144         if (r) {
2145                 DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
2146                 return r;
2147         }
2148         WREG32(scratch, 0xCAFEDEAD);
2149         r = radeon_ib_get(rdev, &ib);
2150         if (r) {
2151                 DRM_ERROR("radeon: failed to get ib (%d).\n", r);
2152                 return r;
2153         }
2154         ib->ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
2155         ib->ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
2156         ib->ptr[2] = 0xDEADBEEF;
2157         ib->ptr[3] = PACKET2(0);
2158         ib->ptr[4] = PACKET2(0);
2159         ib->ptr[5] = PACKET2(0);
2160         ib->ptr[6] = PACKET2(0);
2161         ib->ptr[7] = PACKET2(0);
2162         ib->ptr[8] = PACKET2(0);
2163         ib->ptr[9] = PACKET2(0);
2164         ib->ptr[10] = PACKET2(0);
2165         ib->ptr[11] = PACKET2(0);
2166         ib->ptr[12] = PACKET2(0);
2167         ib->ptr[13] = PACKET2(0);
2168         ib->ptr[14] = PACKET2(0);
2169         ib->ptr[15] = PACKET2(0);
2170         ib->length_dw = 16;
2171         r = radeon_ib_schedule(rdev, ib);
2172         if (r) {
2173                 radeon_scratch_free(rdev, scratch);
2174                 radeon_ib_free(rdev, &ib);
2175                 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
2176                 return r;
2177         }
2178         r = radeon_fence_wait(ib->fence, false);
2179         if (r) {
2180                 DRM_ERROR("radeon: fence wait failed (%d).\n", r);
2181                 return r;
2182         }
2183         for (i = 0; i < rdev->usec_timeout; i++) {
2184                 tmp = RREG32(scratch);
2185                 if (tmp == 0xDEADBEEF)
2186                         break;
2187                 DRM_UDELAY(1);
2188         }
2189         if (i < rdev->usec_timeout) {
2190                 DRM_INFO("ib test succeeded in %u usecs\n", i);
2191         } else {
2192                 DRM_ERROR("radeon: ib test failed (sracth(0x%04X)=0x%08X)\n",
2193                           scratch, tmp);
2194                 r = -EINVAL;
2195         }
2196         radeon_scratch_free(rdev, scratch);
2197         radeon_ib_free(rdev, &ib);
2198         return r;
2199 }
2200
2201 /*
2202  * Interrupts
2203  *
2204  * Interrupts use a ring buffer on r6xx/r7xx hardware.  It works pretty
2205  * the same as the CP ring buffer, but in reverse.  Rather than the CPU
2206  * writing to the ring and the GPU consuming, the GPU writes to the ring
2207  * and host consumes.  As the host irq handler processes interrupts, it
2208  * increments the rptr.  When the rptr catches up with the wptr, all the
2209  * current interrupts have been processed.
2210  */
2211
2212 void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
2213 {
2214         u32 rb_bufsz;
2215
2216         /* Align ring size */
2217         rb_bufsz = drm_order(ring_size / 4);
2218         ring_size = (1 << rb_bufsz) * 4;
2219         rdev->ih.ring_size = ring_size;
2220         rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
2221         rdev->ih.rptr = 0;
2222 }
2223
/* Allocate, pin (GTT) and CPU-map the IH ring buffer on first call;
 * subsequent calls are no-ops.  Returns 0 on success. */
static int r600_ih_ring_alloc(struct radeon_device *rdev)
{
	int r;

	/* Allocate ring buffer */
	if (rdev->ih.ring_obj == NULL) {
		r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size,
				     true,
				     RADEON_GEM_DOMAIN_GTT,
				     &rdev->ih.ring_obj);
		if (r) {
			DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
			return r;
		}
		r = radeon_bo_reserve(rdev->ih.ring_obj, false);
		if (unlikely(r != 0))
			return r;
		r = radeon_bo_pin(rdev->ih.ring_obj,
				  RADEON_GEM_DOMAIN_GTT,
				  &rdev->ih.gpu_addr);
		if (r) {
			radeon_bo_unreserve(rdev->ih.ring_obj);
			DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r);
			return r;
		}
		r = radeon_bo_kmap(rdev->ih.ring_obj,
				   (void **)&rdev->ih.ring);
		radeon_bo_unreserve(rdev->ih.ring_obj);
		if (r) {
			DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r);
			return r;
		}
	}
	return 0;
}
2259
2260 static void r600_ih_ring_fini(struct radeon_device *rdev)
2261 {
2262         int r;
2263         if (rdev->ih.ring_obj) {
2264                 r = radeon_bo_reserve(rdev->ih.ring_obj, false);
2265                 if (likely(r == 0)) {
2266                         radeon_bo_kunmap(rdev->ih.ring_obj);
2267                         radeon_bo_unpin(rdev->ih.ring_obj);
2268                         radeon_bo_unreserve(rdev->ih.ring_obj);
2269                 }
2270                 radeon_bo_unref(&rdev->ih.ring_obj);
2271                 rdev->ih.ring = NULL;
2272                 rdev->ih.ring_obj = NULL;
2273         }
2274 }
2275
/* Halt the RLC (run list controller). */
static void r600_rlc_stop(struct radeon_device *rdev)
{

	if (rdev->family >= CHIP_RV770) {
		/* r7xx asics need to soft reset RLC before halting */
		WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
		RREG32(SRBM_SOFT_RESET);	/* read back to post the reset */
		udelay(15000);
		WREG32(SRBM_SOFT_RESET, 0);
		RREG32(SRBM_SOFT_RESET);
	}

	WREG32(RLC_CNTL, 0);
}
2290
/* Enable the RLC microengine (counterpart to r600_rlc_stop()). */
static void r600_rlc_start(struct radeon_device *rdev)
{
	WREG32(RLC_CNTL, RLC_ENABLE);
}
2295
2296 static int r600_rlc_init(struct radeon_device *rdev)
2297 {
2298         u32 i;
2299         const __be32 *fw_data;
2300
2301         if (!rdev->rlc_fw)
2302                 return -EINVAL;
2303
2304         r600_rlc_stop(rdev);
2305
2306         WREG32(RLC_HB_BASE, 0);
2307         WREG32(RLC_HB_CNTL, 0);
2308         WREG32(RLC_HB_RPTR, 0);
2309         WREG32(RLC_HB_WPTR, 0);
2310         WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
2311         WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
2312         WREG32(RLC_MC_CNTL, 0);
2313         WREG32(RLC_UCODE_CNTL, 0);
2314
2315         fw_data = (const __be32 *)rdev->rlc_fw->data;
2316         if (rdev->family >= CHIP_RV770) {
2317                 for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
2318                         WREG32(RLC_UCODE_ADDR, i);
2319                         WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
2320                 }
2321         } else {
2322                 for (i = 0; i < RLC_UCODE_SIZE; i++) {
2323                         WREG32(RLC_UCODE_ADDR, i);
2324                         WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
2325                 }
2326         }
2327         WREG32(RLC_UCODE_ADDR, 0);
2328
2329         r600_rlc_start(rdev);
2330
2331         return 0;
2332 }
2333
2334 static void r600_enable_interrupts(struct radeon_device *rdev)
2335 {
2336         u32 ih_cntl = RREG32(IH_CNTL);
2337         u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
2338
2339         ih_cntl |= ENABLE_INTR;
2340         ih_rb_cntl |= IH_RB_ENABLE;
2341         WREG32(IH_CNTL, ih_cntl);
2342         WREG32(IH_RB_CNTL, ih_rb_cntl);
2343         rdev->ih.enabled = true;
2344 }
2345
2346 static void r600_disable_interrupts(struct radeon_device *rdev)
2347 {
2348         u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
2349         u32 ih_cntl = RREG32(IH_CNTL);
2350
2351         ih_rb_cntl &= ~IH_RB_ENABLE;
2352         ih_cntl &= ~ENABLE_INTR;
2353         WREG32(IH_RB_CNTL, ih_rb_cntl);
2354         WREG32(IH_CNTL, ih_cntl);
2355         /* set rptr, wptr to 0 */
2356         WREG32(IH_RB_RPTR, 0);
2357         WREG32(IH_RB_WPTR, 0);
2358         rdev->ih.enabled = false;
2359         rdev->ih.wptr = 0;
2360         rdev->ih.rptr = 0;
2361 }
2362
/* Mask every individual interrupt source (CP, GRBM, display mode, DAC
 * autodetect, hotplug) without touching the master enable.  The HPD
 * polarity bits are preserved so hotplug sense configuration survives.
 * DCE3+ parts use a different set of HPD registers than older asics.
 */
static void r600_disable_interrupt_state(struct radeon_device *rdev)
{
	u32 tmp;

	WREG32(CP_INT_CNTL, 0);
	WREG32(GRBM_INT_CNTL, 0);
	WREG32(DxMODE_INT_MASK, 0);
	if (ASIC_IS_DCE3(rdev)) {
		WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
		WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
		/* keep only the polarity bit; clears the HPD int enable */
		tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD1_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD2_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD3_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
		if (ASIC_IS_DCE32(rdev)) {
			/* HPD5/6 only exist on DCE3.2 */
			tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
			tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
		}
	} else {
		WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
		WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
		tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
		tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
		tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
	}
}
2398
/* One-time IH (interrupt handler) setup: allocate the IH ring buffer,
 * load the RLC microcode, program the IH ring registers and enable
 * interrupt delivery.  Individual sources stay masked until
 * r600_irq_set() enables them.
 *
 * Returns 0 on success or a negative error code; the IH ring is torn
 * down again if the RLC init fails.
 */
int r600_irq_init(struct radeon_device *rdev)
{
	int ret = 0;
	int rb_bufsz;
	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;

	/* allocate ring */
	ret = r600_ih_ring_alloc(rdev);
	if (ret)
		return ret;

	/* disable irqs */
	r600_disable_interrupts(rdev);

	/* init rlc */
	ret = r600_rlc_init(rdev);
	if (ret) {
		r600_ih_ring_fini(rdev);
		return ret;
	}

	/* setup interrupt control */
	/* set dummy read address to ring address */
	WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
	interrupt_cntl = RREG32(INTERRUPT_CNTL);
	/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
	 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
	 */
	interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
	/* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
	interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
	WREG32(INTERRUPT_CNTL, interrupt_cntl);

	WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
	/* ring size field is log2 of the ring size in dwords */
	rb_bufsz = drm_order(rdev->ih.ring_size / 4);

	ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
		      IH_WPTR_OVERFLOW_CLEAR |
		      (rb_bufsz << 1));
	/* WPTR writeback, not yet */
	/*ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;*/
	WREG32(IH_RB_WPTR_ADDR_LO, 0);
	WREG32(IH_RB_WPTR_ADDR_HI, 0);

	WREG32(IH_RB_CNTL, ih_rb_cntl);

	/* set rptr, wptr to 0 */
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);

	/* Default settings for IH_CNTL (disabled at first) */
	ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10);
	/* RPTR_REARM only works if msi's are enabled */
	if (rdev->msi_enabled)
		ih_cntl |= RPTR_REARM;

#ifdef __BIG_ENDIAN
	ih_cntl |= IH_MC_SWAP(IH_MC_SWAP_32BIT);
#endif
	WREG32(IH_CNTL, ih_cntl);

	/* force the active interrupt state to all disabled */
	r600_disable_interrupt_state(rdev);

	/* enable irqs */
	r600_enable_interrupts(rdev);

	return ret;
}
2468
/* Quiesce interrupt delivery for suspend: mask/stop the IH ring and
 * halt the RLC.  The IH ring buffer itself is kept allocated.
 */
void r600_irq_suspend(struct radeon_device *rdev)
{
	r600_disable_interrupts(rdev);
	r600_rlc_stop(rdev);
}
2474
/* Full interrupt teardown: suspend-level quiesce plus freeing the IH
 * ring buffer (counterpart to r600_irq_init()).
 */
void r600_irq_fini(struct radeon_device *rdev)
{
	r600_irq_suspend(rdev);
	r600_ih_ring_fini(rdev);
}
2480
/* Program the hardware interrupt enables to match the requested state
 * in rdev->irq: CP software interrupt, per-crtc vblank, and per-pad
 * hotplug detect.  Reads back the current HPD control registers so the
 * non-enable bits (e.g. polarity) are preserved.
 *
 * Returns 0 on success, -EINVAL if no irq handler was ever installed.
 */
int r600_irq_set(struct radeon_device *rdev)
{
	u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
	u32 mode_int = 0;
	u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;

	if (!rdev->irq.installed) {
		WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
		return -EINVAL;
	}
	/* don't enable anything if the ih is disabled */
	if (!rdev->ih.enabled) {
		r600_disable_interrupts(rdev);
		/* force the active interrupt state to all disabled */
		r600_disable_interrupt_state(rdev);
		return 0;
	}

	/* start from current register values with the enable bit cleared */
	if (ASIC_IS_DCE3(rdev)) {
		hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
		if (ASIC_IS_DCE32(rdev)) {
			hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
			hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
		}
	} else {
		hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
	}

	/* OR in the enable bits for the sources the driver asked for */
	if (rdev->irq.sw_int) {
		DRM_DEBUG("r600_irq_set: sw int\n");
		cp_int_cntl |= RB_INT_ENABLE;
	}
	if (rdev->irq.crtc_vblank_int[0]) {
		DRM_DEBUG("r600_irq_set: vblank 0\n");
		mode_int |= D1MODE_VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[1]) {
		DRM_DEBUG("r600_irq_set: vblank 1\n");
		mode_int |= D2MODE_VBLANK_INT_MASK;
	}
	if (rdev->irq.hpd[0]) {
		DRM_DEBUG("r600_irq_set: hpd 1\n");
		hpd1 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[1]) {
		DRM_DEBUG("r600_irq_set: hpd 2\n");
		hpd2 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[2]) {
		DRM_DEBUG("r600_irq_set: hpd 3\n");
		hpd3 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[3]) {
		DRM_DEBUG("r600_irq_set: hpd 4\n");
		hpd4 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[4]) {
		DRM_DEBUG("r600_irq_set: hpd 5\n");
		hpd5 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[5]) {
		DRM_DEBUG("r600_irq_set: hpd 6\n");
		hpd6 |= DC_HPDx_INT_EN;
	}

	/* write the assembled values back to the hardware */
	WREG32(CP_INT_CNTL, cp_int_cntl);
	WREG32(DxMODE_INT_MASK, mode_int);
	if (ASIC_IS_DCE3(rdev)) {
		WREG32(DC_HPD1_INT_CONTROL, hpd1);
		WREG32(DC_HPD2_INT_CONTROL, hpd2);
		WREG32(DC_HPD3_INT_CONTROL, hpd3);
		WREG32(DC_HPD4_INT_CONTROL, hpd4);
		if (ASIC_IS_DCE32(rdev)) {
			WREG32(DC_HPD5_INT_CONTROL, hpd5);
			WREG32(DC_HPD6_INT_CONTROL, hpd6);
		}
	} else {
		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
	}

	return 0;
}
2570
2571 static inline void r600_irq_ack(struct radeon_device *rdev,
2572                                 u32 *disp_int,
2573                                 u32 *disp_int_cont,
2574                                 u32 *disp_int_cont2)
2575 {
2576         u32 tmp;
2577
2578         if (ASIC_IS_DCE3(rdev)) {
2579                 *disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
2580                 *disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
2581                 *disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
2582         } else {
2583                 *disp_int = RREG32(DISP_INTERRUPT_STATUS);
2584                 *disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
2585                 *disp_int_cont2 = 0;
2586         }
2587
2588         if (*disp_int & LB_D1_VBLANK_INTERRUPT)
2589                 WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
2590         if (*disp_int & LB_D1_VLINE_INTERRUPT)
2591                 WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
2592         if (*disp_int & LB_D2_VBLANK_INTERRUPT)
2593                 WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
2594         if (*disp_int & LB_D2_VLINE_INTERRUPT)
2595                 WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
2596         if (*disp_int & DC_HPD1_INTERRUPT) {
2597                 if (ASIC_IS_DCE3(rdev)) {
2598                         tmp = RREG32(DC_HPD1_INT_CONTROL);
2599                         tmp |= DC_HPDx_INT_ACK;
2600                         WREG32(DC_HPD1_INT_CONTROL, tmp);
2601                 } else {
2602                         tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
2603                         tmp |= DC_HPDx_INT_ACK;
2604                         WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
2605                 }
2606         }
2607         if (*disp_int & DC_HPD2_INTERRUPT) {
2608                 if (ASIC_IS_DCE3(rdev)) {
2609                         tmp = RREG32(DC_HPD2_INT_CONTROL);
2610                         tmp |= DC_HPDx_INT_ACK;
2611                         WREG32(DC_HPD2_INT_CONTROL, tmp);
2612                 } else {
2613                         tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
2614                         tmp |= DC_HPDx_INT_ACK;
2615                         WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
2616                 }
2617         }
2618         if (*disp_int_cont & DC_HPD3_INTERRUPT) {
2619                 if (ASIC_IS_DCE3(rdev)) {
2620                         tmp = RREG32(DC_HPD3_INT_CONTROL);
2621                         tmp |= DC_HPDx_INT_ACK;
2622                         WREG32(DC_HPD3_INT_CONTROL, tmp);
2623                 } else {
2624                         tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
2625                         tmp |= DC_HPDx_INT_ACK;
2626                         WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
2627                 }
2628         }
2629         if (*disp_int_cont & DC_HPD4_INTERRUPT) {
2630                 tmp = RREG32(DC_HPD4_INT_CONTROL);
2631                 tmp |= DC_HPDx_INT_ACK;
2632                 WREG32(DC_HPD4_INT_CONTROL, tmp);
2633         }
2634         if (ASIC_IS_DCE32(rdev)) {
2635                 if (*disp_int_cont2 & DC_HPD5_INTERRUPT) {
2636                         tmp = RREG32(DC_HPD5_INT_CONTROL);
2637                         tmp |= DC_HPDx_INT_ACK;
2638                         WREG32(DC_HPD5_INT_CONTROL, tmp);
2639                 }
2640                 if (*disp_int_cont2 & DC_HPD6_INTERRUPT) {
2641                         tmp = RREG32(DC_HPD5_INT_CONTROL);
2642                         tmp |= DC_HPDx_INT_ACK;
2643                         WREG32(DC_HPD6_INT_CONTROL, tmp);
2644                 }
2645         }
2646 }
2647
/* Fully disable interrupts: mask delivery, wait 1ms for in-flight irqs
 * to settle, acknowledge anything still pending (statuses discarded),
 * then mask every individual source.
 */
void r600_irq_disable(struct radeon_device *rdev)
{
	u32 disp_int, disp_int_cont, disp_int_cont2;

	r600_disable_interrupts(rdev);
	/* Wait and acknowledge irq */
	mdelay(1);
	r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2);
	r600_disable_interrupt_state(rdev);
}
2658
2659 static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
2660 {
2661         u32 wptr, tmp;
2662
2663         /* XXX use writeback */
2664         wptr = RREG32(IH_RB_WPTR);
2665
2666         if (wptr & RB_OVERFLOW) {
2667                 /* When a ring buffer overflow happen start parsing interrupt
2668                  * from the last not overwritten vector (wptr + 16). Hopefully
2669                  * this should allow us to catchup.
2670                  */
2671                 dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
2672                         wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask);
2673                 rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
2674                 tmp = RREG32(IH_RB_CNTL);
2675                 tmp |= IH_WPTR_OVERFLOW_CLEAR;
2676                 WREG32(IH_RB_CNTL, tmp);
2677         }
2678         return (wptr & rdev->ih.ptr_mask);
2679 }
2680
2681 /*        r600 IV Ring
2682  * Each IV ring entry is 128 bits:
2683  * [7:0]    - interrupt source id
2684  * [31:8]   - reserved
2685  * [59:32]  - interrupt source data
2686  * [127:60]  - reserved
2687  *
2688  * The basic interrupt vector entries
2689  * are decoded as follows:
2690  * src_id  src_data  description
2691  *      1         0  D1 Vblank
2692  *      1         1  D1 Vline
2693  *      5         0  D2 Vblank
2694  *      5         1  D2 Vline
2695  *     19         0  FP Hot plug detection A
2696  *     19         1  FP Hot plug detection B
2697  *     19         2  DAC A auto-detection
2698  *     19         3  DAC B auto-detection
2699  *    176         -  CP_INT RB
2700  *    177         -  CP_INT IB1
2701  *    178         -  CP_INT IB2
2702  *    181         -  EOP Interrupt
2703  *    233         -  GUI Idle
2704  *
2705  * Note, these are based on r600 and may need to be
2706  * adjusted or added to on newer asics
2707  */
2708
/* Main interrupt service routine.  Walks the IH ring from rptr to wptr,
 * decoding each 16-byte IV entry (src_id / src_data, see the table
 * above) and dispatching vblank, hotplug and CP events.  If new vectors
 * arrive while processing, it restarts until the ring is drained, then
 * writes the final rptr back to the hardware.
 *
 * Returns IRQ_HANDLED if any vectors were processed, IRQ_NONE otherwise.
 */
int r600_irq_process(struct radeon_device *rdev)
{
	u32 wptr = r600_get_ih_wptr(rdev);
	u32 rptr = rdev->ih.rptr;
	u32 src_id, src_data;
	u32 ring_index, disp_int, disp_int_cont, disp_int_cont2;
	unsigned long flags;
	bool queue_hotplug = false;

	DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
	if (!rdev->ih.enabled)
		return IRQ_NONE;

	spin_lock_irqsave(&rdev->ih.lock, flags);

	/* nothing pending */
	if (rptr == wptr) {
		spin_unlock_irqrestore(&rdev->ih.lock, flags);
		return IRQ_NONE;
	}
	if (rdev->shutdown) {
		spin_unlock_irqrestore(&rdev->ih.lock, flags);
		return IRQ_NONE;
	}

restart_ih:
	/* display interrupts */
	r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2);

	rdev->ih.wptr = wptr;
	while (rptr != wptr) {
		/* wptr/rptr are in bytes! */
		ring_index = rptr / 4;
		src_id =  rdev->ih.ring[ring_index] & 0xff;
		/* src_data is the low 28 bits of the second dword */
		src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff;

		switch (src_id) {
		case 1: /* D1 vblank/vline */
			switch (src_data) {
			case 0: /* D1 vblank */
				if (disp_int & LB_D1_VBLANK_INTERRUPT) {
					drm_handle_vblank(rdev->ddev, 0);
					rdev->pm.vblank_sync = true;
					wake_up(&rdev->irq.vblank_queue);
					disp_int &= ~LB_D1_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D1 vblank\n");
				}
				break;
			case 1: /* D1 vline */
				if (disp_int & LB_D1_VLINE_INTERRUPT) {
					disp_int &= ~LB_D1_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D1 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 5: /* D2 vblank/vline */
			switch (src_data) {
			case 0: /* D2 vblank */
				if (disp_int & LB_D2_VBLANK_INTERRUPT) {
					drm_handle_vblank(rdev->ddev, 1);
					rdev->pm.vblank_sync = true;
					wake_up(&rdev->irq.vblank_queue);
					disp_int &= ~LB_D2_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D2 vblank\n");
				}
				break;
			case 1: /* D1 vline */
				if (disp_int & LB_D2_VLINE_INTERRUPT) {
					disp_int &= ~LB_D2_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D2 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 19: /* HPD/DAC hotplug */
			switch (src_data) {
			case 0:
				if (disp_int & DC_HPD1_INTERRUPT) {
					disp_int &= ~DC_HPD1_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD1\n");
				}
				break;
			case 1:
				if (disp_int & DC_HPD2_INTERRUPT) {
					disp_int &= ~DC_HPD2_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD2\n");
				}
				break;
			case 4:
				if (disp_int_cont & DC_HPD3_INTERRUPT) {
					disp_int_cont &= ~DC_HPD3_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD3\n");
				}
				break;
			case 5:
				if (disp_int_cont & DC_HPD4_INTERRUPT) {
					disp_int_cont &= ~DC_HPD4_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD4\n");
				}
				break;
			case 10:
				if (disp_int_cont2 & DC_HPD5_INTERRUPT) {
					disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD5\n");
				}
				break;
			case 12:
				if (disp_int_cont2 & DC_HPD6_INTERRUPT) {
					disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD6\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 176: /* CP_INT in ring buffer */
		case 177: /* CP_INT in IB1 */
		case 178: /* CP_INT in IB2 */
			DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
			radeon_fence_process(rdev);
			break;
		case 181: /* CP EOP event */
			DRM_DEBUG("IH: CP EOP\n");
			break;
		default:
			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
			break;
		}

		/* wptr/rptr are in bytes! */
		rptr += 16;
		rptr &= rdev->ih.ptr_mask;
	}
	/* make sure wptr hasn't changed while processing */
	wptr = r600_get_ih_wptr(rdev);
	if (wptr != rdev->ih.wptr)
		goto restart_ih;
	/* hotplug handling needs process context; defer to a workqueue */
	if (queue_hotplug)
		queue_work(rdev->wq, &rdev->hotplug_work);
	rdev->ih.rptr = rptr;
	WREG32(IH_RB_RPTR, rdev->ih.rptr);
	spin_unlock_irqrestore(&rdev->ih.lock, flags);
	return IRQ_HANDLED;
}
2867
2868 /*
2869  * Debugfs info
2870  */
2871 #if defined(CONFIG_DEBUG_FS)
2872
/* debugfs: dump CP ring registers, the driver's pointer copies, and the
 * pending portion of the ring contents.
 */
static int r600_debugfs_cp_ring_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	unsigned count, i, j;

	radeon_ring_free_size(rdev);
	count = (rdev->cp.ring_size / 4) - rdev->cp.ring_free_dw;
	seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT));
	seq_printf(m, "CP_RB_WPTR 0x%08x\n", RREG32(CP_RB_WPTR));
	seq_printf(m, "CP_RB_RPTR 0x%08x\n", RREG32(CP_RB_RPTR));
	seq_printf(m, "driver's copy of the CP_RB_WPTR 0x%08x\n", rdev->cp.wptr);
	seq_printf(m, "driver's copy of the CP_RB_RPTR 0x%08x\n", rdev->cp.rptr);
	seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
	seq_printf(m, "%u dwords in ring\n", count);
	i = rdev->cp.rptr;
	/* NOTE(review): "<=" dumps count+1 dwords, one past the pending
	 * range -- looks like an off-by-one but is harmless in a debug
	 * dump; confirm intent before changing to "<".
	 */
	for (j = 0; j <= count; j++) {
		seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
		i = (i + 1) & rdev->cp.ptr_mask;
	}
	return 0;
}
2896
/* debugfs: dump a couple of memory-controller status registers. */
static int r600_debugfs_mc_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	DREG32_SYS(m, rdev, R_000E50_SRBM_STATUS);
	DREG32_SYS(m, rdev, VM_L2_STATUS);
	return 0;
}
2907
/* debugfs entries registered by r600_debugfs_mc_info_init() */
static struct drm_info_list r600_mc_info_list[] = {
	{"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
	{"r600_ring_info", r600_debugfs_cp_ring_info, 0, NULL},
};
2912 #endif
2913
/* Register the r600 debugfs files; a no-op without CONFIG_DEBUG_FS. */
int r600_debugfs_mc_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, r600_mc_info_list, ARRAY_SIZE(r600_mc_info_list));
#else
	return 0;
#endif
}
2922
2923 /**
2924  * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
 * @rdev: radeon device structure
 * @bo: buffer object struct which userspace is waiting for idle
 *
 * Some R6XX/R7XX chips don't seem to take into account the HDP flush
 * performed through the ring buffer; this leads to corruption in
 * rendering, see http://bugzilla.kernel.org/show_bug.cgi?id=15186.
 * To avoid this we directly perform the HDP flush by writing the
 * register through MMIO.
2932  */
void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
{
	/* global HDP cache flush via MMIO; 'bo' is unused here */
	WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
}