/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_mode.h"
#include "r600d.h"
#include "atom.h"
#include "avivod.h"

#define PFP_UCODE_SIZE 576
#define PM4_UCODE_SIZE 1792
#define RLC_UCODE_SIZE 768
#define R700_PFP_UCODE_SIZE 848
#define R700_PM4_UCODE_SIZE 1360
#define R700_RLC_UCODE_SIZE 1024
#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376
#define EVERGREEN_RLC_UCODE_SIZE 768

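/*
 * A note on the microcode sizes above: they are counts of 32-bit dwords,
 * not bytes.  PFP (prefetch parser) and PM4/ME (micro engine) images feed
 * the command processor; the RLC image is loaded as part of interrupt
 * setup.  The CP/RLC load routines later in this file stream these images
 * to the hardware one dword at a time.
 */
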
/* Firmware Names */
MODULE_FIRMWARE("radeon/R600_pfp.bin");
MODULE_FIRMWARE("radeon/R600_me.bin");
MODULE_FIRMWARE("radeon/RV610_pfp.bin");
MODULE_FIRMWARE("radeon/RV610_me.bin");
MODULE_FIRMWARE("radeon/RV630_pfp.bin");
MODULE_FIRMWARE("radeon/RV630_me.bin");
MODULE_FIRMWARE("radeon/RV620_pfp.bin");
MODULE_FIRMWARE("radeon/RV620_me.bin");
MODULE_FIRMWARE("radeon/RV635_pfp.bin");
MODULE_FIRMWARE("radeon/RV635_me.bin");
MODULE_FIRMWARE("radeon/RV670_pfp.bin");
MODULE_FIRMWARE("radeon/RV670_me.bin");
MODULE_FIRMWARE("radeon/RS780_pfp.bin");
MODULE_FIRMWARE("radeon/RS780_me.bin");
MODULE_FIRMWARE("radeon/RV770_pfp.bin");
MODULE_FIRMWARE("radeon/RV770_me.bin");
MODULE_FIRMWARE("radeon/RV730_pfp.bin");
MODULE_FIRMWARE("radeon/RV730_me.bin");
MODULE_FIRMWARE("radeon/RV710_pfp.bin");
MODULE_FIRMWARE("radeon/RV710_me.bin");
MODULE_FIRMWARE("radeon/R600_rlc.bin");
MODULE_FIRMWARE("radeon/R700_rlc.bin");
MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
MODULE_FIRMWARE("radeon/CEDAR_me.bin");
MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");

int r600_debugfs_mc_info_init(struct radeon_device *rdev);

/* r600,rv610,rv630,rv620,rv635,rv670 */
int r600_mc_wait_for_idle(struct radeon_device *rdev);
void r600_gpu_init(struct radeon_device *rdev);
void r600_fini(struct radeon_device *rdev);
void r600_irq_disable(struct radeon_device *rdev);

/* hpd for digital panel detect/disconnect */
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
        bool connected = false;

        if (ASIC_IS_DCE3(rdev)) {
                switch (hpd) {
                case RADEON_HPD_1:
                        if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
                                connected = true;
                        break;
                case RADEON_HPD_2:
                        if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
                                connected = true;
                        break;
                case RADEON_HPD_3:
                        if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
                                connected = true;
                        break;
                case RADEON_HPD_4:
                        if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
                                connected = true;
                        break;
                        /* DCE 3.2 */
                case RADEON_HPD_5:
                        if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
                                connected = true;
                        break;
                case RADEON_HPD_6:
                        if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
                                connected = true;
                        break;
                default:
                        break;
                }
        } else {
                switch (hpd) {
                case RADEON_HPD_1:
                        if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
                                connected = true;
                        break;
                case RADEON_HPD_2:
                        if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
                                connected = true;
                        break;
                case RADEON_HPD_3:
                        if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
                                connected = true;
                        break;
                default:
                        break;
                }
        }
        return connected;
}

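/*
 * Program the HPD interrupt polarity to fire on the opposite of the pad's
 * current sense state: a connected pad interrupts on disconnect and a
 * disconnected pad on connect, so callers re-arm this after each hotplug
 * event to catch the next transition.
 */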
void r600_hpd_set_polarity(struct radeon_device *rdev,
                           enum radeon_hpd_id hpd)
{
        u32 tmp;
        bool connected = r600_hpd_sense(rdev, hpd);

        if (ASIC_IS_DCE3(rdev)) {
                switch (hpd) {
                case RADEON_HPD_1:
                        tmp = RREG32(DC_HPD1_INT_CONTROL);
                        if (connected)
                                tmp &= ~DC_HPDx_INT_POLARITY;
                        else
                                tmp |= DC_HPDx_INT_POLARITY;
                        WREG32(DC_HPD1_INT_CONTROL, tmp);
                        break;
                case RADEON_HPD_2:
                        tmp = RREG32(DC_HPD2_INT_CONTROL);
                        if (connected)
                                tmp &= ~DC_HPDx_INT_POLARITY;
                        else
                                tmp |= DC_HPDx_INT_POLARITY;
                        WREG32(DC_HPD2_INT_CONTROL, tmp);
                        break;
                case RADEON_HPD_3:
                        tmp = RREG32(DC_HPD3_INT_CONTROL);
                        if (connected)
                                tmp &= ~DC_HPDx_INT_POLARITY;
                        else
                                tmp |= DC_HPDx_INT_POLARITY;
                        WREG32(DC_HPD3_INT_CONTROL, tmp);
                        break;
                case RADEON_HPD_4:
                        tmp = RREG32(DC_HPD4_INT_CONTROL);
                        if (connected)
                                tmp &= ~DC_HPDx_INT_POLARITY;
                        else
                                tmp |= DC_HPDx_INT_POLARITY;
                        WREG32(DC_HPD4_INT_CONTROL, tmp);
                        break;
                        /* DCE 3.2 */
                case RADEON_HPD_5:
                        tmp = RREG32(DC_HPD5_INT_CONTROL);
                        if (connected)
                                tmp &= ~DC_HPDx_INT_POLARITY;
                        else
                                tmp |= DC_HPDx_INT_POLARITY;
                        WREG32(DC_HPD5_INT_CONTROL, tmp);
                        break;
                case RADEON_HPD_6:
                        tmp = RREG32(DC_HPD6_INT_CONTROL);
                        if (connected)
                                tmp &= ~DC_HPDx_INT_POLARITY;
                        else
                                tmp |= DC_HPDx_INT_POLARITY;
                        WREG32(DC_HPD6_INT_CONTROL, tmp);
                        break;
                default:
                        break;
                }
        } else {
                switch (hpd) {
                case RADEON_HPD_1:
                        tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
                        if (connected)
                                tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
                        else
                                tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
                        WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
                        break;
                case RADEON_HPD_2:
                        tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
                        if (connected)
                                tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
                        else
                                tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
                        WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
                        break;
                case RADEON_HPD_3:
                        tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
                        if (connected)
                                tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
                        else
                                tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
                        WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
                        break;
                default:
                        break;
                }
        }
}

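/*
 * Enable hot-plug detection for every connector's HPD pad: program the
 * per-pad control register (with connection/RX timers on DCE3 parts),
 * flag the pad in rdev->irq.hpd[], then let r600_irq_set() route the
 * interrupts if the IRQ handler is already installed.
 */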
void r600_hpd_init(struct radeon_device *rdev)
{
        struct drm_device *dev = rdev->ddev;
        struct drm_connector *connector;

        if (ASIC_IS_DCE3(rdev)) {
                u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
                if (ASIC_IS_DCE32(rdev))
                        tmp |= DC_HPDx_EN;

                list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
                        struct radeon_connector *radeon_connector = to_radeon_connector(connector);
                        switch (radeon_connector->hpd.hpd) {
                        case RADEON_HPD_1:
                                WREG32(DC_HPD1_CONTROL, tmp);
                                rdev->irq.hpd[0] = true;
                                break;
                        case RADEON_HPD_2:
                                WREG32(DC_HPD2_CONTROL, tmp);
                                rdev->irq.hpd[1] = true;
                                break;
                        case RADEON_HPD_3:
                                WREG32(DC_HPD3_CONTROL, tmp);
                                rdev->irq.hpd[2] = true;
                                break;
                        case RADEON_HPD_4:
                                WREG32(DC_HPD4_CONTROL, tmp);
                                rdev->irq.hpd[3] = true;
                                break;
                                /* DCE 3.2 */
                        case RADEON_HPD_5:
                                WREG32(DC_HPD5_CONTROL, tmp);
                                rdev->irq.hpd[4] = true;
                                break;
                        case RADEON_HPD_6:
                                WREG32(DC_HPD6_CONTROL, tmp);
                                rdev->irq.hpd[5] = true;
                                break;
                        default:
                                break;
                        }
                }
        } else {
                list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
                        struct radeon_connector *radeon_connector = to_radeon_connector(connector);
                        switch (radeon_connector->hpd.hpd) {
                        case RADEON_HPD_1:
                                WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
                                rdev->irq.hpd[0] = true;
                                break;
                        case RADEON_HPD_2:
                                WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
                                rdev->irq.hpd[1] = true;
                                break;
                        case RADEON_HPD_3:
                                WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
                                rdev->irq.hpd[2] = true;
                                break;
                        default:
                                break;
                        }
                }
        }
        if (rdev->irq.installed)
                r600_irq_set(rdev);
}

void r600_hpd_fini(struct radeon_device *rdev)
{
        struct drm_device *dev = rdev->ddev;
        struct drm_connector *connector;

        if (ASIC_IS_DCE3(rdev)) {
                list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
                        struct radeon_connector *radeon_connector = to_radeon_connector(connector);
                        switch (radeon_connector->hpd.hpd) {
                        case RADEON_HPD_1:
                                WREG32(DC_HPD1_CONTROL, 0);
                                rdev->irq.hpd[0] = false;
                                break;
                        case RADEON_HPD_2:
                                WREG32(DC_HPD2_CONTROL, 0);
                                rdev->irq.hpd[1] = false;
                                break;
                        case RADEON_HPD_3:
                                WREG32(DC_HPD3_CONTROL, 0);
                                rdev->irq.hpd[2] = false;
                                break;
                        case RADEON_HPD_4:
                                WREG32(DC_HPD4_CONTROL, 0);
                                rdev->irq.hpd[3] = false;
                                break;
                                /* DCE 3.2 */
                        case RADEON_HPD_5:
                                WREG32(DC_HPD5_CONTROL, 0);
                                rdev->irq.hpd[4] = false;
                                break;
                        case RADEON_HPD_6:
                                WREG32(DC_HPD6_CONTROL, 0);
                                rdev->irq.hpd[5] = false;
                                break;
                        default:
                                break;
                        }
                }
        } else {
                list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
                        struct radeon_connector *radeon_connector = to_radeon_connector(connector);
                        switch (radeon_connector->hpd.hpd) {
                        case RADEON_HPD_1:
                                WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
                                rdev->irq.hpd[0] = false;
                                break;
                        case RADEON_HPD_2:
                                WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
                                rdev->irq.hpd[1] = false;
                                break;
                        case RADEON_HPD_3:
                                WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
                                rdev->irq.hpd[2] = false;
                                break;
                        default:
                                break;
                        }
                }
        }
}

/*
 * R600 PCIE GART
 */
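/*
 * The GART maps scattered system pages into a linear GPU (GTT) address
 * range.  Page-table updates reach VRAM through the HDP, so the HDP cache
 * must be flushed before asking VM context 0 to invalidate its TLB; that
 * ordering is exactly what r600_pcie_gart_tlb_flush() below implements.
 */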
void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
        unsigned i;
        u32 tmp;

        /* flush hdp cache so updates hit vram */
        WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

        WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
        WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
        WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
        for (i = 0; i < rdev->usec_timeout; i++) {
                /* poll the invalidation response */
                tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
                tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
                if (tmp == 2) {
                        printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
                        return;
                }
                if (tmp) {
                        return;
                }
                udelay(1);
        }
}

int r600_pcie_gart_init(struct radeon_device *rdev)
{
        int r;

        if (rdev->gart.table.vram.robj) {
                WARN(1, "R600 PCIE GART already initialized.\n");
                return 0;
        }
        /* Initialize common gart structure */
        r = radeon_gart_init(rdev);
        if (r)
                return r;
        rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
        return radeon_gart_table_vram_alloc(rdev);
}

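/*
 * Each GART page-table entry is 8 bytes (a 64-bit page address plus flag
 * bits), hence table_size = num_gpu_pages * 8 in r600_pcie_gart_init().
 * Enabling the GART amounts to pointing VM context 0 at that table and
 * sizing the L1 TLBs and the L2 PTE cache around it.
 */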
int r600_pcie_gart_enable(struct radeon_device *rdev)
{
        u32 tmp;
        int r, i;

        if (rdev->gart.table.vram.robj == NULL) {
                dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
                return -EINVAL;
        }
        r = radeon_gart_table_vram_pin(rdev);
        if (r)
                return r;
        radeon_gart_restore(rdev);

        /* Setup L2 cache */
        WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
                                ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
                                EFFECTIVE_L2_QUEUE_SIZE(7));
        WREG32(VM_L2_CNTL2, 0);
        WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
        /* Setup TLB control */
        tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
                SYSTEM_ACCESS_MODE_NOT_IN_SYS |
                EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
                ENABLE_WAIT_L2_QUERY;
        WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
        WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
        WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
        WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
        WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
        WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
        WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
                                RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
        WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
                        (u32)(rdev->dummy_page.addr >> 12));
        for (i = 1; i < 7; i++)
                WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

        r600_pcie_gart_tlb_flush(rdev);
        rdev->gart.ready = true;
        return 0;
}

void r600_pcie_gart_disable(struct radeon_device *rdev)
{
        u32 tmp;
        int i, r;

        /* Disable all tables */
        for (i = 0; i < 7; i++)
                WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

        /* Disable L2 cache */
        WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
                                EFFECTIVE_L2_QUEUE_SIZE(7));
        WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
        /* Setup L1 TLB control */
        tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
                ENABLE_WAIT_L2_QUERY;
        WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
        if (rdev->gart.table.vram.robj) {
                r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
                if (likely(r == 0)) {
                        radeon_bo_kunmap(rdev->gart.table.vram.robj);
                        radeon_bo_unpin(rdev->gart.table.vram.robj);
                        radeon_bo_unreserve(rdev->gart.table.vram.robj);
                }
        }
}

void r600_pcie_gart_fini(struct radeon_device *rdev)
{
        radeon_gart_fini(rdev);
        r600_pcie_gart_disable(rdev);
        radeon_gart_table_vram_free(rdev);
}

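/*
 * In AGP mode there is no page table to point at: the L2/TLB setup below
 * mirrors r600_pcie_gart_enable(), but every VM context is left disabled,
 * so GPU addresses pass through to the AGP aperture untranslated.
 */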
void r600_agp_enable(struct radeon_device *rdev)
{
        u32 tmp;
        int i;

        /* Setup L2 cache */
        WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
                                ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
                                EFFECTIVE_L2_QUEUE_SIZE(7));
        WREG32(VM_L2_CNTL2, 0);
        WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
        /* Setup TLB control */
        tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
                SYSTEM_ACCESS_MODE_NOT_IN_SYS |
                EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
                ENABLE_WAIT_L2_QUERY;
        WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
        WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
        WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
        WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
        for (i = 0; i < 7; i++)
                WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
}

int r600_mc_wait_for_idle(struct radeon_device *rdev)
{
        unsigned i;
        u32 tmp;

        for (i = 0; i < rdev->usec_timeout; i++) {
                /* read the MC busy bits of SRBM_STATUS */
                tmp = RREG32(R_000E50_SRBM_STATUS) & 0x3F00;
                if (!tmp)
                        return 0;
                udelay(1);
        }
        return -1;
}

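/*
 * The 0x3F00 mask above appears to cover the MC busy bits of SRBM_STATUS;
 * the MC counts as idle only once they all clear.  r600_mc_program()
 * relies on this so the aperture registers are reprogrammed only while
 * the MC is quiescent.
 */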
static void r600_mc_program(struct radeon_device *rdev)
{
        struct rv515_mc_save save;
        u32 tmp;
        int i, j;

        /* Initialize HDP */
        for (i = 0, j = 0; i < 32; i++, j += 0x18) {
                WREG32((0x2c14 + j), 0x00000000);
                WREG32((0x2c18 + j), 0x00000000);
                WREG32((0x2c1c + j), 0x00000000);
                WREG32((0x2c20 + j), 0x00000000);
                WREG32((0x2c24 + j), 0x00000000);
        }
        WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

        rv515_mc_stop(rdev, &save);
        if (r600_mc_wait_for_idle(rdev)) {
                dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
        }
        /* Lockout access through VGA aperture (doesn't exist before R600) */
        WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
        /* Update configuration */
        if (rdev->flags & RADEON_IS_AGP) {
                if (rdev->mc.vram_start < rdev->mc.gtt_start) {
                        /* VRAM before AGP */
                        WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
                                rdev->mc.vram_start >> 12);
                        WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
                                rdev->mc.gtt_end >> 12);
                } else {
                        /* VRAM after AGP */
                        WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
                                rdev->mc.gtt_start >> 12);
                        WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
                                rdev->mc.vram_end >> 12);
                }
        } else {
                WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
                WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
        }
        WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
        tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
        tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
        WREG32(MC_VM_FB_LOCATION, tmp);
        WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
        WREG32(HDP_NONSURFACE_INFO, (2 << 7));
        WREG32(HDP_NONSURFACE_SIZE, rdev->mc.mc_vram_size | 0x3FF);
        if (rdev->flags & RADEON_IS_AGP) {
                WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
                WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
                WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
        } else {
                WREG32(MC_VM_AGP_BASE, 0);
                WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
                WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
        }
        if (r600_mc_wait_for_idle(rdev)) {
                dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
        }
        rv515_mc_resume(rdev, &save);
        /* we need to own VRAM, so turn off the VGA renderer here
         * to stop it from overwriting our objects */
        rv515_vga_render_disable(rdev);
}

/**
 * r600_vram_gtt_location - try to find VRAM & GTT location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * This function tries to place VRAM at the same address it has in the CPU
 * (PCI) address space, as some GPUs seem to have issues when it is
 * reprogrammed to a different address.
 *
 * If there is not enough space to fit the invisible VRAM after the
 * aperture, then we limit the VRAM size to the aperture.
 *
 * If we are using AGP, then place VRAM adjacent to the AGP aperture; we
 * need the two to be contiguous from the GPU's point of view so that we
 * can program the GPU to catch accesses outside of them (weird GPU
 * policy, see ??).
 *
 * This function never fails; in the worst case it limits VRAM or GTT.
 *
 * Note: GTT start, end, and size should be initialized before calling
 * this function on an AGP platform.
 */
void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
        u64 size_bf, size_af;

        if (mc->mc_vram_size > 0xE0000000) {
                /* leave room for at least 512M GTT */
                dev_warn(rdev->dev, "limiting VRAM\n");
                mc->real_vram_size = 0xE0000000;
                mc->mc_vram_size = 0xE0000000;
        }
        if (rdev->flags & RADEON_IS_AGP) {
                size_bf = mc->gtt_start;
                size_af = 0xFFFFFFFF - mc->gtt_end + 1;
                if (size_bf > size_af) {
                        if (mc->mc_vram_size > size_bf) {
                                dev_warn(rdev->dev, "limiting VRAM\n");
                                mc->real_vram_size = size_bf;
                                mc->mc_vram_size = size_bf;
                        }
                        mc->vram_start = mc->gtt_start - mc->mc_vram_size;
                } else {
                        if (mc->mc_vram_size > size_af) {
                                dev_warn(rdev->dev, "limiting VRAM\n");
                                mc->real_vram_size = size_af;
                                mc->mc_vram_size = size_af;
                        }
                        mc->vram_start = mc->gtt_end;
                }
                mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
                dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
                                mc->mc_vram_size >> 20, mc->vram_start,
                                mc->vram_end, mc->real_vram_size >> 20);
        } else {
                u64 base = 0;
                if (rdev->flags & RADEON_IS_IGP)
                        base = (RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24;
                radeon_vram_location(rdev, &rdev->mc, base);
                radeon_gtt_location(rdev, mc);
        }
}

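/*
 * Worked example for the AGP branch above (hypothetical numbers): with a
 * 256M AGP aperture at gtt_start 0xD0000000 (gtt_end 0xDFFFFFFF),
 * size_bf = 0xD0000000 (3.25G below) and size_af = 0x20000000 (512M
 * above), so 512M of VRAM lands just below the aperture: vram_start =
 * 0xB0000000, vram_end = 0xCFFFFFFF.
 */
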
int r600_mc_init(struct radeon_device *rdev)
{
        u32 tmp;
        int chansize, numchan;

        /* Get VRAM information */
        rdev->mc.vram_is_ddr = true;
        tmp = RREG32(RAMCFG);
        if (tmp & CHANSIZE_OVERRIDE) {
                chansize = 16;
        } else if (tmp & CHANSIZE_MASK) {
                chansize = 64;
        } else {
                chansize = 32;
        }
        tmp = RREG32(CHMAP);
        switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
        case 0:
        default:
                numchan = 1;
                break;
        case 1:
                numchan = 2;
                break;
        case 2:
                numchan = 4;
                break;
        case 3:
                numchan = 8;
                break;
        }
        rdev->mc.vram_width = numchan * chansize;
        /* Could the aperture size report 0? */
        rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
        rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
        /* Setup GPU memory space */
        rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
        rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
        rdev->mc.visible_vram_size = rdev->mc.aper_size;
        r600_vram_gtt_location(rdev, &rdev->mc);

        if (rdev->flags & RADEON_IS_IGP)
                rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
        radeon_update_bandwidth_info(rdev);
        return 0;
}

/* We don't check whether the GPU really needs a reset; we simply do the
 * reset.  It's up to the caller to determine if the GPU needs one.  We
 * might add a helper function to check that.
 */
int r600_gpu_soft_reset(struct radeon_device *rdev)
{
        struct rv515_mc_save save;
        u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) |
                                S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) |
                                S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) |
                                S_008010_SH_BUSY(1) | S_008010_SPI03_BUSY(1) |
                                S_008010_SMX_BUSY(1) | S_008010_SC_BUSY(1) |
                                S_008010_PA_BUSY(1) | S_008010_DB03_BUSY(1) |
                                S_008010_CR_BUSY(1) | S_008010_CB03_BUSY(1) |
                                S_008010_GUI_ACTIVE(1);
        u32 grbm2_busy_mask = S_008014_SPI0_BUSY(1) | S_008014_SPI1_BUSY(1) |
                        S_008014_SPI2_BUSY(1) | S_008014_SPI3_BUSY(1) |
                        S_008014_TA0_BUSY(1) | S_008014_TA1_BUSY(1) |
                        S_008014_TA2_BUSY(1) | S_008014_TA3_BUSY(1) |
                        S_008014_DB0_BUSY(1) | S_008014_DB1_BUSY(1) |
                        S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) |
                        S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
                        S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
        u32 tmp;

        dev_info(rdev->dev, "GPU softreset\n");
        dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
                RREG32(R_008010_GRBM_STATUS));
        dev_info(rdev->dev, "  R_008014_GRBM_STATUS2=0x%08X\n",
                RREG32(R_008014_GRBM_STATUS2));
        dev_info(rdev->dev, "  R_000E50_SRBM_STATUS=0x%08X\n",
                RREG32(R_000E50_SRBM_STATUS));
        rv515_mc_stop(rdev, &save);
        if (r600_mc_wait_for_idle(rdev)) {
                dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
        }
        /* Disable CP parsing/prefetching */
        WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
        /* Check if any of the rendering blocks is busy and reset it */
        if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
            (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
                tmp = S_008020_SOFT_RESET_CR(1) |
                        S_008020_SOFT_RESET_DB(1) |
                        S_008020_SOFT_RESET_CB(1) |
                        S_008020_SOFT_RESET_PA(1) |
                        S_008020_SOFT_RESET_SC(1) |
                        S_008020_SOFT_RESET_SMX(1) |
                        S_008020_SOFT_RESET_SPI(1) |
                        S_008020_SOFT_RESET_SX(1) |
                        S_008020_SOFT_RESET_SH(1) |
                        S_008020_SOFT_RESET_TC(1) |
                        S_008020_SOFT_RESET_TA(1) |
                        S_008020_SOFT_RESET_VC(1) |
                        S_008020_SOFT_RESET_VGT(1);
                dev_info(rdev->dev, "  R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
                WREG32(R_008020_GRBM_SOFT_RESET, tmp);
                RREG32(R_008020_GRBM_SOFT_RESET);
                mdelay(15);
                WREG32(R_008020_GRBM_SOFT_RESET, 0);
        }
        /* Reset CP (we always reset CP) */
        tmp = S_008020_SOFT_RESET_CP(1);
        dev_info(rdev->dev, "  R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
        WREG32(R_008020_GRBM_SOFT_RESET, tmp);
        RREG32(R_008020_GRBM_SOFT_RESET);
        mdelay(15);
        WREG32(R_008020_GRBM_SOFT_RESET, 0);
        /* Wait a little for things to settle down */
        mdelay(1);
        dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
                RREG32(R_008010_GRBM_STATUS));
        dev_info(rdev->dev, "  R_008014_GRBM_STATUS2=0x%08X\n",
                RREG32(R_008014_GRBM_STATUS2));
        dev_info(rdev->dev, "  R_000E50_SRBM_STATUS=0x%08X\n",
                RREG32(R_000E50_SRBM_STATUS));
        rv515_mc_resume(rdev, &save);
        return 0;
}

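/*
 * Lockup heuristic: if the GUI engine reports idle, refresh the lockup
 * tracker and report no hang.  Otherwise queue two PACKET2 NOPs to force
 * CP activity and let r100_gpu_cp_is_lockup() decide, based on whether
 * the CP read pointer still advances over time.
 */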
bool r600_gpu_is_lockup(struct radeon_device *rdev)
{
        u32 srbm_status;
        u32 grbm_status;
        u32 grbm_status2;
        struct r100_gpu_lockup *lockup;
        int r;

        /* this path is shared with the R7xx family; use the matching
         * per-family lockup tracker */
        if (rdev->family >= CHIP_RV770)
                lockup = &rdev->config.rv770.lockup;
        else
                lockup = &rdev->config.r600.lockup;

        srbm_status = RREG32(R_000E50_SRBM_STATUS);
        grbm_status = RREG32(R_008010_GRBM_STATUS);
        grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
        if (!G_008010_GUI_ACTIVE(grbm_status)) {
                r100_gpu_lockup_update(lockup, &rdev->cp);
                return false;
        }
        /* force CP activity */
        r = radeon_ring_lock(rdev, 2);
        if (!r) {
                /* PACKET2 NOP */
                radeon_ring_write(rdev, 0x80000000);
                radeon_ring_write(rdev, 0x80000000);
                radeon_ring_unlock_commit(rdev);
        }
        rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
        return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
}

int r600_asic_reset(struct radeon_device *rdev)
{
        return r600_gpu_soft_reset(rdev);
}

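/*
 * Build the tile-pipe to render-backend map: two bits per tile pipe
 * select which backend services it.  The enabled backends are rotated
 * across the swizzled pipe order so work stays spread out when some
 * backends are disabled or fused off.
 */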
static u32 r600_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
                                             u32 num_backends,
                                             u32 backend_disable_mask)
{
        u32 backend_map = 0;
        u32 enabled_backends_mask;
        u32 enabled_backends_count;
        u32 cur_pipe;
        u32 swizzle_pipe[R6XX_MAX_PIPES];
        u32 cur_backend;
        u32 i;

        if (num_tile_pipes > R6XX_MAX_PIPES)
                num_tile_pipes = R6XX_MAX_PIPES;
        if (num_tile_pipes < 1)
                num_tile_pipes = 1;
        if (num_backends > R6XX_MAX_BACKENDS)
                num_backends = R6XX_MAX_BACKENDS;
        if (num_backends < 1)
                num_backends = 1;

        enabled_backends_mask = 0;
        enabled_backends_count = 0;
        for (i = 0; i < R6XX_MAX_BACKENDS; ++i) {
                if (((backend_disable_mask >> i) & 1) == 0) {
                        enabled_backends_mask |= (1 << i);
                        ++enabled_backends_count;
                }
                if (enabled_backends_count == num_backends)
                        break;
        }

        if (enabled_backends_count == 0) {
                enabled_backends_mask = 1;
                enabled_backends_count = 1;
        }

        if (enabled_backends_count != num_backends)
                num_backends = enabled_backends_count;

        memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R6XX_MAX_PIPES);
        switch (num_tile_pipes) {
        case 1:
                swizzle_pipe[0] = 0;
                break;
        case 2:
                swizzle_pipe[0] = 0;
                swizzle_pipe[1] = 1;
                break;
        case 3:
                swizzle_pipe[0] = 0;
                swizzle_pipe[1] = 1;
                swizzle_pipe[2] = 2;
                break;
        case 4:
                swizzle_pipe[0] = 0;
                swizzle_pipe[1] = 1;
                swizzle_pipe[2] = 2;
                swizzle_pipe[3] = 3;
                break;
        case 5:
                swizzle_pipe[0] = 0;
                swizzle_pipe[1] = 1;
                swizzle_pipe[2] = 2;
                swizzle_pipe[3] = 3;
                swizzle_pipe[4] = 4;
                break;
        case 6:
                swizzle_pipe[0] = 0;
                swizzle_pipe[1] = 2;
                swizzle_pipe[2] = 4;
                swizzle_pipe[3] = 5;
                swizzle_pipe[4] = 1;
                swizzle_pipe[5] = 3;
                break;
        case 7:
                swizzle_pipe[0] = 0;
                swizzle_pipe[1] = 2;
                swizzle_pipe[2] = 4;
                swizzle_pipe[3] = 6;
                swizzle_pipe[4] = 1;
                swizzle_pipe[5] = 3;
                swizzle_pipe[6] = 5;
                break;
        case 8:
                swizzle_pipe[0] = 0;
                swizzle_pipe[1] = 2;
                swizzle_pipe[2] = 4;
                swizzle_pipe[3] = 6;
                swizzle_pipe[4] = 1;
                swizzle_pipe[5] = 3;
                swizzle_pipe[6] = 5;
                swizzle_pipe[7] = 7;
                break;
        }

        cur_backend = 0;
        for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
                while (((1 << cur_backend) & enabled_backends_mask) == 0)
                        cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;

                backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2)));

                cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
        }

        return backend_map;
}

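/* Open-coded popcount: counts the set bits, equivalent to hweight32(). */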
int r600_count_pipe_bits(uint32_t val)
{
        int i, ret = 0;

        for (i = 0; i < 32; i++) {
                ret += val & 1;
                val >>= 1;
        }
        return ret;
}

void r600_gpu_init(struct radeon_device *rdev)
{
        u32 tiling_config;
        u32 ramcfg;
        u32 backend_map;
        u32 cc_rb_backend_disable;
        u32 cc_gc_shader_pipe_config;
        u32 tmp;
        int i, j;
        u32 sq_config;
        u32 sq_gpr_resource_mgmt_1 = 0;
        u32 sq_gpr_resource_mgmt_2 = 0;
        u32 sq_thread_resource_mgmt = 0;
        u32 sq_stack_resource_mgmt_1 = 0;
        u32 sq_stack_resource_mgmt_2 = 0;

        /* FIXME: implement */
        switch (rdev->family) {
        case CHIP_R600:
                rdev->config.r600.max_pipes = 4;
                rdev->config.r600.max_tile_pipes = 8;
                rdev->config.r600.max_simds = 4;
                rdev->config.r600.max_backends = 4;
                rdev->config.r600.max_gprs = 256;
                rdev->config.r600.max_threads = 192;
                rdev->config.r600.max_stack_entries = 256;
                rdev->config.r600.max_hw_contexts = 8;
                rdev->config.r600.max_gs_threads = 16;
                rdev->config.r600.sx_max_export_size = 128;
                rdev->config.r600.sx_max_export_pos_size = 16;
                rdev->config.r600.sx_max_export_smx_size = 128;
                rdev->config.r600.sq_num_cf_insts = 2;
                break;
        case CHIP_RV630:
        case CHIP_RV635:
                rdev->config.r600.max_pipes = 2;
                rdev->config.r600.max_tile_pipes = 2;
                rdev->config.r600.max_simds = 3;
                rdev->config.r600.max_backends = 1;
                rdev->config.r600.max_gprs = 128;
                rdev->config.r600.max_threads = 192;
                rdev->config.r600.max_stack_entries = 128;
                rdev->config.r600.max_hw_contexts = 8;
                rdev->config.r600.max_gs_threads = 4;
                rdev->config.r600.sx_max_export_size = 128;
                rdev->config.r600.sx_max_export_pos_size = 16;
                rdev->config.r600.sx_max_export_smx_size = 128;
                rdev->config.r600.sq_num_cf_insts = 2;
                break;
        case CHIP_RV610:
        case CHIP_RV620:
        case CHIP_RS780:
        case CHIP_RS880:
                rdev->config.r600.max_pipes = 1;
                rdev->config.r600.max_tile_pipes = 1;
                rdev->config.r600.max_simds = 2;
                rdev->config.r600.max_backends = 1;
                rdev->config.r600.max_gprs = 128;
                rdev->config.r600.max_threads = 192;
                rdev->config.r600.max_stack_entries = 128;
                rdev->config.r600.max_hw_contexts = 4;
                rdev->config.r600.max_gs_threads = 4;
                rdev->config.r600.sx_max_export_size = 128;
                rdev->config.r600.sx_max_export_pos_size = 16;
                rdev->config.r600.sx_max_export_smx_size = 128;
                rdev->config.r600.sq_num_cf_insts = 1;
                break;
        case CHIP_RV670:
                rdev->config.r600.max_pipes = 4;
                rdev->config.r600.max_tile_pipes = 4;
                rdev->config.r600.max_simds = 4;
                rdev->config.r600.max_backends = 4;
                rdev->config.r600.max_gprs = 192;
                rdev->config.r600.max_threads = 192;
                rdev->config.r600.max_stack_entries = 256;
                rdev->config.r600.max_hw_contexts = 8;
                rdev->config.r600.max_gs_threads = 16;
                rdev->config.r600.sx_max_export_size = 128;
                rdev->config.r600.sx_max_export_pos_size = 16;
                rdev->config.r600.sx_max_export_smx_size = 128;
                rdev->config.r600.sq_num_cf_insts = 2;
                break;
        default:
                break;
        }

        /* Initialize HDP */
        for (i = 0, j = 0; i < 32; i++, j += 0x18) {
                WREG32((0x2c14 + j), 0x00000000);
                WREG32((0x2c18 + j), 0x00000000);
                WREG32((0x2c1c + j), 0x00000000);
                WREG32((0x2c20 + j), 0x00000000);
                WREG32((0x2c24 + j), 0x00000000);
        }

        WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));

        /* Setup tiling */
        tiling_config = 0;
        ramcfg = RREG32(RAMCFG);
        switch (rdev->config.r600.max_tile_pipes) {
        case 1:
                tiling_config |= PIPE_TILING(0);
                break;
        case 2:
                tiling_config |= PIPE_TILING(1);
                break;
        case 4:
                tiling_config |= PIPE_TILING(2);
                break;
        case 8:
                tiling_config |= PIPE_TILING(3);
                break;
        default:
                break;
        }
        rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
        rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
        tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
        tiling_config |= GROUP_SIZE(0);
        rdev->config.r600.tiling_group_size = 256;
        tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
        if (tmp > 3) {
                tiling_config |= ROW_TILING(3);
                tiling_config |= SAMPLE_SPLIT(3);
        } else {
                tiling_config |= ROW_TILING(tmp);
                tiling_config |= SAMPLE_SPLIT(tmp);
        }
        tiling_config |= BANK_SWAPS(1);

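        /*
         * (MASK << enabled_count) & MASK leaves 1-bits only at the
         * positions of the backends/pipes/SIMDs beyond what this family
         * provides, i.e. exactly the units that must be disabled.
         */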
        cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
        cc_rb_backend_disable |=
                BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK);

        cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
        cc_gc_shader_pipe_config |=
                INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK);
        cc_gc_shader_pipe_config |=
                INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK);

        backend_map = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes,
                                                        (R6XX_MAX_BACKENDS -
                                                         r600_count_pipe_bits((cc_rb_backend_disable &
                                                                               R6XX_MAX_BACKENDS_MASK) >> 16)),
                                                        (cc_rb_backend_disable >> 16));

        tiling_config |= BACKEND_MAP(backend_map);
        WREG32(GB_TILING_CONFIG, tiling_config);
        WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
        WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);

        /* Setup pipes */
        WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
        WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
        WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);

        tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
        WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
        WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);

        /* Setup some CP states */
        WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | ROQ_IB2_START(0x2b)));
        WREG32(CP_MEQ_THRESHOLDS, (MEQ_END(0x40) | ROQ_END(0x40)));

        WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | SYNC_GRADIENT |
                             SYNC_WALKER | SYNC_ALIGNER));
        /* Setup various GPU states */
        if (rdev->family == CHIP_RV670)
                WREG32(ARB_GDEC_RD_CNTL, 0x00000021);

        tmp = RREG32(SX_DEBUG_1);
        tmp |= SMX_EVENT_RELEASE;
        if ((rdev->family > CHIP_R600))
                tmp |= ENABLE_NEW_SMX_ADDRESS;
        WREG32(SX_DEBUG_1, tmp);

        if (((rdev->family) == CHIP_R600) ||
            ((rdev->family) == CHIP_RV630) ||
            ((rdev->family) == CHIP_RV610) ||
            ((rdev->family) == CHIP_RV620) ||
            ((rdev->family) == CHIP_RS780) ||
            ((rdev->family) == CHIP_RS880)) {
                WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE);
        } else {
                WREG32(DB_DEBUG, 0);
        }
        WREG32(DB_WATERMARKS, (DEPTH_FREE(4) | DEPTH_CACHELINE_FREE(16) |
                               DEPTH_FLUSH(16) | DEPTH_PENDING_FREE(4)));

        WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
        WREG32(VGT_NUM_INSTANCES, 0);

        WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
        WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(0));

        tmp = RREG32(SQ_MS_FIFO_SIZES);
        if (((rdev->family) == CHIP_RV610) ||
            ((rdev->family) == CHIP_RV620) ||
            ((rdev->family) == CHIP_RS780) ||
            ((rdev->family) == CHIP_RS880)) {
                tmp = (CACHE_FIFO_SIZE(0xa) |
                       FETCH_FIFO_HIWATER(0xa) |
                       DONE_FIFO_HIWATER(0xe0) |
                       ALU_UPDATE_FIFO_HIWATER(0x8));
        } else if (((rdev->family) == CHIP_R600) ||
                   ((rdev->family) == CHIP_RV630)) {
                tmp &= ~DONE_FIFO_HIWATER(0xff);
                tmp |= DONE_FIFO_HIWATER(0x4);
        }
        WREG32(SQ_MS_FIFO_SIZES, tmp);

        /* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
         * should be adjusted as needed by the 2D/3D drivers.  This just sets default values
         */
        sq_config = RREG32(SQ_CONFIG);
        sq_config &= ~(PS_PRIO(3) |
                       VS_PRIO(3) |
                       GS_PRIO(3) |
                       ES_PRIO(3));
        sq_config |= (DX9_CONSTS |
                      VC_ENABLE |
                      PS_PRIO(0) |
                      VS_PRIO(1) |
                      GS_PRIO(2) |
                      ES_PRIO(3));

        if ((rdev->family) == CHIP_R600) {
                sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(124) |
                                          NUM_VS_GPRS(124) |
                                          NUM_CLAUSE_TEMP_GPRS(4));
                sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(0) |
                                          NUM_ES_GPRS(0));
                sq_thread_resource_mgmt = (NUM_PS_THREADS(136) |
                                           NUM_VS_THREADS(48) |
                                           NUM_GS_THREADS(4) |
                                           NUM_ES_THREADS(4));
                sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(128) |
                                            NUM_VS_STACK_ENTRIES(128));
                sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(0) |
                                            NUM_ES_STACK_ENTRIES(0));
        } else if (((rdev->family) == CHIP_RV610) ||
                   ((rdev->family) == CHIP_RV620) ||
                   ((rdev->family) == CHIP_RS780) ||
                   ((rdev->family) == CHIP_RS880)) {
                /* no vertex cache */
                sq_config &= ~VC_ENABLE;

                sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
                                          NUM_VS_GPRS(44) |
                                          NUM_CLAUSE_TEMP_GPRS(2));
                sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
                                          NUM_ES_GPRS(17));
                sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
                                           NUM_VS_THREADS(78) |
                                           NUM_GS_THREADS(4) |
                                           NUM_ES_THREADS(31));
                sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
                                            NUM_VS_STACK_ENTRIES(40));
                sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
                                            NUM_ES_STACK_ENTRIES(16));
        } else if (((rdev->family) == CHIP_RV630) ||
                   ((rdev->family) == CHIP_RV635)) {
                sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
                                          NUM_VS_GPRS(44) |
                                          NUM_CLAUSE_TEMP_GPRS(2));
                sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(18) |
                                          NUM_ES_GPRS(18));
                sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
                                           NUM_VS_THREADS(78) |
                                           NUM_GS_THREADS(4) |
                                           NUM_ES_THREADS(31));
                sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
                                            NUM_VS_STACK_ENTRIES(40));
                sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
                                            NUM_ES_STACK_ENTRIES(16));
        } else if ((rdev->family) == CHIP_RV670) {
                sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
                                          NUM_VS_GPRS(44) |
                                          NUM_CLAUSE_TEMP_GPRS(2));
                sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
                                          NUM_ES_GPRS(17));
                sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
                                           NUM_VS_THREADS(78) |
                                           NUM_GS_THREADS(4) |
                                           NUM_ES_THREADS(31));
                sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(64) |
                                            NUM_VS_STACK_ENTRIES(64));
                sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(64) |
                                            NUM_ES_STACK_ENTRIES(64));
        }

        WREG32(SQ_CONFIG, sq_config);
        WREG32(SQ_GPR_RESOURCE_MGMT_1,  sq_gpr_resource_mgmt_1);
        WREG32(SQ_GPR_RESOURCE_MGMT_2,  sq_gpr_resource_mgmt_2);
        WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
        WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
        WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);

        if (((rdev->family) == CHIP_RV610) ||
            ((rdev->family) == CHIP_RV620) ||
            ((rdev->family) == CHIP_RS780) ||
            ((rdev->family) == CHIP_RS880)) {
                WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY));
        } else {
                WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC));
        }

        /* More default values. 2D/3D driver should adjust as needed */
1280         WREG32(PA_SC_AA_SAMPLE_LOCS_2S, (S0_X(0xc) | S0_Y(0x4) |
1281                                          S1_X(0x4) | S1_Y(0xc)));
1282         WREG32(PA_SC_AA_SAMPLE_LOCS_4S, (S0_X(0xe) | S0_Y(0xe) |
1283                                          S1_X(0x2) | S1_Y(0x2) |
1284                                          S2_X(0xa) | S2_Y(0x6) |
1285                                          S3_X(0x6) | S3_Y(0xa)));
1286         WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD0, (S0_X(0xe) | S0_Y(0xb) |
1287                                              S1_X(0x4) | S1_Y(0xc) |
1288                                              S2_X(0x1) | S2_Y(0x6) |
1289                                              S3_X(0xa) | S3_Y(0xe)));
1290         WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD1, (S4_X(0x6) | S4_Y(0x1) |
1291                                              S5_X(0x0) | S5_Y(0x0) |
1292                                              S6_X(0xb) | S6_Y(0x4) |
1293                                              S7_X(0x7) | S7_Y(0x8)));
1294
1295         WREG32(VGT_STRMOUT_EN, 0);
1296         tmp = rdev->config.r600.max_pipes * 16;
1297         switch (rdev->family) {
1298         case CHIP_RV610:
1299         case CHIP_RV620:
1300         case CHIP_RS780:
1301         case CHIP_RS880:
1302                 tmp += 32;
1303                 break;
1304         case CHIP_RV670:
1305                 tmp += 128;
1306                 break;
1307         default:
1308                 break;
1309         }
1310         if (tmp > 256) {
1311                 tmp = 256;
1312         }
1313         WREG32(VGT_ES_PER_GS, 128);
1314         WREG32(VGT_GS_PER_ES, tmp);
1315         WREG32(VGT_GS_PER_VS, 2);
1316         WREG32(VGT_GS_VERTEX_REUSE, 16);
1317
1318         /* more default values. 2D/3D driver should adjust as needed */
1319         WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
1320         WREG32(VGT_STRMOUT_EN, 0);
1321         WREG32(SX_MISC, 0);
1322         WREG32(PA_SC_MODE_CNTL, 0);
1323         WREG32(PA_SC_AA_CONFIG, 0);
1324         WREG32(PA_SC_LINE_STIPPLE, 0);
1325         WREG32(SPI_INPUT_Z, 0);
1326         WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
1327         WREG32(CB_COLOR7_FRAG, 0);
1328
1329         /* Clear render buffer base addresses */
1330         WREG32(CB_COLOR0_BASE, 0);
1331         WREG32(CB_COLOR1_BASE, 0);
1332         WREG32(CB_COLOR2_BASE, 0);
1333         WREG32(CB_COLOR3_BASE, 0);
1334         WREG32(CB_COLOR4_BASE, 0);
1335         WREG32(CB_COLOR5_BASE, 0);
1336         WREG32(CB_COLOR6_BASE, 0);
1337         WREG32(CB_COLOR7_BASE, 0);
1338         WREG32(CB_COLOR7_FRAG, 0);
1339
1340         switch (rdev->family) {
1341         case CHIP_RV610:
1342         case CHIP_RV620:
1343         case CHIP_RS780:
1344         case CHIP_RS880:
1345                 tmp = TC_L2_SIZE(8);
1346                 break;
1347         case CHIP_RV630:
1348         case CHIP_RV635:
1349                 tmp = TC_L2_SIZE(4);
1350                 break;
1351         case CHIP_R600:
1352                 tmp = TC_L2_SIZE(0) | L2_DISABLE_LATE_HIT;
1353                 break;
1354         default:
1355                 tmp = TC_L2_SIZE(0);
1356                 break;
1357         }
1358         WREG32(TC_CNTL, tmp);
1359
1360         tmp = RREG32(HDP_HOST_PATH_CNTL);
1361         WREG32(HDP_HOST_PATH_CNTL, tmp);
1362
1363         tmp = RREG32(ARB_POP);
1364         tmp |= ENABLE_TC128;
1365         WREG32(ARB_POP, tmp);
1366
1367         WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
1368         WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
1369                                NUM_CLIP_SEQ(3)));
1370         WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
1371 }
1372
1373
1374 /*
1375  * Indirect registers accessor
1376  */
1377 u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
1378 {
1379         u32 r;
1380
1381         WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
1382         (void)RREG32(PCIE_PORT_INDEX);
1383         r = RREG32(PCIE_PORT_DATA);
1384         return r;
1385 }
1386
1387 void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
1388 {
1389         WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
1390         (void)RREG32(PCIE_PORT_INDEX);
1391         WREG32(PCIE_PORT_DATA, (v));
1392         (void)RREG32(PCIE_PORT_DATA);
1393 }
1394
1395 /*
1396  * CP & Ring
1397  */
1398 void r600_cp_stop(struct radeon_device *rdev)
1399 {
1400         WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
1401 }
1402
1403 int r600_init_microcode(struct radeon_device *rdev)
1404 {
1405         struct platform_device *pdev;
1406         const char *chip_name;
1407         const char *rlc_chip_name;
1408         size_t pfp_req_size, me_req_size, rlc_req_size;
1409         char fw_name[30];
1410         int err;
1411
1412         DRM_DEBUG("\n");
1413
1414         pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
1415         err = IS_ERR(pdev);
1416         if (err) {
1417                 printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
1418                 return -EINVAL;
1419         }
1420
1421         switch (rdev->family) {
1422         case CHIP_R600:
1423                 chip_name = "R600";
1424                 rlc_chip_name = "R600";
1425                 break;
1426         case CHIP_RV610:
1427                 chip_name = "RV610";
1428                 rlc_chip_name = "R600";
1429                 break;
1430         case CHIP_RV630:
1431                 chip_name = "RV630";
1432                 rlc_chip_name = "R600";
1433                 break;
1434         case CHIP_RV620:
1435                 chip_name = "RV620";
1436                 rlc_chip_name = "R600";
1437                 break;
1438         case CHIP_RV635:
1439                 chip_name = "RV635";
1440                 rlc_chip_name = "R600";
1441                 break;
1442         case CHIP_RV670:
1443                 chip_name = "RV670";
1444                 rlc_chip_name = "R600";
1445                 break;
1446         case CHIP_RS780:
1447         case CHIP_RS880:
1448                 chip_name = "RS780";
1449                 rlc_chip_name = "R600";
1450                 break;
1451         case CHIP_RV770:
1452                 chip_name = "RV770";
1453                 rlc_chip_name = "R700";
1454                 break;
1455         case CHIP_RV730:
1456         case CHIP_RV740:
1457                 chip_name = "RV730";
1458                 rlc_chip_name = "R700";
1459                 break;
1460         case CHIP_RV710:
1461                 chip_name = "RV710";
1462                 rlc_chip_name = "R700";
1463                 break;
1464         case CHIP_CEDAR:
1465                 chip_name = "CEDAR";
1466                 rlc_chip_name = "CEDAR";
1467                 break;
1468         case CHIP_REDWOOD:
1469                 chip_name = "REDWOOD";
1470                 rlc_chip_name = "REDWOOD";
1471                 break;
1472         case CHIP_JUNIPER:
1473                 chip_name = "JUNIPER";
1474                 rlc_chip_name = "JUNIPER";
1475                 break;
1476         case CHIP_CYPRESS:
1477         case CHIP_HEMLOCK:
1478                 chip_name = "CYPRESS";
1479                 rlc_chip_name = "CYPRESS";
1480                 break;
1481         default: BUG();
1482         }
1483
1484         if (rdev->family >= CHIP_CEDAR) {
1485                 pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
1486                 me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
1487                 rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
1488         } else if (rdev->family >= CHIP_RV770) {
1489                 pfp_req_size = R700_PFP_UCODE_SIZE * 4;
1490                 me_req_size = R700_PM4_UCODE_SIZE * 4;
1491                 rlc_req_size = R700_RLC_UCODE_SIZE * 4;
1492         } else {
1493                 pfp_req_size = PFP_UCODE_SIZE * 4;
1494                 me_req_size = PM4_UCODE_SIZE * 12;
1495                 rlc_req_size = RLC_UCODE_SIZE * 4;
1496         }
1497
1498         DRM_INFO("Loading %s Microcode\n", chip_name);
1499
1500         snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
1501         err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
1502         if (err)
1503                 goto out;
1504         if (rdev->pfp_fw->size != pfp_req_size) {
1505                 printk(KERN_ERR
1506                        "r600_cp: Bogus length %zu in firmware \"%s\"\n",
1507                        rdev->pfp_fw->size, fw_name);
1508                 err = -EINVAL;
1509                 goto out;
1510         }
1511
1512         snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
1513         err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
1514         if (err)
1515                 goto out;
1516         if (rdev->me_fw->size != me_req_size) {
1517                 printk(KERN_ERR
1518                        "r600_cp: Bogus length %zu in firmware \"%s\"\n",
1519                        rdev->me_fw->size, fw_name);
1520                 err = -EINVAL;
                     goto out;
1521         }
1522
1523         snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
1524         err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
1525         if (err)
1526                 goto out;
1527         if (rdev->rlc_fw->size != rlc_req_size) {
1528                 printk(KERN_ERR
1529                        "r600_rlc: Bogus length %zu in firmware \"%s\"\n",
1530                        rdev->rlc_fw->size, fw_name);
1531                 err = -EINVAL;
1532         }
1533
1534 out:
1535         platform_device_unregister(pdev);
1536
1537         if (err) {
1538                 if (err != -EINVAL)
1539                         printk(KERN_ERR
1540                                "r600_cp: Failed to load firmware \"%s\"\n",
1541                                fw_name);
1542                 release_firmware(rdev->pfp_fw);
1543                 rdev->pfp_fw = NULL;
1544                 release_firmware(rdev->me_fw);
1545                 rdev->me_fw = NULL;
1546                 release_firmware(rdev->rlc_fw);
1547                 rdev->rlc_fw = NULL;
1548         }
1549         return err;
1550 }
1551
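     /* Load the PFP and ME microcode into the CP: halt and soft-reset the
      * CP, then stream each big-endian firmware word into the ME RAM and
      * PFP ucode RAM through their ADDR/DATA register pairs.
      */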
1552 static int r600_cp_load_microcode(struct radeon_device *rdev)
1553 {
1554         const __be32 *fw_data;
1555         int i;
1556
1557         if (!rdev->me_fw || !rdev->pfp_fw)
1558                 return -EINVAL;
1559
1560         r600_cp_stop(rdev);
1561
1562         WREG32(CP_RB_CNTL, RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
1563
1564         /* Reset cp */
1565         WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
1566         RREG32(GRBM_SOFT_RESET);
1567         mdelay(15);
1568         WREG32(GRBM_SOFT_RESET, 0);
1569
1570         WREG32(CP_ME_RAM_WADDR, 0);
1571
1572         fw_data = (const __be32 *)rdev->me_fw->data;
1573         WREG32(CP_ME_RAM_WADDR, 0);
1574         for (i = 0; i < PM4_UCODE_SIZE * 3; i++)
1575                 WREG32(CP_ME_RAM_DATA,
1576                        be32_to_cpup(fw_data++));
1577
1578         fw_data = (const __be32 *)rdev->pfp_fw->data;
1579         WREG32(CP_PFP_UCODE_ADDR, 0);
1580         for (i = 0; i < PFP_UCODE_SIZE; i++)
1581                 WREG32(CP_PFP_UCODE_DATA,
1582                        be32_to_cpup(fw_data++));
1583
1584         WREG32(CP_PFP_UCODE_ADDR, 0);
1585         WREG32(CP_ME_RAM_WADDR, 0);
1586         WREG32(CP_ME_RAM_RADDR, 0);
1587         return 0;
1588 }
1589
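     /* Emit the ME_INITIALIZE packet to bring the micro engine out of reset
      * (the hardware context count depends on the asic family), then write
      * CP_ME_CNTL with the halt bit cleared to start the ME.
      */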
1590 int r600_cp_start(struct radeon_device *rdev)
1591 {
1592         int r;
1593         uint32_t cp_me;
1594
1595         r = radeon_ring_lock(rdev, 7);
1596         if (r) {
1597                 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
1598                 return r;
1599         }
1600         radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
1601         radeon_ring_write(rdev, 0x1);
1602         if (rdev->family >= CHIP_CEDAR) {
1603                 radeon_ring_write(rdev, 0x0);
1604                 radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
1605         } else if (rdev->family >= CHIP_RV770) {
1606                 radeon_ring_write(rdev, 0x0);
1607                 radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
1608         } else {
1609                 radeon_ring_write(rdev, 0x3);
1610                 radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
1611         }
1612         radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
1613         radeon_ring_write(rdev, 0);
1614         radeon_ring_write(rdev, 0);
1615         radeon_ring_unlock_commit(rdev);
1616
1617         cp_me = 0xff;
1618         WREG32(R_0086D8_CP_ME_CNTL, cp_me);
1619         return 0;
1620 }
1621
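     /* Soft-reset the CP, program the ring buffer size, base address and
      * read/write pointers, then start the CP and verify it with a ring
      * test.
      */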
1622 int r600_cp_resume(struct radeon_device *rdev)
1623 {
1624         u32 tmp;
1625         u32 rb_bufsz;
1626         int r;
1627
1628         /* Reset cp */
1629         WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
1630         RREG32(GRBM_SOFT_RESET);
1631         mdelay(15);
1632         WREG32(GRBM_SOFT_RESET, 0);
1633
1634         /* Set ring buffer size */
1635         rb_bufsz = drm_order(rdev->cp.ring_size / 8);
1636         tmp = RB_NO_UPDATE | (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
1637 #ifdef __BIG_ENDIAN
1638         tmp |= BUF_SWAP_32BIT;
1639 #endif
1640         WREG32(CP_RB_CNTL, tmp);
1641         WREG32(CP_SEM_WAIT_TIMER, 0x4);
1642
1643         /* Set the write pointer delay */
1644         WREG32(CP_RB_WPTR_DELAY, 0);
1645
1646         /* Initialize the ring buffer's read and write pointers */
1647         WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
1648         WREG32(CP_RB_RPTR_WR, 0);
1649         WREG32(CP_RB_WPTR, 0);
1650         WREG32(CP_RB_RPTR_ADDR, rdev->cp.gpu_addr & 0xFFFFFFFF);
1651         WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->cp.gpu_addr));
1652         mdelay(1);
1653         WREG32(CP_RB_CNTL, tmp);
1654
1655         WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
1656         WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
1657
1658         rdev->cp.rptr = RREG32(CP_RB_RPTR);
1659         rdev->cp.wptr = RREG32(CP_RB_WPTR);
1660
1661         r600_cp_start(rdev);
1662         rdev->cp.ready = true;
1663         r = radeon_ring_test(rdev);
1664         if (r) {
1665                 rdev->cp.ready = false;
1666                 return r;
1667         }
1668         return 0;
1669 }
1670
1671 void r600_cp_commit(struct radeon_device *rdev)
1672 {
1673         WREG32(CP_RB_WPTR, rdev->cp.wptr);
1674         (void)RREG32(CP_RB_WPTR);
1675 }
1676
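     /* Align the requested CP ring size to a supported power of two and
      * record it along with the ring alignment mask.
      */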
1677 void r600_ring_init(struct radeon_device *rdev, unsigned ring_size)
1678 {
1679         u32 rb_bufsz;
1680
1681         /* Align ring size */
1682         rb_bufsz = drm_order(ring_size / 8);
1683         ring_size = (1 << (rb_bufsz + 1)) * 4;
1684         rdev->cp.ring_size = ring_size;
1685         rdev->cp.align_mask = 16 - 1;
1686 }
1687
1688 void r600_cp_fini(struct radeon_device *rdev)
1689 {
1690         r600_cp_stop(rdev);
1691         radeon_ring_fini(rdev);
1692 }
1693
1694
1695 /*
1696  * GPU scratch registers helpers function.
1697  */
1698 void r600_scratch_init(struct radeon_device *rdev)
1699 {
1700         int i;
1701
1702         rdev->scratch.num_reg = 7;
1703         for (i = 0; i < rdev->scratch.num_reg; i++) {
1704                 rdev->scratch.free[i] = true;
1705                 rdev->scratch.reg[i] = SCRATCH_REG0 + (i * 4);
1706         }
1707 }
1708
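     /* Basic CP sanity check: seed a scratch register with 0xCAFEDEAD, ask
      * the ring to overwrite it with 0xDEADBEEF via SET_CONFIG_REG, then
      * poll until the value lands or the timeout expires.
      */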
1709 int r600_ring_test(struct radeon_device *rdev)
1710 {
1711         uint32_t scratch;
1712         uint32_t tmp = 0;
1713         unsigned i;
1714         int r;
1715
1716         r = radeon_scratch_get(rdev, &scratch);
1717         if (r) {
1718                 DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
1719                 return r;
1720         }
1721         WREG32(scratch, 0xCAFEDEAD);
1722         r = radeon_ring_lock(rdev, 3);
1723         if (r) {
1724                 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
1725                 radeon_scratch_free(rdev, scratch);
1726                 return r;
1727         }
1728         radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
1729         radeon_ring_write(rdev, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
1730         radeon_ring_write(rdev, 0xDEADBEEF);
1731         radeon_ring_unlock_commit(rdev);
1732         for (i = 0; i < rdev->usec_timeout; i++) {
1733                 tmp = RREG32(scratch);
1734                 if (tmp == 0xDEADBEEF)
1735                         break;
1736                 DRM_UDELAY(1);
1737         }
1738         if (i < rdev->usec_timeout) {
1739                 DRM_INFO("ring test succeeded in %d usecs\n", i);
1740         } else {
1741                 DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
1742                           scratch, tmp);
1743                 r = -EINVAL;
1744         }
1745         radeon_scratch_free(rdev, scratch);
1746         return r;
1747 }
1748
1749 void r600_wb_disable(struct radeon_device *rdev)
1750 {
1751         int r;
1752
1753         WREG32(SCRATCH_UMSK, 0);
1754         if (rdev->wb.wb_obj) {
1755                 r = radeon_bo_reserve(rdev->wb.wb_obj, false);
1756                 if (unlikely(r != 0))
1757                         return;
1758                 radeon_bo_kunmap(rdev->wb.wb_obj);
1759                 radeon_bo_unpin(rdev->wb.wb_obj);
1760                 radeon_bo_unreserve(rdev->wb.wb_obj);
1761         }
1762 }
1763
1764 void r600_wb_fini(struct radeon_device *rdev)
1765 {
1766         r600_wb_disable(rdev);
1767         if (rdev->wb.wb_obj) {
1768                 radeon_bo_unref(&rdev->wb.wb_obj);
1769                 rdev->wb.wb = NULL;
1770                 rdev->wb.wb_obj = NULL;
1771         }
1772 }
1773
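     /* Allocate, pin and map a one-page writeback buffer in GTT on first
      * use, then point SCRATCH_ADDR and the CP read pointer writeback
      * address at it.
      */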
1774 int r600_wb_enable(struct radeon_device *rdev)
1775 {
1776         int r;
1777
1778         if (rdev->wb.wb_obj == NULL) {
1779                 r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
1780                                 RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
1781                 if (r) {
1782                         dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
1783                         return r;
1784                 }
1785                 r = radeon_bo_reserve(rdev->wb.wb_obj, false);
1786                 if (unlikely(r != 0)) {
1787                         r600_wb_fini(rdev);
1788                         return r;
1789                 }
1790                 r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
1791                                 &rdev->wb.gpu_addr);
1792                 if (r) {
1793                         radeon_bo_unreserve(rdev->wb.wb_obj);
1794                         dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
1795                         r600_wb_fini(rdev);
1796                         return r;
1797                 }
1798                 r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
1799                 radeon_bo_unreserve(rdev->wb.wb_obj);
1800                 if (r) {
1801                         dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
1802                         r600_wb_fini(rdev);
1803                         return r;
1804                 }
1805         }
1806         WREG32(SCRATCH_ADDR, (rdev->wb.gpu_addr >> 8) & 0xFFFFFFFF);
1807         WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + 1024) & 0xFFFFFFFC);
1808         WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + 1024) & 0xFF);
1809         WREG32(SCRATCH_UMSK, 0xff);
1810         return 0;
1811 }
1812
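     /* Emit a fence: flush caches, wait for the 3D engine to go idle and
      * clean, write the fence sequence to the driver's scratch register,
      * then raise a CP interrupt via a packet 0 write.
      */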
1813 void r600_fence_ring_emit(struct radeon_device *rdev,
1814                           struct radeon_fence *fence)
1815 {
1816         /* Also consider EVENT_WRITE_EOP.  It handles the interrupts + timestamps + events. */
1817
1818         radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
1819         radeon_ring_write(rdev, CACHE_FLUSH_AND_INV_EVENT);
1820         /* wait for 3D idle clean */
1821         radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
1822         radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
1823         radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
1824         /* Emit fence sequence & fire IRQ */
1825         radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
1826         radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
1827         radeon_ring_write(rdev, fence->seq);
1828         /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
1829         radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
1830         radeon_ring_write(rdev, RB_INT_STAT);
1831 }
1832
1833 int r600_copy_blit(struct radeon_device *rdev,
1834                    uint64_t src_offset, uint64_t dst_offset,
1835                    unsigned num_pages, struct radeon_fence *fence)
1836 {
1837         int r;
1838
1839         mutex_lock(&rdev->r600_blit.mutex);
1840         rdev->r600_blit.vb_ib = NULL;
1841         r = r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
1842         if (r) {
1843                 if (rdev->r600_blit.vb_ib)
1844                         radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
1845                 mutex_unlock(&rdev->r600_blit.mutex);
1846                 return r;
1847         }
1848         r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
1849         r600_blit_done_copy(rdev, fence);
1850         mutex_unlock(&rdev->r600_blit.mutex);
1851         return 0;
1852 }
1853
1854 int r600_set_surface_reg(struct radeon_device *rdev, int reg,
1855                          uint32_t tiling_flags, uint32_t pitch,
1856                          uint32_t offset, uint32_t obj_size)
1857 {
1858         /* FIXME: implement */
1859         return 0;
1860 }
1861
1862 void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
1863 {
1864         /* FIXME: implement */
1865 }
1866
1867
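     /* A card counts as posted if either CRTC is enabled or the memory
      * controller already reports a memory size.
      */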
1868 bool r600_card_posted(struct radeon_device *rdev)
1869 {
1870         uint32_t reg;
1871
1872         /* first check CRTCs */
1873         reg = RREG32(D1CRTC_CONTROL) |
1874                 RREG32(D2CRTC_CONTROL);
1875         if (reg & CRTC_EN)
1876                 return true;
1877
1878         /* then check MEM_SIZE, in case the crtcs are off */
1879         if (RREG32(CONFIG_MEMSIZE))
1880                 return true;
1881
1882         return false;
1883 }
1884
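     /* Common hw bring-up: load microcode if needed, program the MC, enable
      * AGP or the PCIE GART, init the GPU, blitter and IRQs, then start the
      * CP ring and writeback.
      */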
1885 int r600_startup(struct radeon_device *rdev)
1886 {
1887         int r;
1888
1889         if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
1890                 r = r600_init_microcode(rdev);
1891                 if (r) {
1892                         DRM_ERROR("Failed to load firmware!\n");
1893                         return r;
1894                 }
1895         }
1896
1897         r600_mc_program(rdev);
1898         if (rdev->flags & RADEON_IS_AGP) {
1899                 r600_agp_enable(rdev);
1900         } else {
1901                 r = r600_pcie_gart_enable(rdev);
1902                 if (r)
1903                         return r;
1904         }
1905         r600_gpu_init(rdev);
1906         r = r600_blit_init(rdev);
1907         if (r) {
1908                 r600_blit_fini(rdev);
1909                 rdev->asic->copy = NULL;
1910                 dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
1911         }
1912         /* pin copy shader into vram */
1913         if (rdev->r600_blit.shader_obj) {
1914                 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
1915                 if (unlikely(r != 0))
1916                         return r;
1917                 r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
1918                                 &rdev->r600_blit.shader_gpu_addr);
1919                 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
1920                 if (r) {
1921                         dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
1922                         return r;
1923                 }
1924         }
1925         /* Enable IRQ */
1926         r = r600_irq_init(rdev);
1927         if (r) {
1928                 DRM_ERROR("radeon: IH init failed (%d).\n", r);
1929                 radeon_irq_kms_fini(rdev);
1930                 return r;
1931         }
1932         r600_irq_set(rdev);
1933
1934         r = radeon_ring_init(rdev, rdev->cp.ring_size);
1935         if (r)
1936                 return r;
1937         r = r600_cp_load_microcode(rdev);
1938         if (r)
1939                 return r;
1940         r = r600_cp_resume(rdev);
1941         if (r)
1942                 return r;
1943         /* write back buffer is not vital, so don't worry about failure */
1944         r600_wb_enable(rdev);
1945         return 0;
1946 }
1947
1948 void r600_vga_set_state(struct radeon_device *rdev, bool state)
1949 {
1950         uint32_t temp;
1951
1952         temp = RREG32(CONFIG_CNTL);
1953         if (state == false) {
1954                 temp &= ~(1<<0);
1955                 temp |= (1<<1);
1956         } else {
1957                 temp &= ~(1<<1);
1958         }
1959         WREG32(CONFIG_CNTL, temp);
1960 }
1961
1962 int r600_resume(struct radeon_device *rdev)
1963 {
1964         int r;
1965
1966         /* Do not reset the GPU before posting; on r600 hw, unlike r500 hw,
1967          * posting performs the tasks needed to bring the GPU back into good
1968          * shape.
1969          */
1970         /* post card */
1971         atom_asic_init(rdev->mode_info.atom_context);
1972         /* Initialize clocks */
1973         r = radeon_clocks_init(rdev);
1974         if (r) {
1975                 return r;
1976         }
1977
1978         r = r600_startup(rdev);
1979         if (r) {
1980                 DRM_ERROR("r600 startup failed on resume\n");
1981                 return r;
1982         }
1983
1984         r = r600_ib_test(rdev);
1985         if (r) {
1986                 DRM_ERROR("radeon: failed testing IB (%d).\n", r);
1987                 return r;
1988         }
1989
1990         r = r600_audio_init(rdev);
1991         if (r) {
1992                 DRM_ERROR("radeon: audio resume failed\n");
1993                 return r;
1994         }
1995
1996         return r;
1997 }
1998
1999 int r600_suspend(struct radeon_device *rdev)
2000 {
2001         int r;
2002
2003         r600_audio_fini(rdev);
2004         /* FIXME: we should wait for ring to be empty */
2005         r600_cp_stop(rdev);
2006         rdev->cp.ready = false;
2007         r600_irq_suspend(rdev);
2008         r600_wb_disable(rdev);
2009         r600_pcie_gart_disable(rdev);
2010         /* unpin shaders bo */
2011         if (rdev->r600_blit.shader_obj) {
2012                 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
2013                 if (!r) {
2014                         radeon_bo_unpin(rdev->r600_blit.shader_obj);
2015                         radeon_bo_unreserve(rdev->r600_blit.shader_obj);
2016                 }
2017         }
2018         return 0;
2019 }
2020
2021 /* The plan is to move initialization into this function and to use
2022  * helper functions so that radeon_device_init does pretty much
2023  * nothing more than call asic-specific functions. This should
2024  * also allow us to remove a bunch of callback functions like
2025  * vram_info.
2026  */
2027 int r600_init(struct radeon_device *rdev)
2028 {
2029         int r;
2030
2031         r = radeon_dummy_page_init(rdev);
2032         if (r)
2033                 return r;
2034         if (r600_debugfs_mc_info_init(rdev)) {
2035                 DRM_ERROR("Failed to register debugfs file for mc!\n");
2036         }
2037         /* This doesn't do much */
2038         r = radeon_gem_init(rdev);
2039         if (r)
2040                 return r;
2041         /* Read BIOS */
2042         if (!radeon_get_bios(rdev)) {
2043                 if (ASIC_IS_AVIVO(rdev))
2044                         return -EINVAL;
2045         }
2046         /* Must be an ATOMBIOS */
2047         if (!rdev->is_atom_bios) {
2048                 dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
2049                 return -EINVAL;
2050         }
2051         r = radeon_atombios_init(rdev);
2052         if (r)
2053                 return r;
2054         /* Post card if necessary */
2055         if (!r600_card_posted(rdev)) {
2056                 if (!rdev->bios) {
2057                         dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
2058                         return -EINVAL;
2059                 }
2060                 DRM_INFO("GPU not posted. Posting now...\n");
2061                 atom_asic_init(rdev->mode_info.atom_context);
2062         }
2063         /* Initialize scratch registers */
2064         r600_scratch_init(rdev);
2065         /* Initialize surface registers */
2066         radeon_surface_init(rdev);
2067         /* Initialize clocks */
2068         radeon_get_clock_info(rdev->ddev);
2069         r = radeon_clocks_init(rdev);
2070         if (r)
2071                 return r;
2072         /* Initialize power management */
2073         radeon_pm_init(rdev);
2074         /* Fence driver */
2075         r = radeon_fence_driver_init(rdev);
2076         if (r)
2077                 return r;
2078         if (rdev->flags & RADEON_IS_AGP) {
2079                 r = radeon_agp_init(rdev);
2080                 if (r)
2081                         radeon_agp_disable(rdev);
2082         }
2083         r = r600_mc_init(rdev);
2084         if (r)
2085                 return r;
2086         /* Memory manager */
2087         r = radeon_bo_init(rdev);
2088         if (r)
2089                 return r;
2090
2091         r = radeon_irq_kms_init(rdev);
2092         if (r)
2093                 return r;
2094
2095         rdev->cp.ring_obj = NULL;
2096         r600_ring_init(rdev, 1024 * 1024);
2097
2098         rdev->ih.ring_obj = NULL;
2099         r600_ih_ring_init(rdev, 64 * 1024);
2100
2101         r = r600_pcie_gart_init(rdev);
2102         if (r)
2103                 return r;
2104
2105         rdev->accel_working = true;
2106         r = r600_startup(rdev);
2107         if (r) {
2108                 dev_err(rdev->dev, "disabling GPU acceleration\n");
2109                 r600_cp_fini(rdev);
2110                 r600_wb_fini(rdev);
2111                 r600_irq_fini(rdev);
2112                 radeon_irq_kms_fini(rdev);
2113                 r600_pcie_gart_fini(rdev);
2114                 rdev->accel_working = false;
2115         }
2116         if (rdev->accel_working) {
2117                 r = radeon_ib_pool_init(rdev);
2118                 if (r) {
2119                         dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
2120                         rdev->accel_working = false;
2121                 } else {
2122                         r = r600_ib_test(rdev);
2123                         if (r) {
2124                                 dev_err(rdev->dev, "IB test failed (%d).\n", r);
2125                                 rdev->accel_working = false;
2126                         }
2127                 }
2128         }
2129
2130         r = r600_audio_init(rdev);
2131         if (r)
2132                 return r; /* TODO error handling */
2133         return 0;
2134 }
2135
2136 void r600_fini(struct radeon_device *rdev)
2137 {
2138         radeon_pm_fini(rdev);
2139         r600_audio_fini(rdev);
2140         r600_blit_fini(rdev);
2141         r600_cp_fini(rdev);
2142         r600_wb_fini(rdev);
2143         r600_irq_fini(rdev);
2144         radeon_irq_kms_fini(rdev);
2145         r600_pcie_gart_fini(rdev);
2146         radeon_agp_fini(rdev);
2147         radeon_gem_fini(rdev);
2148         radeon_fence_driver_fini(rdev);
2149         radeon_clocks_fini(rdev);
2150         radeon_bo_fini(rdev);
2151         radeon_atombios_fini(rdev);
2152         kfree(rdev->bios);
2153         rdev->bios = NULL;
2154         radeon_dummy_page_fini(rdev);
2155 }
2156
2157
2158 /*
2159  * CS stuff
2160  */
2161 void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
2162 {
2163         /* FIXME: implement */
2164         radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
2165         radeon_ring_write(rdev, ib->gpu_addr & 0xFFFFFFFC);
2166         radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
2167         radeon_ring_write(rdev, ib->length_dw);
2168 }
2169
2170 int r600_ib_test(struct radeon_device *rdev)
2171 {
2172         struct radeon_ib *ib;
2173         uint32_t scratch;
2174         uint32_t tmp = 0;
2175         unsigned i;
2176         int r;
2177
2178         r = radeon_scratch_get(rdev, &scratch);
2179         if (r) {
2180                 DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
2181                 return r;
2182         }
2183         WREG32(scratch, 0xCAFEDEAD);
2184         r = radeon_ib_get(rdev, &ib);
2185         if (r) {
2186                 DRM_ERROR("radeon: failed to get ib (%d).\n", r);
2187                 return r;
2188         }
2189         ib->ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
2190         ib->ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
2191         ib->ptr[2] = 0xDEADBEEF;
2192         ib->ptr[3] = PACKET2(0);
2193         ib->ptr[4] = PACKET2(0);
2194         ib->ptr[5] = PACKET2(0);
2195         ib->ptr[6] = PACKET2(0);
2196         ib->ptr[7] = PACKET2(0);
2197         ib->ptr[8] = PACKET2(0);
2198         ib->ptr[9] = PACKET2(0);
2199         ib->ptr[10] = PACKET2(0);
2200         ib->ptr[11] = PACKET2(0);
2201         ib->ptr[12] = PACKET2(0);
2202         ib->ptr[13] = PACKET2(0);
2203         ib->ptr[14] = PACKET2(0);
2204         ib->ptr[15] = PACKET2(0);
2205         ib->length_dw = 16;
2206         r = radeon_ib_schedule(rdev, ib);
2207         if (r) {
2208                 radeon_scratch_free(rdev, scratch);
2209                 radeon_ib_free(rdev, &ib);
2210                 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
2211                 return r;
2212         }
2213         r = radeon_fence_wait(ib->fence, false);
2214         if (r) {
2215                 DRM_ERROR("radeon: fence wait failed (%d).\n", r);
                     radeon_scratch_free(rdev, scratch);
                     radeon_ib_free(rdev, &ib);
2216                 return r;
2217         }
2218         for (i = 0; i < rdev->usec_timeout; i++) {
2219                 tmp = RREG32(scratch);
2220                 if (tmp == 0xDEADBEEF)
2221                         break;
2222                 DRM_UDELAY(1);
2223         }
2224         if (i < rdev->usec_timeout) {
2225                 DRM_INFO("ib test succeeded in %u usecs\n", i);
2226         } else {
2227                 DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
2228                           scratch, tmp);
2229                 r = -EINVAL;
2230         }
2231         radeon_scratch_free(rdev, scratch);
2232         radeon_ib_free(rdev, &ib);
2233         return r;
2234 }
2235
2236 /*
2237  * Interrupts
2238  *
2239  * Interrupts use a ring buffer on r6xx/r7xx hardware.  It works pretty
2240  * the same as the CP ring buffer, but in reverse.  Rather than the CPU
2241  * writing to the ring and the GPU consuming, the GPU writes to the ring
2242  * and host consumes.  As the host irq handler processes interrupts, it
2243  * increments the rptr.  When the rptr catches up with the wptr, all the
2244  * current interrupts have been processed.
2245  */
2246
2247 void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
2248 {
2249         u32 rb_bufsz;
2250
2251         /* Align ring size */
2252         rb_bufsz = drm_order(ring_size / 4);
2253         ring_size = (1 << rb_bufsz) * 4;
2254         rdev->ih.ring_size = ring_size;
2255         rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
2256         rdev->ih.rptr = 0;
2257 }
2258
2259 static int r600_ih_ring_alloc(struct radeon_device *rdev)
2260 {
2261         int r;
2262
2263         /* Allocate ring buffer */
2264         if (rdev->ih.ring_obj == NULL) {
2265                 r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size,
2266                                      true,
2267                                      RADEON_GEM_DOMAIN_GTT,
2268                                      &rdev->ih.ring_obj);
2269                 if (r) {
2270                         DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
2271                         return r;
2272                 }
2273                 r = radeon_bo_reserve(rdev->ih.ring_obj, false);
2274                 if (unlikely(r != 0))
2275                         return r;
2276                 r = radeon_bo_pin(rdev->ih.ring_obj,
2277                                   RADEON_GEM_DOMAIN_GTT,
2278                                   &rdev->ih.gpu_addr);
2279                 if (r) {
2280                         radeon_bo_unreserve(rdev->ih.ring_obj);
2281                         DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r);
2282                         return r;
2283                 }
2284                 r = radeon_bo_kmap(rdev->ih.ring_obj,
2285                                    (void **)&rdev->ih.ring);
2286                 radeon_bo_unreserve(rdev->ih.ring_obj);
2287                 if (r) {
2288                         DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r);
2289                         return r;
2290                 }
2291         }
2292         return 0;
2293 }
2294
2295 static void r600_ih_ring_fini(struct radeon_device *rdev)
2296 {
2297         int r;
2298         if (rdev->ih.ring_obj) {
2299                 r = radeon_bo_reserve(rdev->ih.ring_obj, false);
2300                 if (likely(r == 0)) {
2301                         radeon_bo_kunmap(rdev->ih.ring_obj);
2302                         radeon_bo_unpin(rdev->ih.ring_obj);
2303                         radeon_bo_unreserve(rdev->ih.ring_obj);
2304                 }
2305                 radeon_bo_unref(&rdev->ih.ring_obj);
2306                 rdev->ih.ring = NULL;
2307                 rdev->ih.ring_obj = NULL;
2308         }
2309 }
2310
2311 void r600_rlc_stop(struct radeon_device *rdev)
2312 {
2313
2314         if ((rdev->family >= CHIP_RV770) &&
2315             (rdev->family <= CHIP_RV740)) {
2316                 /* r7xx asics need to soft reset RLC before halting */
2317                 WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
2318                 RREG32(SRBM_SOFT_RESET);
2319                 mdelay(15);
2320                 WREG32(SRBM_SOFT_RESET, 0);
2321                 RREG32(SRBM_SOFT_RESET);
2322         }
2323
2324         WREG32(RLC_CNTL, 0);
2325 }
2326
2327 static void r600_rlc_start(struct radeon_device *rdev)
2328 {
2329         WREG32(RLC_CNTL, RLC_ENABLE);
2330 }
2331
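     /* Reset the RLC, clear its host bus and MC registers, stream the
      * family-appropriate RLC microcode into the ucode RAM, then re-enable
      * the RLC.
      */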
2332 static int r600_rlc_init(struct radeon_device *rdev)
2333 {
2334         u32 i;
2335         const __be32 *fw_data;
2336
2337         if (!rdev->rlc_fw)
2338                 return -EINVAL;
2339
2340         r600_rlc_stop(rdev);
2341
2342         WREG32(RLC_HB_BASE, 0);
2343         WREG32(RLC_HB_CNTL, 0);
2344         WREG32(RLC_HB_RPTR, 0);
2345         WREG32(RLC_HB_WPTR, 0);
2346         WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
2347         WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
2348         WREG32(RLC_MC_CNTL, 0);
2349         WREG32(RLC_UCODE_CNTL, 0);
2350
2351         fw_data = (const __be32 *)rdev->rlc_fw->data;
2352         if (rdev->family >= CHIP_CEDAR) {
2353                 for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) {
2354                         WREG32(RLC_UCODE_ADDR, i);
2355                         WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
2356                 }
2357         } else if (rdev->family >= CHIP_RV770) {
2358                 for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
2359                         WREG32(RLC_UCODE_ADDR, i);
2360                         WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
2361                 }
2362         } else {
2363                 for (i = 0; i < RLC_UCODE_SIZE; i++) {
2364                         WREG32(RLC_UCODE_ADDR, i);
2365                         WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
2366                 }
2367         }
2368         WREG32(RLC_UCODE_ADDR, 0);
2369
2370         r600_rlc_start(rdev);
2371
2372         return 0;
2373 }
2374
2375 static void r600_enable_interrupts(struct radeon_device *rdev)
2376 {
2377         u32 ih_cntl = RREG32(IH_CNTL);
2378         u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
2379
2380         ih_cntl |= ENABLE_INTR;
2381         ih_rb_cntl |= IH_RB_ENABLE;
2382         WREG32(IH_CNTL, ih_cntl);
2383         WREG32(IH_RB_CNTL, ih_rb_cntl);
2384         rdev->ih.enabled = true;
2385 }
2386
2387 void r600_disable_interrupts(struct radeon_device *rdev)
2388 {
2389         u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
2390         u32 ih_cntl = RREG32(IH_CNTL);
2391
2392         ih_rb_cntl &= ~IH_RB_ENABLE;
2393         ih_cntl &= ~ENABLE_INTR;
2394         WREG32(IH_RB_CNTL, ih_rb_cntl);
2395         WREG32(IH_CNTL, ih_cntl);
2396         /* set rptr, wptr to 0 */
2397         WREG32(IH_RB_RPTR, 0);
2398         WREG32(IH_RB_WPTR, 0);
2399         rdev->ih.enabled = false;
2400         rdev->ih.wptr = 0;
2401         rdev->ih.rptr = 0;
2402 }
2403
2404 static void r600_disable_interrupt_state(struct radeon_device *rdev)
2405 {
2406         u32 tmp;
2407
2408         WREG32(CP_INT_CNTL, 0);
2409         WREG32(GRBM_INT_CNTL, 0);
2410         WREG32(DxMODE_INT_MASK, 0);
2411         if (ASIC_IS_DCE3(rdev)) {
2412                 WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
2413                 WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
2414                 tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2415                 WREG32(DC_HPD1_INT_CONTROL, tmp);
2416                 tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2417                 WREG32(DC_HPD2_INT_CONTROL, tmp);
2418                 tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2419                 WREG32(DC_HPD3_INT_CONTROL, tmp);
2420                 tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2421                 WREG32(DC_HPD4_INT_CONTROL, tmp);
2422                 if (ASIC_IS_DCE32(rdev)) {
2423                         tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2424                         WREG32(DC_HPD5_INT_CONTROL, tmp);
2425                         tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2426                         WREG32(DC_HPD6_INT_CONTROL, tmp);
2427                 }
2428         } else {
2429                 WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
2430                 WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
2431                 tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
2432                 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
2433                 tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
2434                 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
2435                 tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
2436                 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
2437         }
2438 }
2439
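     /* Bring up the interrupt handler: allocate the IH ring, load the RLC,
      * program INTERRUPT_CNTL and the IH ring base/size/control registers
      * (RPTR_REARM only works with MSIs), force every source off, then
      * enable interrupts.
      */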
2440 int r600_irq_init(struct radeon_device *rdev)
2441 {
2442         int ret = 0;
2443         int rb_bufsz;
2444         u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
2445
2446         /* allocate ring */
2447         ret = r600_ih_ring_alloc(rdev);
2448         if (ret)
2449                 return ret;
2450
2451         /* disable irqs */
2452         r600_disable_interrupts(rdev);
2453
2454         /* init rlc */
2455         ret = r600_rlc_init(rdev);
2456         if (ret) {
2457                 r600_ih_ring_fini(rdev);
2458                 return ret;
2459         }
2460
2461         /* setup interrupt control */
2462         /* set dummy read address to ring address */
2463         WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
2464         interrupt_cntl = RREG32(INTERRUPT_CNTL);
2465         /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
2466          * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
2467          */
2468         interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
2469         /* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
2470         interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
2471         WREG32(INTERRUPT_CNTL, interrupt_cntl);
2472
2473         WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
2474         rb_bufsz = drm_order(rdev->ih.ring_size / 4);
2475
2476         ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
2477                       IH_WPTR_OVERFLOW_CLEAR |
2478                       (rb_bufsz << 1));
2479         /* WPTR writeback, not yet */
2480         /*ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;*/
2481         WREG32(IH_RB_WPTR_ADDR_LO, 0);
2482         WREG32(IH_RB_WPTR_ADDR_HI, 0);
2483
2484         WREG32(IH_RB_CNTL, ih_rb_cntl);
2485
2486         /* set rptr, wptr to 0 */
2487         WREG32(IH_RB_RPTR, 0);
2488         WREG32(IH_RB_WPTR, 0);
2489
2490         /* Default settings for IH_CNTL (disabled at first) */
2491         ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10);
2492         /* RPTR_REARM only works if msi's are enabled */
2493         if (rdev->msi_enabled)
2494                 ih_cntl |= RPTR_REARM;
2495
2496 #ifdef __BIG_ENDIAN
2497         ih_cntl |= IH_MC_SWAP(IH_MC_SWAP_32BIT);
2498 #endif
2499         WREG32(IH_CNTL, ih_cntl);
2500
2501         /* force the active interrupt state to all disabled */
2502         if (rdev->family >= CHIP_CEDAR)
2503                 evergreen_disable_interrupt_state(rdev);
2504         else
2505                 r600_disable_interrupt_state(rdev);
2506
2507         /* enable irqs */
2508         r600_enable_interrupts(rdev);
2509
2510         return ret;
2511 }
2512
2513 void r600_irq_suspend(struct radeon_device *rdev)
2514 {
2515         r600_irq_disable(rdev);
2516         r600_rlc_stop(rdev);
2517 }
2518
2519 void r600_irq_fini(struct radeon_device *rdev)
2520 {
2521         r600_irq_suspend(rdev);
2522         r600_ih_ring_fini(rdev);
2523 }
2524
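     /* Translate the state tracked in rdev->irq (sw interrupt, vblanks and
      * hotplug lines) into the CP, display mode and HPD interrupt enables.
      */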
2525 int r600_irq_set(struct radeon_device *rdev)
2526 {
2527         u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
2528         u32 mode_int = 0;
2529         u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
2530
2531         if (!rdev->irq.installed) {
2532                 WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
2533                 return -EINVAL;
2534         }
2535         /* don't enable anything if the ih is disabled */
2536         if (!rdev->ih.enabled) {
2537                 r600_disable_interrupts(rdev);
2538                 /* force the active interrupt state to all disabled */
2539                 r600_disable_interrupt_state(rdev);
2540                 return 0;
2541         }
2542
2543         if (ASIC_IS_DCE3(rdev)) {
2544                 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
2545                 hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
2546                 hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
2547                 hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
2548                 if (ASIC_IS_DCE32(rdev)) {
2549                         hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
2550                         hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
2551                 }
2552         } else {
2553                 hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
2554                 hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
2555                 hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
2556         }
2557
2558         if (rdev->irq.sw_int) {
2559                 DRM_DEBUG("r600_irq_set: sw int\n");
2560                 cp_int_cntl |= RB_INT_ENABLE;
2561         }
2562         if (rdev->irq.crtc_vblank_int[0]) {
2563                 DRM_DEBUG("r600_irq_set: vblank 0\n");
2564                 mode_int |= D1MODE_VBLANK_INT_MASK;
2565         }
2566         if (rdev->irq.crtc_vblank_int[1]) {
2567                 DRM_DEBUG("r600_irq_set: vblank 1\n");
2568                 mode_int |= D2MODE_VBLANK_INT_MASK;
2569         }
2570         if (rdev->irq.hpd[0]) {
2571                 DRM_DEBUG("r600_irq_set: hpd 1\n");
2572                 hpd1 |= DC_HPDx_INT_EN;
2573         }
2574         if (rdev->irq.hpd[1]) {
2575                 DRM_DEBUG("r600_irq_set: hpd 2\n");
2576                 hpd2 |= DC_HPDx_INT_EN;
2577         }
2578         if (rdev->irq.hpd[2]) {
2579                 DRM_DEBUG("r600_irq_set: hpd 3\n");
2580                 hpd3 |= DC_HPDx_INT_EN;
2581         }
2582         if (rdev->irq.hpd[3]) {
2583                 DRM_DEBUG("r600_irq_set: hpd 4\n");
2584                 hpd4 |= DC_HPDx_INT_EN;
2585         }
2586         if (rdev->irq.hpd[4]) {
2587                 DRM_DEBUG("r600_irq_set: hpd 5\n");
2588                 hpd5 |= DC_HPDx_INT_EN;
2589         }
2590         if (rdev->irq.hpd[5]) {
2591                 DRM_DEBUG("r600_irq_set: hpd 6\n");
2592                 hpd6 |= DC_HPDx_INT_EN;
2593         }
2594
2595         WREG32(CP_INT_CNTL, cp_int_cntl);
2596         WREG32(DxMODE_INT_MASK, mode_int);
2597         if (ASIC_IS_DCE3(rdev)) {
2598                 WREG32(DC_HPD1_INT_CONTROL, hpd1);
2599                 WREG32(DC_HPD2_INT_CONTROL, hpd2);
2600                 WREG32(DC_HPD3_INT_CONTROL, hpd3);
2601                 WREG32(DC_HPD4_INT_CONTROL, hpd4);
2602                 if (ASIC_IS_DCE32(rdev)) {
2603                         WREG32(DC_HPD5_INT_CONTROL, hpd5);
2604                         WREG32(DC_HPD6_INT_CONTROL, hpd6);
2605                 }
2606         } else {
2607                 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
2608                 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
2609                 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
2610         }
2611
2612         return 0;
2613 }
2614
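     /* Latch the display interrupt status registers for the caller and
      * write the matching ACK bits for any pending vblank, vline or
      * hotplug interrupts.
      */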
2615 static inline void r600_irq_ack(struct radeon_device *rdev,
2616                                 u32 *disp_int,
2617                                 u32 *disp_int_cont,
2618                                 u32 *disp_int_cont2)
2619 {
2620         u32 tmp;
2621
2622         if (ASIC_IS_DCE3(rdev)) {
2623                 *disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
2624                 *disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
2625                 *disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
2626         } else {
2627                 *disp_int = RREG32(DISP_INTERRUPT_STATUS);
2628                 *disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
2629                 *disp_int_cont2 = 0;
2630         }
2631
2632         if (*disp_int & LB_D1_VBLANK_INTERRUPT)
2633                 WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
2634         if (*disp_int & LB_D1_VLINE_INTERRUPT)
2635                 WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
2636         if (*disp_int & LB_D2_VBLANK_INTERRUPT)
2637                 WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
2638         if (*disp_int & LB_D2_VLINE_INTERRUPT)
2639                 WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
2640         if (*disp_int & DC_HPD1_INTERRUPT) {
2641                 if (ASIC_IS_DCE3(rdev)) {
2642                         tmp = RREG32(DC_HPD1_INT_CONTROL);
2643                         tmp |= DC_HPDx_INT_ACK;
2644                         WREG32(DC_HPD1_INT_CONTROL, tmp);
2645                 } else {
2646                         tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
2647                         tmp |= DC_HPDx_INT_ACK;
2648                         WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
2649                 }
2650         }
2651         if (*disp_int & DC_HPD2_INTERRUPT) {
2652                 if (ASIC_IS_DCE3(rdev)) {
2653                         tmp = RREG32(DC_HPD2_INT_CONTROL);
2654                         tmp |= DC_HPDx_INT_ACK;
2655                         WREG32(DC_HPD2_INT_CONTROL, tmp);
2656                 } else {
2657                         tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
2658                         tmp |= DC_HPDx_INT_ACK;
2659                         WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
2660                 }
2661         }
2662         if (*disp_int_cont & DC_HPD3_INTERRUPT) {
2663                 if (ASIC_IS_DCE3(rdev)) {
2664                         tmp = RREG32(DC_HPD3_INT_CONTROL);
2665                         tmp |= DC_HPDx_INT_ACK;
2666                         WREG32(DC_HPD3_INT_CONTROL, tmp);
2667                 } else {
2668                         tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
2669                         tmp |= DC_HPDx_INT_ACK;
2670                         WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
2671                 }
2672         }
2673         if (*disp_int_cont & DC_HPD4_INTERRUPT) {
2674                 tmp = RREG32(DC_HPD4_INT_CONTROL);
2675                 tmp |= DC_HPDx_INT_ACK;
2676                 WREG32(DC_HPD4_INT_CONTROL, tmp);
2677         }
2678         if (ASIC_IS_DCE32(rdev)) {
2679                 if (*disp_int_cont2 & DC_HPD5_INTERRUPT) {
2680                         tmp = RREG32(DC_HPD5_INT_CONTROL);
2681                         tmp |= DC_HPDx_INT_ACK;
2682                         WREG32(DC_HPD5_INT_CONTROL, tmp);
2683                 }
2684                 if (*disp_int_cont2 & DC_HPD6_INTERRUPT) {
2685                         tmp = RREG32(DC_HPD6_INT_CONTROL);
2686                         tmp |= DC_HPDx_INT_ACK;
2687                         WREG32(DC_HPD6_INT_CONTROL, tmp);
2688                 }
2689         }
2690 }
2691
2692 void r600_irq_disable(struct radeon_device *rdev)
2693 {
2694         u32 disp_int, disp_int_cont, disp_int_cont2;
2695
2696         r600_disable_interrupts(rdev);
2697         /* Wait and acknowledge irq */
2698         mdelay(1);
2699         r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2);
2700         r600_disable_interrupt_state(rdev);
2701 }
2702
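     /* Fetch the current IH write pointer; on a ring overflow, resync rptr
      * to the oldest vector that was not overwritten and clear the overflow
      * flag.
      */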
2703 static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
2704 {
2705         u32 wptr, tmp;
2706
2707         /* XXX use writeback */
2708         wptr = RREG32(IH_RB_WPTR);
2709
2710         if (wptr & RB_OVERFLOW) {
2711                 /* When a ring buffer overflow happens, start parsing
2712                  * interrupts from the last vector that was not overwritten
2713                  * (wptr + 16). Hopefully this should allow us to catch up.
2714                  */
2715                 dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
2716                         wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
2717                 rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
2718                 tmp = RREG32(IH_RB_CNTL);
2719                 tmp |= IH_WPTR_OVERFLOW_CLEAR;
2720                 WREG32(IH_RB_CNTL, tmp);
2721         }
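        /*
         * Worked example for the overflow rewind above (values illustrative,
         * not from real hardware): with a 64KB ring, ih.ptr_mask is 0xffff;
         * an overflow flagged at wptr 0x0020 restarts rptr at
         * (0x0020 + 16) & 0xffff = 0x0030, the oldest 16-byte vector that
         * has not yet been overwritten.
         */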
2722         return (wptr & rdev->ih.ptr_mask);
2723 }
2724
2725 /* r600 IV Ring
2726  * Each IV ring entry is 128 bits:
2727  * [7:0]    - interrupt source id
2728  * [31:8]   - reserved
2729  * [59:32]  - interrupt source data
2730  * [127:60]  - reserved
2731  *
2732  * The basic interrupt vector entries
2733  * are decoded as follows:
2734  * src_id  src_data  description
2735  *      1         0  D1 Vblank
2736  *      1         1  D1 Vline
2737  *      5         0  D2 Vblank
2738  *      5         1  D2 Vline
2739  *     19         0  FP Hot plug detection A
2740  *     19         1  FP Hot plug detection B
2741  *     19         2  DAC A auto-detection
2742  *     19         3  DAC B auto-detection
2743  *    176         -  CP_INT RB
2744  *    177         -  CP_INT IB1
2745  *    178         -  CP_INT IB2
2746  *    181         -  EOP Interrupt
2747  *    233         -  GUI Idle
2748  *
2749  * Note, these are based on r600 and may need to be
2750  * adjusted or added to on newer asics
2751  */
2752
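/*
 * Illustrative sketch only (not part of the driver): how the two low dwords
 * of one IV ring entry map onto the fields documented above.
 * r600_irq_process() below performs the same decode inline.
 */
static inline void r600_iv_decode_example(const u32 *ring, u32 rptr,
                                          u32 *src_id, u32 *src_data)
{
        u32 ring_index = rptr / 4;      /* rptr counts bytes, ring[] dwords */

        *src_id = ring[ring_index] & 0xff;              /* [7:0]   source id   */
        *src_data = ring[ring_index + 1] & 0xfffffff;   /* [59:32] source data */
}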
2753 int r600_irq_process(struct radeon_device *rdev)
2754 {
2755         u32 wptr = r600_get_ih_wptr(rdev);
2756         u32 rptr = rdev->ih.rptr;
2757         u32 src_id, src_data;
2758         u32 ring_index, disp_int, disp_int_cont, disp_int_cont2;
2759         unsigned long flags;
2760         bool queue_hotplug = false;
2761
2762         DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
2763         if (!rdev->ih.enabled)
2764                 return IRQ_NONE;
2765
2766         spin_lock_irqsave(&rdev->ih.lock, flags);
2767
2768         if (rptr == wptr) {
2769                 spin_unlock_irqrestore(&rdev->ih.lock, flags);
2770                 return IRQ_NONE;
2771         }
2772         if (rdev->shutdown) {
2773                 spin_unlock_irqrestore(&rdev->ih.lock, flags);
2774                 return IRQ_NONE;
2775         }
2776
2777 restart_ih:
2778         /* display interrupts */
2779         r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2);
2780
2781         rdev->ih.wptr = wptr;
2782         while (rptr != wptr) {
2783                 /* wptr/rptr are in bytes! */
2784                 ring_index = rptr / 4;
2785                 src_id =  rdev->ih.ring[ring_index] & 0xff;
2786                 src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff;
2787
2788                 switch (src_id) {
2789                 case 1: /* D1 vblank/vline */
2790                         switch (src_data) {
2791                         case 0: /* D1 vblank */
2792                                 if (disp_int & LB_D1_VBLANK_INTERRUPT) {
2793                                         drm_handle_vblank(rdev->ddev, 0);
2794                                         rdev->pm.vblank_sync = true;
2795                                         wake_up(&rdev->irq.vblank_queue);
2796                                         disp_int &= ~LB_D1_VBLANK_INTERRUPT;
2797                                         DRM_DEBUG("IH: D1 vblank\n");
2798                                 }
2799                                 break;
2800                         case 1: /* D1 vline */
2801                                 if (disp_int & LB_D1_VLINE_INTERRUPT) {
2802                                         disp_int &= ~LB_D1_VLINE_INTERRUPT;
2803                                         DRM_DEBUG("IH: D1 vline\n");
2804                                 }
2805                                 break;
2806                         default:
2807                                 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
2808                                 break;
2809                         }
2810                         break;
2811                 case 5: /* D2 vblank/vline */
2812                         switch (src_data) {
2813                         case 0: /* D2 vblank */
2814                                 if (disp_int & LB_D2_VBLANK_INTERRUPT) {
2815                                         drm_handle_vblank(rdev->ddev, 1);
2816                                         rdev->pm.vblank_sync = true;
2817                                         wake_up(&rdev->irq.vblank_queue);
2818                                         disp_int &= ~LB_D2_VBLANK_INTERRUPT;
2819                                         DRM_DEBUG("IH: D2 vblank\n");
2820                                 }
2821                                 break;
2822                         case 1: /* D2 vline */
2823                                 if (disp_int & LB_D2_VLINE_INTERRUPT) {
2824                                         disp_int &= ~LB_D2_VLINE_INTERRUPT;
2825                                         DRM_DEBUG("IH: D2 vline\n");
2826                                 }
2827                                 break;
2828                         default:
2829                                 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
2830                                 break;
2831                         }
2832                         break;
2833                 case 19: /* HPD/DAC hotplug */
2834                         switch (src_data) {
2835                         case 0:
2836                                 if (disp_int & DC_HPD1_INTERRUPT) {
2837                                         disp_int &= ~DC_HPD1_INTERRUPT;
2838                                         queue_hotplug = true;
2839                                         DRM_DEBUG("IH: HPD1\n");
2840                                 }
2841                                 break;
2842                         case 1:
2843                                 if (disp_int & DC_HPD2_INTERRUPT) {
2844                                         disp_int &= ~DC_HPD2_INTERRUPT;
2845                                         queue_hotplug = true;
2846                                         DRM_DEBUG("IH: HPD2\n");
2847                                 }
2848                                 break;
2849                         case 4:
2850                                 if (disp_int_cont & DC_HPD3_INTERRUPT) {
2851                                         disp_int_cont &= ~DC_HPD3_INTERRUPT;
2852                                         queue_hotplug = true;
2853                                         DRM_DEBUG("IH: HPD3\n");
2854                                 }
2855                                 break;
2856                         case 5:
2857                                 if (disp_int_cont & DC_HPD4_INTERRUPT) {
2858                                         disp_int_cont &= ~DC_HPD4_INTERRUPT;
2859                                         queue_hotplug = true;
2860                                         DRM_DEBUG("IH: HPD4\n");
2861                                 }
2862                                 break;
2863                         case 10:
2864                                 if (disp_int_cont2 & DC_HPD5_INTERRUPT) {
2865                                         disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
2866                                         queue_hotplug = true;
2867                                         DRM_DEBUG("IH: HPD5\n");
2868                                 }
2869                                 break;
2870                         case 12:
2871                                 if (disp_int_cont2 & DC_HPD6_INTERRUPT) {
2872                                         disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
2873                                         queue_hotplug = true;
2874                                         DRM_DEBUG("IH: HPD6\n");
2875                                 }
2876                                 break;
2877                         default:
2878                                 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
2879                                 break;
2880                         }
2881                         break;
2882                 case 176: /* CP_INT in ring buffer */
2883                 case 177: /* CP_INT in IB1 */
2884                 case 178: /* CP_INT in IB2 */
2885                         DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
2886                         radeon_fence_process(rdev);
2887                         break;
2888                 case 181: /* CP EOP event */
2889                         DRM_DEBUG("IH: CP EOP\n");
2890                         break;
2891                 default:
2892                         DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
2893                         break;
2894                 }
2895
2896                 /* wptr/rptr are in bytes! */
2897                 rptr += 16;
2898                 rptr &= rdev->ih.ptr_mask;
2899         }
2900         /* make sure wptr hasn't changed while processing */
2901         wptr = r600_get_ih_wptr(rdev);
2902         if (wptr != rdev->ih.wptr)
2903                 goto restart_ih;
2904         if (queue_hotplug)
2905                 queue_work(rdev->wq, &rdev->hotplug_work);
2906         rdev->ih.rptr = rptr;
2907         WREG32(IH_RB_RPTR, rdev->ih.rptr);
2908         spin_unlock_irqrestore(&rdev->ih.lock, flags);
2909         return IRQ_HANDLED;
2910 }
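/*
 * Hedged sketch of how a handler like the one above is typically consumed:
 * the top-level interrupt handler forwards to the per-ASIC hook and passes
 * the result back to the core. The glue below is illustrative only, not the
 * actual radeon wiring.
 */
#if 0
static irqreturn_t example_irq_handler(int irq, void *arg)
{
        struct drm_device *dev = (struct drm_device *) arg;
        struct radeon_device *rdev = dev->dev_private;

        return r600_irq_process(rdev);  /* IRQ_HANDLED or IRQ_NONE */
}
#endif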
2911
2912 /*
2913  * Debugfs info
2914  */
2915 #if defined(CONFIG_DEBUG_FS)
2916
2917 static int r600_debugfs_cp_ring_info(struct seq_file *m, void *data)
2918 {
2919         struct drm_info_node *node = (struct drm_info_node *) m->private;
2920         struct drm_device *dev = node->minor->dev;
2921         struct radeon_device *rdev = dev->dev_private;
2922         unsigned count, i, j;
2923
2924         radeon_ring_free_size(rdev);
2925         count = (rdev->cp.ring_size / 4) - rdev->cp.ring_free_dw;
2926         seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT));
2927         seq_printf(m, "CP_RB_WPTR 0x%08x\n", RREG32(CP_RB_WPTR));
2928         seq_printf(m, "CP_RB_RPTR 0x%08x\n", RREG32(CP_RB_RPTR));
2929         seq_printf(m, "driver's copy of the CP_RB_WPTR 0x%08x\n", rdev->cp.wptr);
2930         seq_printf(m, "driver's copy of the CP_RB_RPTR 0x%08x\n", rdev->cp.rptr);
2931         seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
2932         seq_printf(m, "%u dwords in ring\n", count);
2933         i = rdev->cp.rptr;
2934         for (j = 0; j <= count; j++) {
2935                 seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
2936                 i = (i + 1) & rdev->cp.ptr_mask;
2937         }
2938         return 0;
2939 }
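/*
 * Sample r600_ring_info output (all values illustrative; here wptr leads
 * rptr by 8 dwords in a 32768-dword ring):
 *
 *      CP_STAT 0x80028645
 *      CP_RB_WPTR 0x00001220
 *      CP_RB_RPTR 0x00001218
 *      driver's copy of the CP_RB_WPTR 0x00001220
 *      driver's copy of the CP_RB_RPTR 0x00001218
 *      32760 free dwords in ring
 *      8 dwords in ring
 *      r[4632]=0x00000000
 *      ...
 */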
2940
2941 static int r600_debugfs_mc_info(struct seq_file *m, void *data)
2942 {
2943         struct drm_info_node *node = (struct drm_info_node *) m->private;
2944         struct drm_device *dev = node->minor->dev;
2945         struct radeon_device *rdev = dev->dev_private;
2946
2947         DREG32_SYS(m, rdev, R_000E50_SRBM_STATUS);
2948         DREG32_SYS(m, rdev, VM_L2_STATUS);
2949         return 0;
2950 }
2951
2952 static struct drm_info_list r600_mc_info_list[] = {
2953         {"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
2954         {"r600_ring_info", r600_debugfs_cp_ring_info, 0, NULL},
2955 };
2956 #endif
2957
2958 int r600_debugfs_mc_info_init(struct radeon_device *rdev)
2959 {
2960 #if defined(CONFIG_DEBUG_FS)
2961         return radeon_debugfs_add_files(rdev, r600_mc_info_list, ARRAY_SIZE(r600_mc_info_list));
2962 #else
2963         return 0;
2964 #endif
2965 }
2966
2967 /**
2968  * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
2969  * @rdev: radeon device structure
2970  * @bo: buffer object struct which userspace is waiting for idle
2971  *
2972  * Some R6XX/R7XX chips don't seem to take into account an HDP flush
2973  * performed through the ring buffer, which leads to corruption in
2974  * rendering (see http://bugzilla.kernel.org/show_bug.cgi?id=15186).
2975  * To avoid this we perform the HDP flush directly via an MMIO register write.
2976  */
2977 void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
2978 {
2979         WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
2980 }
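/*
 * For contrast with the MMIO write above, an HDP flush requested through the
 * ring would look roughly like this (illustrative sketch of the path that the
 * affected R6XX/R7XX parts do not honor reliably):
 *
 *      radeon_ring_write(rdev, PACKET0(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0));
 *      radeon_ring_write(rdev, 0x1);
 */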