drivers/gpu/drm/nouveau/nv40_fifo.c (pandora-kernel.git)
/*
 * Copyright (C) 2007 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"

#define NV40_RAMFC(c) (dev_priv->ramfc_offset + ((c) * NV40_RAMFC__SIZE))
#define NV40_RAMFC__SIZE 128

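/* Allocate and fill the channel's fixed FIFO context (RAMFC) entry, then
 * flag the channel as DMA-mode in PFIFO_MODE.  The fc + N offsets below
 * index into the 128-byte RAMFC record defined by NV40_RAMFC__SIZE.
 */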
int
nv40_fifo_create_context(struct nouveau_channel *chan)
{
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        uint32_t fc = NV40_RAMFC(chan->id);
        unsigned long flags;
        int ret;

        ret = nouveau_gpuobj_new_fake(dev, NV40_RAMFC(chan->id), ~0,
                                      NV40_RAMFC__SIZE, NVOBJ_FLAG_ZERO_ALLOC |
                                      NVOBJ_FLAG_ZERO_FREE, NULL, &chan->ramfc);
        if (ret)
                return ret;

        spin_lock_irqsave(&dev_priv->context_switch_lock, flags);

        dev_priv->engine.instmem.prepare_access(dev, true);
        nv_wi32(dev, fc +  0, chan->pushbuf_base);
        nv_wi32(dev, fc +  4, chan->pushbuf_base);
        nv_wi32(dev, fc + 12, chan->pushbuf->instance >> 4);
        nv_wi32(dev, fc + 24, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
                              NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
                              NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
#ifdef __BIG_ENDIAN
                              NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
                              0x30000000 /* no idea.. */);
        nv_wi32(dev, fc + 56, chan->ramin_grctx->instance >> 4);
        nv_wi32(dev, fc + 60, 0x0001FFFF);
        dev_priv->engine.instmem.finish_access(dev);

        /* enable the fifo dma operation */
        nv_wr32(dev, NV04_PFIFO_MODE,
                nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id));

        spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
        return 0;
}

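/* Clear the channel's DMA-mode bit in PFIFO_MODE and drop its RAMFC object. */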
void
nv40_fifo_destroy_context(struct nouveau_channel *chan)
{
        struct drm_device *dev = chan->dev;

        nv_wr32(dev, NV04_PFIFO_MODE,
                nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id));

        if (chan->ramfc)
                nouveau_gpuobj_ref_del(dev, &chan->ramfc);
}

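/* Restore PFIFO CACHE1 state for channel "chid" from its RAMFC entry.
 * Used both when switching to a real channel and (by unload/init) when
 * parking the hardware on the last channel id as a placeholder context.
 */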
static void
nv40_fifo_do_load_context(struct drm_device *dev, int chid)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        uint32_t fc = NV40_RAMFC(chid), tmp, tmp2;

        dev_priv->engine.instmem.prepare_access(dev, false);

        nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0));
        nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4));
        nv_wr32(dev, NV10_PFIFO_CACHE1_REF_CNT, nv_ri32(dev, fc + 8));
        nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE, nv_ri32(dev, fc + 12));
        nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT, nv_ri32(dev, fc + 16));
        nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_STATE, nv_ri32(dev, fc + 20));

        /* No idea what 0x2058 is.. */
        tmp   = nv_ri32(dev, fc + 24);
        tmp2  = nv_rd32(dev, 0x2058) & 0xFFF;
        tmp2 |= (tmp & 0x30000000);
        nv_wr32(dev, 0x2058, tmp2);
        tmp  &= ~0x30000000;
        nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_FETCH, tmp);

        nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_ri32(dev, fc + 28));
        nv_wr32(dev, NV04_PFIFO_CACHE1_PULL1, nv_ri32(dev, fc + 32));
        nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_VALUE, nv_ri32(dev, fc + 36));
        tmp = nv_ri32(dev, fc + 40);
        nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP, tmp);
        nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT, nv_ri32(dev, fc + 44));
        nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, nv_ri32(dev, fc + 48));
        nv_wr32(dev, NV10_PFIFO_CACHE1_DMA_SUBROUTINE, nv_ri32(dev, fc + 52));
        nv_wr32(dev, NV40_PFIFO_GRCTX_INSTANCE, nv_ri32(dev, fc + 56));

        /* Don't clobber the TIMEOUT_ENABLED flag when restoring from RAMFC */
        tmp  = nv_rd32(dev, NV04_PFIFO_DMA_TIMESLICE) & ~0x1FFFF;
        tmp |= nv_ri32(dev, fc + 60) & 0x1FFFF;
        nv_wr32(dev, NV04_PFIFO_DMA_TIMESLICE, tmp);

        nv_wr32(dev, 0x32e4, nv_ri32(dev, fc + 64));
        /* NVIDIA does this next line twice... */
        nv_wr32(dev, 0x32e8, nv_ri32(dev, fc + 68));
        nv_wr32(dev, 0x2088, nv_ri32(dev, fc + 76));
        nv_wr32(dev, 0x3300, nv_ri32(dev, fc + 80));

        dev_priv->engine.instmem.finish_access(dev);

        nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
        nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
}

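/* Switch PFIFO to the given channel: restore its RAMFC state, mark the
 * channel as active and in DMA mode, and re-arm DMA push.
 */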
int
nv40_fifo_load_context(struct nouveau_channel *chan)
{
        struct drm_device *dev = chan->dev;
        uint32_t tmp;

        nv40_fifo_do_load_context(dev, chan->id);

        /* Set channel active, and in DMA mode */
        nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1,
                     NV40_PFIFO_CACHE1_PUSH1_DMA | chan->id);
        nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 1);

        /* Reset DMA_CTL_AT_INFO to INVALID */
        tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_CTL) & ~(1 << 31);
        nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_CTL, tmp);

        return 0;
}

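/* Save the currently active channel's CACHE1 state back into its RAMFC
 * entry, then park PFIFO on the last channel id, which serves as a
 * placeholder context.
 */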
int
nv40_fifo_unload_context(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
        uint32_t fc, tmp;
        int chid;

        chid = pfifo->channel_id(dev);
        if (chid < 0 || chid >= dev_priv->engine.fifo.channels)
                return 0;
        fc = NV40_RAMFC(chid);

        dev_priv->engine.instmem.prepare_access(dev, true);
        nv_wi32(dev, fc + 0, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT));
        nv_wi32(dev, fc + 4, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
        nv_wi32(dev, fc + 8, nv_rd32(dev, NV10_PFIFO_CACHE1_REF_CNT));
        nv_wi32(dev, fc + 12, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE));
        nv_wi32(dev, fc + 16, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT));
        nv_wi32(dev, fc + 20, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_STATE));
        tmp  = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_FETCH);
        tmp |= nv_rd32(dev, 0x2058) & 0x30000000;
        nv_wi32(dev, fc + 24, tmp);
        nv_wi32(dev, fc + 28, nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE));
        nv_wi32(dev, fc + 32, nv_rd32(dev, NV04_PFIFO_CACHE1_PULL1));
        nv_wi32(dev, fc + 36, nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_VALUE));
        tmp = nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP);
        nv_wi32(dev, fc + 40, tmp);
        nv_wi32(dev, fc + 44, nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT));
        nv_wi32(dev, fc + 48, nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE));
        /* NVIDIA reads 0x3228 first, then writes DMA_GET here.. maybe
         * something more involved depending on the value of 0x3228?
         */
        nv_wi32(dev, fc + 52, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
        nv_wi32(dev, fc + 56, nv_rd32(dev, NV40_PFIFO_GRCTX_INSTANCE));
        nv_wi32(dev, fc + 60, nv_rd32(dev, NV04_PFIFO_DMA_TIMESLICE) & 0x1ffff);
        /* No idea what the below is for exactly, ripped from an mmio trace */
        nv_wi32(dev, fc + 64, nv_rd32(dev, NV40_PFIFO_UNK32E4));
        /* NVIDIA does this next line twice.. bug? */
        nv_wi32(dev, fc + 68, nv_rd32(dev, 0x32e8));
        nv_wi32(dev, fc + 76, nv_rd32(dev, 0x2088));
        nv_wi32(dev, fc + 80, nv_rd32(dev, 0x3300));
#if 0 /* no real idea which is PUT/GET in UNK_48.. */
        tmp  = nv_rd32(dev, NV04_PFIFO_CACHE1_GET);
        tmp |= (nv_rd32(dev, NV04_PFIFO_CACHE1_PUT) << 16);
        nv_wi32(dev, fc + 72, tmp);
#endif
        dev_priv->engine.instmem.finish_access(dev);

        nv40_fifo_do_load_context(dev, pfifo->channels - 1);
        nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1,
                     NV40_PFIFO_CACHE1_PUSH1_DMA | (pfifo->channels - 1));
        return 0;
}

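/* Reset PFIFO through the PMC enable register and put a number of (largely
 * undocumented) FIFO registers into a known initial state.
 */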
static void
nv40_fifo_init_reset(struct drm_device *dev)
{
        int i;

        nv_wr32(dev, NV03_PMC_ENABLE,
                nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PFIFO);
        nv_wr32(dev, NV03_PMC_ENABLE,
                nv_rd32(dev, NV03_PMC_ENABLE) |  NV_PMC_ENABLE_PFIFO);

        nv_wr32(dev, 0x003224, 0x000f0078);
        nv_wr32(dev, 0x003210, 0x00000000);
        nv_wr32(dev, 0x003270, 0x00000000);
        nv_wr32(dev, 0x003240, 0x00000000);
        nv_wr32(dev, 0x003244, 0x00000000);
        nv_wr32(dev, 0x003258, 0x00000000);
        nv_wr32(dev, 0x002504, 0x00000000);
        for (i = 0; i < 16; i++)
                nv_wr32(dev, 0x002510 + (i * 4), 0x00000000);
        nv_wr32(dev, 0x00250c, 0x0000ffff);
        nv_wr32(dev, 0x002048, 0x00000000);
        nv_wr32(dev, 0x003228, 0x00000000);
        nv_wr32(dev, 0x0032e8, 0x00000000);
        nv_wr32(dev, 0x002410, 0x00000000);
        nv_wr32(dev, 0x002420, 0x00000000);
        nv_wr32(dev, 0x002058, 0x00000001);
        nv_wr32(dev, 0x00221c, 0x00000000);
        /* something with 0x2084, read/modify/write, no change */
        nv_wr32(dev, 0x002040, 0x000000ff);
        nv_wr32(dev, 0x002500, 0x00000000);
        nv_wr32(dev, 0x003200, 0x00000000);

        nv_wr32(dev, NV04_PFIFO_DMA_TIMESLICE, 0x2101ffff);
}

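/* Point PFIFO at the instance-memory objects it needs: the handle hash
 * table (RAMHT), the runout area (RAMRO) and the FIFO context area
 * (RAMFC).  RAMFC placement differs per chipset.
 */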
static void
nv40_fifo_init_ramxx(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
                                       ((dev_priv->ramht_bits - 9) << 16) |
                                       (dev_priv->ramht_offset >> 8));
        nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro_offset >> 8);

        switch (dev_priv->chipset) {
        case 0x47:
        case 0x49:
        case 0x4b:
                nv_wr32(dev, 0x2230, 1);
                break;
        default:
                break;
        }

        switch (dev_priv->chipset) {
        case 0x40:
        case 0x41:
        case 0x42:
        case 0x43:
        case 0x45:
        case 0x47:
        case 0x48:
        case 0x49:
        case 0x4b:
                nv_wr32(dev, NV40_PFIFO_RAMFC, 0x30002);
                break;
        default:
                nv_wr32(dev, 0x2230, 0);
                nv_wr32(dev, NV40_PFIFO_RAMFC,
                        ((dev_priv->vram_size - 512 * 1024 +
                          dev_priv->ramfc_offset) >> 16) | (3 << 16));
                break;
        }
}

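/* Acknowledge any pending PFIFO interrupts (0x2100) and unmask all
 * interrupt sources (0x2140).
 */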
static void
nv40_fifo_init_intr(struct drm_device *dev)
{
        nv_wr32(dev, 0x002100, 0xffffffff);
        nv_wr32(dev, 0x002140, 0xffffffff);
}

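/* Full PFIFO bring-up: reset, set up the RAM objects, park on the
 * placeholder channel, enable interrupts and the engine, then re-enable
 * DMA mode for every channel that already exists.
 */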
int
nv40_fifo_init(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
        int i;

        nv40_fifo_init_reset(dev);
        nv40_fifo_init_ramxx(dev);

        nv40_fifo_do_load_context(dev, pfifo->channels - 1);
        nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);

        nv40_fifo_init_intr(dev);
        pfifo->enable(dev);
        pfifo->reassign(dev, true);

        for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
                if (dev_priv->fifos[i]) {
                        uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
                        nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
                }
        }

        return 0;
}