/*
 * Copyright 2008 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"

void r100_cs_dump_packet(struct radeon_cs_parser *p,
                         struct radeon_cs_packet *pkt);

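/**
 * radeon_cs_parser_relocs() - resolve the relocation chunk into buffer objects
 * @p:          parser structure holding parsing context.
 *
 * Walks the relocation chunk, looks up the GEM object behind each handle,
 * records its read/write domains and adds the resulting buffer object to the
 * parser's validation list.  A handle that was already seen reuses the entry
 * created for its first occurrence.  Finishes by validating every buffer on
 * the list and returns 0 on success or a negative error code.
 **/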
int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
{
        struct drm_device *ddev = p->rdev->ddev;
        struct radeon_cs_chunk *chunk;
        unsigned i, j;
        bool duplicate;

        if (p->chunk_relocs_idx == -1) {
                return 0;
        }
        chunk = &p->chunks[p->chunk_relocs_idx];
        /* FIXME: we assume that each reloc uses 4 dwords */
        p->nrelocs = chunk->length_dw / 4;
        p->relocs_ptr = kcalloc(p->nrelocs, sizeof(void *), GFP_KERNEL);
        if (p->relocs_ptr == NULL) {
                return -ENOMEM;
        }
        p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_cs_reloc), GFP_KERNEL);
        if (p->relocs == NULL) {
                return -ENOMEM;
        }
        for (i = 0; i < p->nrelocs; i++) {
                struct drm_radeon_cs_reloc *r;

                duplicate = false;
                r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
                /* only compare against relocs that have already been filled in */
                for (j = 0; j < i; j++) {
                        if (r->handle == p->relocs[j].handle) {
                                p->relocs_ptr[i] = &p->relocs[j];
                                duplicate = true;
                                break;
                        }
                }
                if (!duplicate) {
                        p->relocs[i].gobj = drm_gem_object_lookup(ddev,
                                                                  p->filp,
                                                                  r->handle);
                        if (p->relocs[i].gobj == NULL) {
                                DRM_ERROR("gem object lookup failed 0x%x\n",
                                          r->handle);
                                return -ENOENT;
                        }
                        p->relocs_ptr[i] = &p->relocs[i];
                        p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj);
                        p->relocs[i].lobj.bo = p->relocs[i].robj;
                        p->relocs[i].lobj.wdomain = r->write_domain;
                        p->relocs[i].lobj.rdomain = r->read_domains;
                        p->relocs[i].lobj.tv.bo = &p->relocs[i].robj->tbo;
                        p->relocs[i].handle = r->handle;
                        p->relocs[i].flags = r->flags;
                        radeon_bo_list_add_object(&p->relocs[i].lobj,
                                                  &p->validated);
                }
        }
        return radeon_bo_list_validate(&p->validated);
}

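/**
 * radeon_cs_parser_init() - copy the command stream chunks from user space
 * @p:          parser structure holding parsing context.
 * @data:       pointer to the ioctl argument (struct drm_radeon_cs).
 *
 * Copies the chunk array from user space, records which chunk holds the IB
 * and which holds the relocations, and caches the contents of every non-IB
 * chunk in kernel memory (the first dword of the flags chunk is latched).
 * The IB chunk itself is copied page by page later, so only two scratch
 * pages are allocated for it here.  Returns 0 on success or a negative
 * error code.
 **/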
int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
{
        struct drm_radeon_cs *cs = data;
        uint64_t *chunk_array_ptr;
        unsigned size, i, flags = 0;

        if (!cs->num_chunks) {
                return 0;
        }
        /* get chunks */
        INIT_LIST_HEAD(&p->validated);
        p->idx = 0;
        p->chunk_ib_idx = -1;
        p->chunk_relocs_idx = -1;
        p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
        if (p->chunks_array == NULL) {
                return -ENOMEM;
        }
        chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks);
        if (DRM_COPY_FROM_USER(p->chunks_array, chunk_array_ptr,
                               sizeof(uint64_t)*cs->num_chunks)) {
                return -EFAULT;
        }
        p->nchunks = cs->num_chunks;
        p->chunks = kcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL);
        if (p->chunks == NULL) {
                return -ENOMEM;
        }
        for (i = 0; i < p->nchunks; i++) {
                struct drm_radeon_cs_chunk __user **chunk_ptr = NULL;
                struct drm_radeon_cs_chunk user_chunk;
                uint32_t __user *cdata;

                chunk_ptr = (void __user*)(unsigned long)p->chunks_array[i];
                if (DRM_COPY_FROM_USER(&user_chunk, chunk_ptr,
                                       sizeof(struct drm_radeon_cs_chunk))) {
                        return -EFAULT;
                }
                p->chunks[i].length_dw = user_chunk.length_dw;
                p->chunks[i].kdata = NULL;
                p->chunks[i].chunk_id = user_chunk.chunk_id;

                if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) {
                        p->chunk_relocs_idx = i;
                }
                if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
                        p->chunk_ib_idx = i;
                        /* zero length IB isn't useful */
                        if (p->chunks[i].length_dw == 0)
                                return -EINVAL;
                }
                if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS &&
                    !p->chunks[i].length_dw) {
                        return -EINVAL;
                }

                p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data;

                cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data;
                if (p->chunks[i].chunk_id != RADEON_CHUNK_ID_IB) {
                        size = p->chunks[i].length_dw * sizeof(uint32_t);
                        p->chunks[i].kdata = kmalloc(size, GFP_KERNEL);
                        if (p->chunks[i].kdata == NULL) {
                                return -ENOMEM;
                        }
                        if (DRM_COPY_FROM_USER(p->chunks[i].kdata,
                                               p->chunks[i].user_ptr, size)) {
                                return -EFAULT;
                        }
                        if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
                                flags = p->chunks[i].kdata[0];
                        }
                } else {
                        p->chunks[i].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
                        p->chunks[i].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
                        if (p->chunks[i].kpage[0] == NULL || p->chunks[i].kpage[1] == NULL) {
                                kfree(p->chunks[i].kpage[0]);
                                kfree(p->chunks[i].kpage[1]);
                                return -ENOMEM;
                        }
                        p->chunks[i].kpage_idx[0] = -1;
                        p->chunks[i].kpage_idx[1] = -1;
                        p->chunks[i].last_copied_page = -1;
                        p->chunks[i].last_page_index = ((p->chunks[i].length_dw * 4) - 1) / PAGE_SIZE;
                }
        }
        /* the rest of the submission path relies on an IB chunk being present;
         * bail out here instead of indexing p->chunks with -1 when none was
         * supplied */
        if (p->chunk_ib_idx == -1) {
                return -EINVAL;
        }
        if (p->chunks[p->chunk_ib_idx].length_dw > (16 * 1024)) {
                DRM_ERROR("cs IB too big: %d\n",
                          p->chunks[p->chunk_ib_idx].length_dw);
                return -EINVAL;
        }

        p->keep_tiling_flags = (flags & RADEON_CS_KEEP_TILING_FLAGS) != 0;
        return 0;
}

/**
 * radeon_cs_parser_fini() - clean parser states
 * @parser:     parser structure holding parsing context.
 * @error:      error number
 *
 * If an error occurred, back off the reservation of the validated buffers;
 * otherwise attach the IB fence to them.  In both cases free the memory
 * used by the parsing context.
 **/
static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
{
        unsigned i;

        if (!error && parser->ib)
                ttm_eu_fence_buffer_objects(&parser->validated,
                                            parser->ib->fence);
        else
                ttm_eu_backoff_reservation(&parser->validated);

        if (parser->relocs != NULL) {
                for (i = 0; i < parser->nrelocs; i++) {
                        if (parser->relocs[i].gobj)
                                drm_gem_object_unreference_unlocked(parser->relocs[i].gobj);
                }
        }
        kfree(parser->track);
        kfree(parser->relocs);
        kfree(parser->relocs_ptr);
        for (i = 0; i < parser->nchunks; i++) {
                kfree(parser->chunks[i].kdata);
                kfree(parser->chunks[i].kpage[0]);
                kfree(parser->chunks[i].kpage[1]);
        }
        kfree(parser->chunks);
        kfree(parser->chunks_array);
        radeon_ib_free(parser->rdev, &parser->ib);
}

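/**
 * radeon_cs_ioctl() - command stream submission ioctl
 * @dev:        drm device.
 * @data:       pointer to the ioctl argument (struct drm_radeon_cs).
 * @filp:       drm file the submission comes from.
 *
 * Entry point for the CS ioctl: initializes the parser, obtains an indirect
 * buffer, resolves relocations, checks the command stream with
 * radeon_cs_parse() and finally schedules the IB for execution.  The whole
 * path runs under the device wide cs_mutex, and the parser state is torn
 * down again on both the success and the error paths.
 **/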
int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
        struct radeon_device *rdev = dev->dev_private;
        struct radeon_cs_parser parser;
        struct radeon_cs_chunk *ib_chunk;
        int r;

        radeon_mutex_lock(&rdev->cs_mutex);
        /* initialize parser */
        memset(&parser, 0, sizeof(struct radeon_cs_parser));
        parser.filp = filp;
        parser.rdev = rdev;
        parser.dev = rdev->dev;
        parser.family = rdev->family;
        r = radeon_cs_parser_init(&parser, data);
        if (r) {
                DRM_ERROR("Failed to initialize parser !\n");
                radeon_cs_parser_fini(&parser, r);
                radeon_mutex_unlock(&rdev->cs_mutex);
                return r;
        }
        r = radeon_ib_get(rdev, &parser.ib);
        if (r) {
                DRM_ERROR("Failed to get ib !\n");
                radeon_cs_parser_fini(&parser, r);
                radeon_mutex_unlock(&rdev->cs_mutex);
                return r;
        }
        r = radeon_cs_parser_relocs(&parser);
        if (r) {
                if (r != -ERESTARTSYS)
                        DRM_ERROR("Failed to parse relocation %d!\n", r);
                radeon_cs_parser_fini(&parser, r);
                radeon_mutex_unlock(&rdev->cs_mutex);
                return r;
        }
        /* Copy the packet into the IB, the parser will read from the
         * input memory (cached) and write to the IB (which can be
         * uncached). */
        ib_chunk = &parser.chunks[parser.chunk_ib_idx];
        parser.ib->length_dw = ib_chunk->length_dw;
        r = radeon_cs_parse(&parser);
        if (r || parser.parser_error) {
                DRM_ERROR("Invalid command stream !\n");
                radeon_cs_parser_fini(&parser, r);
                radeon_mutex_unlock(&rdev->cs_mutex);
                return r;
        }
        r = radeon_cs_finish_pages(&parser);
        if (r) {
                DRM_ERROR("Invalid command stream !\n");
                radeon_cs_parser_fini(&parser, r);
                radeon_mutex_unlock(&rdev->cs_mutex);
                return r;
        }
        r = radeon_ib_schedule(rdev, parser.ib);
        if (r) {
                DRM_ERROR("Failed to schedule IB !\n");
        }
        radeon_cs_parser_fini(&parser, r);
        radeon_mutex_unlock(&rdev->cs_mutex);
        return r;
}

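/**
 * radeon_cs_finish_pages() - copy the remaining IB pages from user space
 * @p:          parser structure holding parsing context.
 *
 * Copies every page of the IB chunk that has not been pulled in yet straight
 * into the indirect buffer, taking care of the partial last page.  Returns 0
 * on success or -EFAULT if the user copy fails.
 **/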
int radeon_cs_finish_pages(struct radeon_cs_parser *p)
{
        struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
        int i;
        int size = PAGE_SIZE;

        for (i = ibc->last_copied_page + 1; i <= ibc->last_page_index; i++) {
                if (i == ibc->last_page_index) {
                        size = (ibc->length_dw * 4) % PAGE_SIZE;
                        if (size == 0)
                                size = PAGE_SIZE;
                }

                if (DRM_COPY_FROM_USER(p->ib->ptr + (i * (PAGE_SIZE/4)),
                                       ibc->user_ptr + (i * PAGE_SIZE),
                                       size))
                        return -EFAULT;
        }
        return 0;
}

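/**
 * radeon_cs_update_pages() - fetch IB page @pg_idx into a scratch page
 * @p:          parser structure holding parsing context.
 * @pg_idx:     index of the IB page that is needed next.
 *
 * Copies any pages between the last copied page and @pg_idx directly into
 * the indirect buffer, then pulls the requested page into the scratch page
 * holding the older data and copies it into the IB as well.  Returns the
 * scratch page index (0 or 1) that now holds @pg_idx; on a fault it sets
 * p->parser_error and returns 0.
 **/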
int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
{
        int new_page;
        struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
        int i;
        int size = PAGE_SIZE;

        for (i = ibc->last_copied_page + 1; i < pg_idx; i++) {
                if (DRM_COPY_FROM_USER(p->ib->ptr + (i * (PAGE_SIZE/4)),
                                       ibc->user_ptr + (i * PAGE_SIZE),
                                       PAGE_SIZE)) {
                        p->parser_error = -EFAULT;
                        return 0;
                }
        }

        new_page = ibc->kpage_idx[0] < ibc->kpage_idx[1] ? 0 : 1;

        if (pg_idx == ibc->last_page_index) {
                size = (ibc->length_dw * 4) % PAGE_SIZE;
                if (size == 0)
                        size = PAGE_SIZE;
        }

        if (DRM_COPY_FROM_USER(ibc->kpage[new_page],
                               ibc->user_ptr + (pg_idx * PAGE_SIZE),
                               size)) {
                p->parser_error = -EFAULT;
                return 0;
        }

        /* copy to IB here */
        memcpy((void *)(p->ib->ptr + (pg_idx * (PAGE_SIZE/4))), ibc->kpage[new_page], size);

        ibc->last_copied_page = pg_idx;
        ibc->kpage_idx[new_page] = pg_idx;

        return new_page;
}