drivers/gpu/drm/radeon/radeon_cs.c
/*
 * Copyright 2008 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"

void r100_cs_dump_packet(struct radeon_cs_parser *p,
                         struct radeon_cs_packet *pkt);

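/**
 * radeon_cs_parser_relocs() - build and validate the relocation list
 * @p:          parser structure holding parsing context.
 *
 * Walks the relocation chunk, looks up the GEM object behind each
 * relocation handle (reusing the existing entry when a handle appears
 * more than once), queues the backing buffer objects on the validation
 * list and finally validates that list.
 **/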
int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
{
        struct drm_device *ddev = p->rdev->ddev;
        struct radeon_cs_chunk *chunk;
        unsigned i, j;
        bool duplicate;

        if (p->chunk_relocs_idx == -1) {
                return 0;
        }
        chunk = &p->chunks[p->chunk_relocs_idx];
        /* FIXME: we assume that each reloc uses 4 dwords */
        p->nrelocs = chunk->length_dw / 4;
        p->relocs_ptr = kcalloc(p->nrelocs, sizeof(void *), GFP_KERNEL);
        if (p->relocs_ptr == NULL) {
                return -ENOMEM;
        }
        p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_cs_reloc), GFP_KERNEL);
        if (p->relocs == NULL) {
                return -ENOMEM;
        }
        for (i = 0; i < p->nrelocs; i++) {
                struct drm_radeon_cs_reloc *r;

                duplicate = false;
                r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
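                /* reuse an existing entry if this handle was already seen */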
                for (j = 0; j < p->nrelocs; j++) {
                        if (r->handle == p->relocs[j].handle) {
                                p->relocs_ptr[i] = &p->relocs[j];
                                duplicate = true;
                                break;
                        }
                }
                if (!duplicate) {
                        p->relocs[i].gobj = drm_gem_object_lookup(ddev,
                                                                  p->filp,
                                                                  r->handle);
                        if (p->relocs[i].gobj == NULL) {
                                DRM_ERROR("gem object lookup failed 0x%x\n",
                                          r->handle);
                                return -ENOENT;
                        }
                        p->relocs_ptr[i] = &p->relocs[i];
                        p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj);
                        p->relocs[i].lobj.bo = p->relocs[i].robj;
                        p->relocs[i].lobj.wdomain = r->write_domain;
                        p->relocs[i].lobj.rdomain = r->read_domains;
                        p->relocs[i].lobj.tv.bo = &p->relocs[i].robj->tbo;
                        p->relocs[i].handle = r->handle;
                        p->relocs[i].flags = r->flags;
                        radeon_bo_list_add_object(&p->relocs[i].lobj,
                                                  &p->validated);
                }
        }
        return radeon_bo_list_validate(&p->validated);
}

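/**
 * radeon_cs_parser_init() - initialize the parser from userspace data
 * @p:          parser structure holding parsing context.
 * @data:       pointer to the drm_radeon_cs ioctl argument.
 *
 * Copies the chunk array from userspace and every chunk except the IB
 * chunk into kernel memory, remembering the indices of the IB,
 * relocation and flags chunks.  The IB chunk itself is streamed in
 * later, page by page, through two scratch pages.
 **/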
int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
{
        struct drm_radeon_cs *cs = data;
        uint64_t *chunk_array_ptr;
        unsigned size, i, flags = 0;

        INIT_LIST_HEAD(&p->validated);

        if (!cs->num_chunks) {
                return 0;
        }

        /* get chunks */
        p->idx = 0;
        p->chunk_ib_idx = -1;
        p->chunk_relocs_idx = -1;
        p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
        if (p->chunks_array == NULL) {
                return -ENOMEM;
        }
        chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks);
        if (DRM_COPY_FROM_USER(p->chunks_array, chunk_array_ptr,
                               sizeof(uint64_t)*cs->num_chunks)) {
                return -EFAULT;
        }
        p->nchunks = cs->num_chunks;
        p->chunks = kcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL);
        if (p->chunks == NULL) {
                return -ENOMEM;
        }
        for (i = 0; i < p->nchunks; i++) {
                struct drm_radeon_cs_chunk __user *chunk_ptr;
                struct drm_radeon_cs_chunk user_chunk;

                chunk_ptr = (void __user *)(unsigned long)p->chunks_array[i];
                if (DRM_COPY_FROM_USER(&user_chunk, chunk_ptr,
                                       sizeof(struct drm_radeon_cs_chunk))) {
                        return -EFAULT;
                }
                p->chunks[i].length_dw = user_chunk.length_dw;
                p->chunks[i].kdata = NULL;
                p->chunks[i].chunk_id = user_chunk.chunk_id;

                if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) {
                        p->chunk_relocs_idx = i;
                }
                if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
                        p->chunk_ib_idx = i;
                        /* zero length IB isn't useful */
                        if (p->chunks[i].length_dw == 0)
                                return -EINVAL;
                }
                if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS &&
                    !p->chunks[i].length_dw) {
                        return -EINVAL;
                }

                p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data;

                if (p->chunks[i].chunk_id != RADEON_CHUNK_ID_IB) {
                        size = p->chunks[i].length_dw * sizeof(uint32_t);
                        p->chunks[i].kdata = kmalloc(size, GFP_KERNEL);
                        if (p->chunks[i].kdata == NULL) {
                                return -ENOMEM;
                        }
                        if (DRM_COPY_FROM_USER(p->chunks[i].kdata,
                                               p->chunks[i].user_ptr, size)) {
                                return -EFAULT;
                        }
                        if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
                                flags = p->chunks[i].kdata[0];
                        }
                } else {
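                        /* The IB chunk is not copied up front: it is
                         * streamed at parse time through the two
                         * scratch pages allocated here.
                         */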
                        p->chunks[i].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
                        p->chunks[i].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
                        if (p->chunks[i].kpage[0] == NULL || p->chunks[i].kpage[1] == NULL) {
                                kfree(p->chunks[i].kpage[0]);
                                kfree(p->chunks[i].kpage[1]);
                                p->chunks[i].kpage[0] = NULL;
                                p->chunks[i].kpage[1] = NULL;
                                return -ENOMEM;
                        }
                        p->chunks[i].kpage_idx[0] = -1;
                        p->chunks[i].kpage_idx[1] = -1;
                        p->chunks[i].last_copied_page = -1;
                        p->chunks[i].last_page_index = ((p->chunks[i].length_dw * 4) - 1) / PAGE_SIZE;
                }
        }
        /* without an IB chunk the size check below would index chunks[-1] */
        if (p->chunk_ib_idx == -1) {
                return -EINVAL;
        }
        if (p->chunks[p->chunk_ib_idx].length_dw > (16 * 1024)) {
                DRM_ERROR("cs IB too big: %d\n",
                          p->chunks[p->chunk_ib_idx].length_dw);
                return -EINVAL;
        }

        p->keep_tiling_flags = (flags & RADEON_CS_KEEP_TILING_FLAGS) != 0;
        return 0;
}

/**
 * radeon_cs_parser_fini() - clean parser states
 * @parser:     parser structure holding parsing context.
 * @error:      error number
 *
 * If error is set, back off the buffer reservations; otherwise fence
 * the validated buffers.  In both cases free all memory used by the
 * parsing context.
 **/
static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
{
        unsigned i;

        if (!error && parser->ib)
                ttm_eu_fence_buffer_objects(&parser->validated,
                                            parser->ib->fence);
        else
                ttm_eu_backoff_reservation(&parser->validated);

        if (parser->relocs != NULL) {
                for (i = 0; i < parser->nrelocs; i++) {
                        if (parser->relocs[i].gobj)
                                drm_gem_object_unreference_unlocked(parser->relocs[i].gobj);
                }
        }
        kfree(parser->track);
        kfree(parser->relocs);
        kfree(parser->relocs_ptr);
        for (i = 0; i < parser->nchunks; i++) {
                kfree(parser->chunks[i].kdata);
                kfree(parser->chunks[i].kpage[0]);
                kfree(parser->chunks[i].kpage[1]);
        }
        kfree(parser->chunks);
        kfree(parser->chunks_array);
        radeon_ib_free(parser->rdev, &parser->ib);
}

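/**
 * radeon_cs_ioctl() - parse and schedule a command stream
 * @dev:        drm device.
 * @data:       pointer to the drm_radeon_cs ioctl argument.
 * @filp:       file private of the submitting client.
 *
 * Entry point of the CS ioctl: sets up the parser, resolves and
 * validates the relocations, parses the command stream into an
 * indirect buffer and schedules that IB on the GPU.  The whole path
 * runs under rdev->cs_mutex.
 **/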
int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
        struct radeon_device *rdev = dev->dev_private;
        struct radeon_cs_parser parser;
        struct radeon_cs_chunk *ib_chunk;
        int r;

        radeon_mutex_lock(&rdev->cs_mutex);
        /* initialize parser */
        memset(&parser, 0, sizeof(struct radeon_cs_parser));
        parser.filp = filp;
        parser.rdev = rdev;
        parser.dev = rdev->dev;
        parser.family = rdev->family;
        r = radeon_cs_parser_init(&parser, data);
        if (r) {
                DRM_ERROR("Failed to initialize parser!\n");
                radeon_cs_parser_fini(&parser, r);
                radeon_mutex_unlock(&rdev->cs_mutex);
                return r;
        }
        r = radeon_ib_get(rdev, &parser.ib);
        if (r) {
                DRM_ERROR("Failed to get ib!\n");
                radeon_cs_parser_fini(&parser, r);
                radeon_mutex_unlock(&rdev->cs_mutex);
                return r;
        }
        r = radeon_cs_parser_relocs(&parser);
        if (r) {
                if (r != -ERESTARTSYS)
                        DRM_ERROR("Failed to parse relocation %d!\n", r);
                radeon_cs_parser_fini(&parser, r);
                radeon_mutex_unlock(&rdev->cs_mutex);
                return r;
        }
        /* Copy the packet into the IB; the parser will read from the
         * input memory (cached) and write to the IB (which can be
         * uncached). */
        ib_chunk = &parser.chunks[parser.chunk_ib_idx];
        parser.ib->length_dw = ib_chunk->length_dw;
        r = radeon_cs_parse(&parser);
        if (r || parser.parser_error) {
                DRM_ERROR("Invalid command stream!\n");
                radeon_cs_parser_fini(&parser, r);
                radeon_mutex_unlock(&rdev->cs_mutex);
                return r;
        }
        r = radeon_cs_finish_pages(&parser);
        if (r) {
                DRM_ERROR("Invalid command stream!\n");
                radeon_cs_parser_fini(&parser, r);
                radeon_mutex_unlock(&rdev->cs_mutex);
                return r;
        }
        r = radeon_ib_schedule(rdev, parser.ib);
        if (r) {
                DRM_ERROR("Failed to schedule IB!\n");
        }
        radeon_cs_parser_fini(&parser, r);
        radeon_mutex_unlock(&rdev->cs_mutex);
        return r;
}

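/**
 * radeon_cs_finish_pages() - copy the tail of the IB chunk into the IB
 * @p:          parser structure holding parsing context.
 *
 * Copies the remaining pages of the IB chunk, from the page after the
 * last one brought in by radeon_cs_update_pages() up to and including
 * the final (possibly partial) page, straight into the IB.
 **/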
int radeon_cs_finish_pages(struct radeon_cs_parser *p)
{
        struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
        int i;
        int size = PAGE_SIZE;

        for (i = ibc->last_copied_page + 1; i <= ibc->last_page_index; i++) {
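                /* the last page may be only partially filled */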
                if (i == ibc->last_page_index) {
                        size = (ibc->length_dw * 4) % PAGE_SIZE;
                        if (size == 0)
                                size = PAGE_SIZE;
                }

                if (DRM_COPY_FROM_USER(p->ib->ptr + (i * (PAGE_SIZE/4)),
                                       ibc->user_ptr + (i * PAGE_SIZE),
                                       size))
                        return -EFAULT;
        }
        return 0;
}

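/**
 * radeon_cs_update_pages() - page in the IB chunk up to a given page
 * @p:          parser structure holding parsing context.
 * @pg_idx:     index of the IB page the parser is about to read.
 *
 * Copies any skipped pages straight into the IB, then copies page
 * @pg_idx from userspace into one of the two scratch pages so the
 * parser can read it through cached memory.  Returns the index (0 or
 * 1) of the scratch page now holding @pg_idx; on a copy fault,
 * p->parser_error is set instead.
 **/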
int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
{
        int new_page;
        struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
        int i;
        int size = PAGE_SIZE;

        for (i = ibc->last_copied_page + 1; i < pg_idx; i++) {
                if (DRM_COPY_FROM_USER(p->ib->ptr + (i * (PAGE_SIZE/4)),
                                       ibc->user_ptr + (i * PAGE_SIZE),
                                       PAGE_SIZE)) {
                        p->parser_error = -EFAULT;
                        return 0;
                }
        }

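        /* reuse the scratch page holding the older of the two cached pages */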
        new_page = ibc->kpage_idx[0] < ibc->kpage_idx[1] ? 0 : 1;

        if (pg_idx == ibc->last_page_index) {
                size = (ibc->length_dw * 4) % PAGE_SIZE;
                if (size == 0)
                        size = PAGE_SIZE;
        }

        if (DRM_COPY_FROM_USER(ibc->kpage[new_page],
                               ibc->user_ptr + (pg_idx * PAGE_SIZE),
                               size)) {
                p->parser_error = -EFAULT;
                return 0;
        }

        /* copy to IB here */
        memcpy((void *)(p->ib->ptr + (pg_idx * (PAGE_SIZE/4))), ibc->kpage[new_page], size);

        ibc->last_copied_page = pg_idx;
        ibc->kpage_idx[new_page] = pg_idx;

        return new_page;
}