1 /* i810_dma.c -- DMA support for the i810 -*- linux-c -*-
2  * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
3  *
4  * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
5  * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
6  * All Rights Reserved.
7  *
8  * Permission is hereby granted, free of charge, to any person obtaining a
9  * copy of this software and associated documentation files (the "Software"),
10  * to deal in the Software without restriction, including without limitation
11  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12  * and/or sell copies of the Software, and to permit persons to whom the
13  * Software is furnished to do so, subject to the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the next
16  * paragraph) shall be included in all copies or substantial portions of the
17  * Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
22  * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25  * DEALINGS IN THE SOFTWARE.
26  *
27  * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
28  *          Jeff Hartmann <jhartmann@valinux.com>
29  *          Keith Whitwell <keith@tungstengraphics.com>
30  *
31  */
32
33 #include "drmP.h"
34 #include "drm.h"
35 #include "i810_drm.h"
36 #include "i810_drv.h"
37 #include <linux/interrupt.h>    /* For task queue support */
38 #include <linux/delay.h>
39 #include <linux/slab.h>
40 #include <linux/pagemap.h>
41
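/* Ownership states for a buffer's in_use word, which lives in the hardware
 * status page: FREE means the buffer is on the freelist, CLIENT means
 * userspace owns it, HARDWARE means it has been handed to the ring.
 * UNMAPPED/MAPPED track whether the buffer is currently mmap'ed into the
 * client.  Transitions happen via cmpxchg() on the CPU side and via
 * CMD_STORE_DWORD_IDX from the ring on the GPU side.
 */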
42 #define I810_BUF_FREE           2
43 #define I810_BUF_CLIENT         1
44 #define I810_BUF_HARDWARE       0
45
46 #define I810_BUF_UNMAPPED 0
47 #define I810_BUF_MAPPED   1
48
49 static struct drm_buf *i810_freelist_get(struct drm_device * dev)
50 {
51         struct drm_device_dma *dma = dev->dma;
52         int i;
53         int used;
54
55         /* Linear search might not be the best solution */
56
57         for (i = 0; i < dma->buf_count; i++) {
58                 struct drm_buf *buf = dma->buflist[i];
59                 drm_i810_buf_priv_t *buf_priv = buf->dev_private;
60                 /* in_use is already a pointer, so use cmpxchg() on it directly */
61                 used = cmpxchg(buf_priv->in_use, I810_BUF_FREE,
62                                I810_BUF_CLIENT);
63                 if (used == I810_BUF_FREE)
64                         return buf;
65         }
66         return NULL;
67 }
68
69 /* This should only be called if the buffer has not yet been sent to the
70  * hardware; the hardware updates in_use for us once it's on the ring buffer.
71  */
72
73 static int i810_freelist_put(struct drm_device *dev, struct drm_buf *buf)
74 {
75         drm_i810_buf_priv_t *buf_priv = buf->dev_private;
76         int used;
77
78         /* in_use is already a pointer, so use cmpxchg() on it directly */
79         used = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, I810_BUF_FREE);
80         if (used != I810_BUF_CLIENT) {
81                 DRM_ERROR("Freeing buffer that's not in use: %d\n", buf->idx);
82                 return -EINVAL;
83         }
84
85         return 0;
86 }
87
88 static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
89 {
90         struct drm_file *priv = filp->private_data;
91         struct drm_device *dev;
92         drm_i810_private_t *dev_priv;
93         struct drm_buf *buf;
94         drm_i810_buf_priv_t *buf_priv;
95
96         dev = priv->minor->dev;
97         dev_priv = dev->dev_private;
98         buf = dev_priv->mmap_buffer;
99         buf_priv = buf->dev_private;
100
101         vma->vm_flags |= (VM_IO | VM_DONTCOPY);
102         vma->vm_file = filp;
103
104         buf_priv->currently_mapped = I810_BUF_MAPPED;
105
106         if (io_remap_pfn_range(vma, vma->vm_start,
107                                vma->vm_pgoff,
108                                vma->vm_end - vma->vm_start, vma->vm_page_prot))
109                 return -EAGAIN;
110         return 0;
111 }
112
113 static const struct file_operations i810_buffer_fops = {
114         .open = drm_open,
115         .release = drm_release,
116         .unlocked_ioctl = drm_ioctl,
117         .mmap = i810_mmap_buffers,
118         .fasync = drm_fasync,
119         .llseek = noop_llseek,
120 };
121
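/* Map a DMA buffer into the client's address space.  The file's f_op is
 * temporarily switched to i810_buffer_fops so that the do_mmap() below is
 * routed to i810_mmap_buffers(), which maps the buffer's bus address;
 * dev_priv->mmap_buffer tells that handler which buffer to map.
 */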
122 static int i810_map_buffer(struct drm_buf *buf, struct drm_file *file_priv)
123 {
124         struct drm_device *dev = file_priv->minor->dev;
125         drm_i810_buf_priv_t *buf_priv = buf->dev_private;
126         drm_i810_private_t *dev_priv = dev->dev_private;
127         const struct file_operations *old_fops;
128         int retcode = 0;
129
130         if (buf_priv->currently_mapped == I810_BUF_MAPPED)
131                 return -EINVAL;
132
133         down_write(&current->mm->mmap_sem);
134         old_fops = file_priv->filp->f_op;
135         file_priv->filp->f_op = &i810_buffer_fops;
136         dev_priv->mmap_buffer = buf;
137         buf_priv->virtual = (void *)do_mmap(file_priv->filp, 0, buf->total,
138                                             PROT_READ | PROT_WRITE,
139                                             MAP_SHARED, buf->bus_address);
140         dev_priv->mmap_buffer = NULL;
141         file_priv->filp->f_op = old_fops;
142         if (IS_ERR(buf_priv->virtual)) {
143                 /* Real error */
144                 DRM_ERROR("mmap error\n");
145                 retcode = PTR_ERR(buf_priv->virtual);
146                 buf_priv->virtual = NULL;
147         }
148         up_write(&current->mm->mmap_sem);
149
150         return retcode;
151 }
152
153 static int i810_unmap_buffer(struct drm_buf *buf)
154 {
155         drm_i810_buf_priv_t *buf_priv = buf->dev_private;
156         int retcode = 0;
157
158         if (buf_priv->currently_mapped != I810_BUF_MAPPED)
159                 return -EINVAL;
160
161         down_write(&current->mm->mmap_sem);
162         retcode = do_munmap(current->mm,
163                             (unsigned long)buf_priv->virtual,
164                             (size_t) buf->total);
165         up_write(&current->mm->mmap_sem);
166
167         buf_priv->currently_mapped = I810_BUF_UNMAPPED;
168         buf_priv->virtual = NULL;
169
170         return retcode;
171 }
172
173 static int i810_dma_get_buffer(struct drm_device *dev, drm_i810_dma_t *d,
174                                struct drm_file *file_priv)
175 {
176         struct drm_buf *buf;
177         drm_i810_buf_priv_t *buf_priv;
178         int retcode = 0;
179
180         buf = i810_freelist_get(dev);
181         if (!buf) {
182                 retcode = -ENOMEM;
183                 DRM_DEBUG("retcode=%d\n", retcode);
184                 return retcode;
185         }
186
187         retcode = i810_map_buffer(buf, file_priv);
188         if (retcode) {
189                 i810_freelist_put(dev, buf);
190                 DRM_ERROR("mapbuf failed, retcode %d\n", retcode);
191                 return retcode;
192         }
193         buf->file_priv = file_priv;
194         buf_priv = buf->dev_private;
195         d->granted = 1;
196         d->request_idx = buf->idx;
197         d->request_size = buf->total;
198         d->virtual = buf_priv->virtual;
199
200         return retcode;
201 }
202
203 static int i810_dma_cleanup(struct drm_device *dev)
204 {
205         struct drm_device_dma *dma = dev->dma;
206
207         /* Make sure interrupts are disabled here because the uninstall ioctl
208          * may not have been called from userspace and after dev_private
209          * is freed, it's too late.
210          */
211         if (drm_core_check_feature(dev, DRIVER_HAVE_IRQ) && dev->irq_enabled)
212                 drm_irq_uninstall(dev);
213
214         if (dev->dev_private) {
215                 int i;
216                 drm_i810_private_t *dev_priv =
217                     (drm_i810_private_t *) dev->dev_private;
218
219                 if (dev_priv->ring.virtual_start)
220                         drm_core_ioremapfree(&dev_priv->ring.map, dev);
221                 if (dev_priv->hw_status_page) {
222                         pci_free_consistent(dev->pdev, PAGE_SIZE,
223                                             dev_priv->hw_status_page,
224                                             dev_priv->dma_status_page);
225                         /* Need to rewrite hardware status page */
226                         I810_WRITE(0x02080, 0x1ffff000);
227                 }
228                 kfree(dev->dev_private);
229                 dev->dev_private = NULL;
230
231                 for (i = 0; i < dma->buf_count; i++) {
232                         struct drm_buf *buf = dma->buflist[i];
233                         drm_i810_buf_priv_t *buf_priv = buf->dev_private;
234
235                         if (buf_priv->kernel_virtual && buf->total)
236                                 drm_core_ioremapfree(&buf_priv->map, dev);
237                 }
238         }
239         return 0;
240 }
241
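/* Busy-wait until at least n bytes of ring space are free.  The timeout is
 * roughly three seconds and is restarted whenever the ring head advances;
 * on timeout a lockup is reported and the wait is abandoned.
 */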
242 static int i810_wait_ring(struct drm_device *dev, int n)
243 {
244         drm_i810_private_t *dev_priv = dev->dev_private;
245         drm_i810_ring_buffer_t *ring = &(dev_priv->ring);
246         int iters = 0;
247         unsigned long end;
248         unsigned int last_head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
249
250         end = jiffies + (HZ * 3);
251         while (ring->space < n) {
252                 ring->head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
253                 ring->space = ring->head - (ring->tail + 8);
254                 if (ring->space < 0)
255                         ring->space += ring->Size;
256
257                 if (ring->head != last_head) {
258                         end = jiffies + (HZ * 3);
259                         last_head = ring->head;
260                 }
261
262                 iters++;
263                 if (time_before(end, jiffies)) {
264                         DRM_ERROR("space: %d wanted %d\n", ring->space, n);
265                         DRM_ERROR("lockup\n");
266                         goto out_wait_ring;
267                 }
268                 udelay(1);
269         }
270
271 out_wait_ring:
272         return iters;
273 }
274
275 static void i810_kernel_lost_context(struct drm_device *dev)
276 {
277         drm_i810_private_t *dev_priv = dev->dev_private;
278         drm_i810_ring_buffer_t *ring = &(dev_priv->ring);
279
280         ring->head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
281         ring->tail = I810_READ(LP_RING + RING_TAIL);
282         ring->space = ring->head - (ring->tail + 8);
283         if (ring->space < 0)
284                 ring->space += ring->Size;
285 }
286
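/* Carve an in_use word for each DMA buffer out of the hardware status page
 * (starting at offset 24), mark every buffer free, and ioremap each buffer
 * so the kernel has its own virtual mapping of it (kernel_virtual).
 */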
287 static int i810_freelist_init(struct drm_device *dev, drm_i810_private_t *dev_priv)
288 {
289         struct drm_device_dma *dma = dev->dma;
290         int my_idx = 24;
291         u32 *hw_status = (u32 *) (dev_priv->hw_status_page + my_idx);
292         int i;
293
294         if (dma->buf_count > 1019) {
295                 /* Not enough space in the status page for the freelist */
296                 return -EINVAL;
297         }
298
299         for (i = 0; i < dma->buf_count; i++) {
300                 struct drm_buf *buf = dma->buflist[i];
301                 drm_i810_buf_priv_t *buf_priv = buf->dev_private;
302
303                 buf_priv->in_use = hw_status++;
304                 buf_priv->my_use_idx = my_idx;
305                 my_idx += 4;
306
307                 *buf_priv->in_use = I810_BUF_FREE;
308
309                 buf_priv->map.offset = buf->bus_address;
310                 buf_priv->map.size = buf->total;
311                 buf_priv->map.type = _DRM_AGP;
312                 buf_priv->map.flags = 0;
313                 buf_priv->map.mtrr = 0;
314
315                 drm_core_ioremap(&buf_priv->map, dev);
316                 buf_priv->kernel_virtual = buf_priv->map.handle;
317
318         }
319         return 0;
320 }
321
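/* One-time DMA setup from the I810_INIT ioctl: locate the SAREA, MMIO and
 * buffer maps, ioremap the ring, allocate and program the hardware status
 * page, and build the buffer freelist.  Any failure tears everything back
 * down via i810_dma_cleanup().
 */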
322 static int i810_dma_initialize(struct drm_device *dev,
323                                drm_i810_private_t *dev_priv,
324                                drm_i810_init_t *init)
325 {
326         struct drm_map_list *r_list;
327         memset(dev_priv, 0, sizeof(drm_i810_private_t));
328
329         list_for_each_entry(r_list, &dev->maplist, head) {
330                 if (r_list->map &&
331                     r_list->map->type == _DRM_SHM &&
332                     r_list->map->flags & _DRM_CONTAINS_LOCK) {
333                         dev_priv->sarea_map = r_list->map;
334                         break;
335                 }
336         }
337         if (!dev_priv->sarea_map) {
338                 dev->dev_private = (void *)dev_priv;
339                 i810_dma_cleanup(dev);
340                 DRM_ERROR("can not find sarea!\n");
341                 return -EINVAL;
342         }
343         dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
344         if (!dev_priv->mmio_map) {
345                 dev->dev_private = (void *)dev_priv;
346                 i810_dma_cleanup(dev);
347                 DRM_ERROR("can not find mmio map!\n");
348                 return -EINVAL;
349         }
350         dev->agp_buffer_token = init->buffers_offset;
351         dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
352         if (!dev->agp_buffer_map) {
353                 dev->dev_private = (void *)dev_priv;
354                 i810_dma_cleanup(dev);
355                 DRM_ERROR("can not find dma buffer map!\n");
356                 return -EINVAL;
357         }
358
359         dev_priv->sarea_priv = (drm_i810_sarea_t *)
360             ((u8 *) dev_priv->sarea_map->handle + init->sarea_priv_offset);
361
362         dev_priv->ring.Start = init->ring_start;
363         dev_priv->ring.End = init->ring_end;
364         dev_priv->ring.Size = init->ring_size;
365
366         dev_priv->ring.map.offset = dev->agp->base + init->ring_start;
367         dev_priv->ring.map.size = init->ring_size;
368         dev_priv->ring.map.type = _DRM_AGP;
369         dev_priv->ring.map.flags = 0;
370         dev_priv->ring.map.mtrr = 0;
371
372         drm_core_ioremap(&dev_priv->ring.map, dev);
373
374         if (dev_priv->ring.map.handle == NULL) {
375                 dev->dev_private = (void *)dev_priv;
376                 i810_dma_cleanup(dev);
377                 DRM_ERROR("can not ioremap virtual address for"
378                           " ring buffer\n");
379                 return -ENOMEM;
380         }
381
382         dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
383
384         dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
385
386         dev_priv->w = init->w;
387         dev_priv->h = init->h;
388         dev_priv->pitch = init->pitch;
389         dev_priv->back_offset = init->back_offset;
390         dev_priv->depth_offset = init->depth_offset;
391         dev_priv->front_offset = init->front_offset;
392
393         dev_priv->overlay_offset = init->overlay_offset;
394         dev_priv->overlay_physical = init->overlay_physical;
395
396         dev_priv->front_di1 = init->front_offset | init->pitch_bits;
397         dev_priv->back_di1 = init->back_offset | init->pitch_bits;
398         dev_priv->zi1 = init->depth_offset | init->pitch_bits;
399
400         /* Program Hardware Status Page */
401         dev_priv->hw_status_page =
402             pci_alloc_consistent(dev->pdev, PAGE_SIZE,
403                                  &dev_priv->dma_status_page);
404         if (!dev_priv->hw_status_page) {
405                 dev->dev_private = (void *)dev_priv;
406                 i810_dma_cleanup(dev);
407                 DRM_ERROR("Can not allocate hardware status page\n");
408                 return -ENOMEM;
409         }
410         memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
411         DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);
412
413         I810_WRITE(0x02080, dev_priv->dma_status_page);
414         DRM_DEBUG("Enabled hardware status page\n");
415
416         /* Now we need to init our freelist */
417         if (i810_freelist_init(dev, dev_priv) != 0) {
418                 dev->dev_private = (void *)dev_priv;
419                 i810_dma_cleanup(dev);
420                 DRM_ERROR("Not enough space in the status page for"
421                           " the freelist\n");
422                 return -ENOMEM;
423         }
424         dev->dev_private = (void *)dev_priv;
425
426         return 0;
427 }
428
429 static int i810_dma_init(struct drm_device *dev, void *data,
430                          struct drm_file *file_priv)
431 {
432         drm_i810_private_t *dev_priv;
433         drm_i810_init_t *init = data;
434         int retcode = 0;
435
436         switch (init->func) {
437         case I810_INIT_DMA_1_4:
438                 DRM_INFO("Using v1.4 init.\n");
439                 dev_priv = kmalloc(sizeof(drm_i810_private_t), GFP_KERNEL);
440                 if (dev_priv == NULL)
441                         return -ENOMEM;
442                 retcode = i810_dma_initialize(dev, dev_priv, init);
443                 break;
444
445         case I810_CLEANUP_DMA:
446                 DRM_INFO("DMA Cleanup\n");
447                 retcode = i810_dma_cleanup(dev);
448                 break;
449         default:
450                 return -EINVAL;
451         }
452
453         return retcode;
454 }
455
456 /* Most efficient way to verify state for the i810 is as it is
457  * emitted.  Non-conformant state is silently dropped.
458  *
459  * Use 'volatile' & local var tmp to force the emitted values to be
460  * identical to the verified ones.
461  */
462 static void i810EmitContextVerified(struct drm_device *dev,
463                                     volatile unsigned int *code)
464 {
465         drm_i810_private_t *dev_priv = dev->dev_private;
466         int i, j = 0;
467         unsigned int tmp;
468         RING_LOCALS;
469
470         BEGIN_LP_RING(I810_CTX_SETUP_SIZE);
471
472         OUT_RING(GFX_OP_COLOR_FACTOR);
473         OUT_RING(code[I810_CTXREG_CF1]);
474
475         OUT_RING(GFX_OP_STIPPLE);
476         OUT_RING(code[I810_CTXREG_ST1]);
477
478         for (i = 4; i < I810_CTX_SETUP_SIZE; i++) {
479                 tmp = code[i];
480
481                 if ((tmp & (7 << 29)) == (3 << 29) &&
482                     (tmp & (0x1f << 24)) < (0x1d << 24)) {
483                         OUT_RING(tmp);
484                         j++;
485                 } else
486                         printk("context state dropped!!!\n");
487         }
488
489         if (j & 1)
490                 OUT_RING(0);
491
492         ADVANCE_LP_RING();
493 }
494
495 static void i810EmitTexVerified(struct drm_device *dev, volatile unsigned int *code)
496 {
497         drm_i810_private_t *dev_priv = dev->dev_private;
498         int i, j = 0;
499         unsigned int tmp;
500         RING_LOCALS;
501
502         BEGIN_LP_RING(I810_TEX_SETUP_SIZE);
503
504         OUT_RING(GFX_OP_MAP_INFO);
505         OUT_RING(code[I810_TEXREG_MI1]);
506         OUT_RING(code[I810_TEXREG_MI2]);
507         OUT_RING(code[I810_TEXREG_MI3]);
508
509         for (i = 4; i < I810_TEX_SETUP_SIZE; i++) {
510                 tmp = code[i];
511
512                 if ((tmp & (7 << 29)) == (3 << 29) &&
513                     (tmp & (0x1f << 24)) < (0x1d << 24)) {
514                         OUT_RING(tmp);
515                         j++;
516                 } else
517                         printk("texture state dropped!!!\n");
518         }
519
520         if (j & 1)
521                 OUT_RING(0);
522
523         ADVANCE_LP_RING();
524 }
525
526 /* Need to do some additional checking when setting the dest buffer.
527  */
528 static void i810EmitDestVerified(struct drm_device *dev,
529                                  volatile unsigned int *code)
530 {
531         drm_i810_private_t *dev_priv = dev->dev_private;
532         unsigned int tmp;
533         RING_LOCALS;
534
535         BEGIN_LP_RING(I810_DEST_SETUP_SIZE + 2);
536
537         tmp = code[I810_DESTREG_DI1];
538         if (tmp == dev_priv->front_di1 || tmp == dev_priv->back_di1) {
539                 OUT_RING(CMD_OP_DESTBUFFER_INFO);
540                 OUT_RING(tmp);
541         } else
542                 DRM_DEBUG("bad di1 %x (allow %x or %x)\n",
543                           tmp, dev_priv->front_di1, dev_priv->back_di1);
544
545         /* invariant:
546          */
547         OUT_RING(CMD_OP_Z_BUFFER_INFO);
548         OUT_RING(dev_priv->zi1);
549
550         OUT_RING(GFX_OP_DESTBUFFER_VARS);
551         OUT_RING(code[I810_DESTREG_DV1]);
552
553         OUT_RING(GFX_OP_DRAWRECT_INFO);
554         OUT_RING(code[I810_DESTREG_DR1]);
555         OUT_RING(code[I810_DESTREG_DR2]);
556         OUT_RING(code[I810_DESTREG_DR3]);
557         OUT_RING(code[I810_DESTREG_DR4]);
558         OUT_RING(0);
559
560         ADVANCE_LP_RING();
561 }
562
563 static void i810EmitState(struct drm_device *dev)
564 {
565         drm_i810_private_t *dev_priv = dev->dev_private;
566         drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
567         unsigned int dirty = sarea_priv->dirty;
568
569         DRM_DEBUG("%x\n", dirty);
570
571         if (dirty & I810_UPLOAD_BUFFERS) {
572                 i810EmitDestVerified(dev, sarea_priv->BufferState);
573                 sarea_priv->dirty &= ~I810_UPLOAD_BUFFERS;
574         }
575
576         if (dirty & I810_UPLOAD_CTX) {
577                 i810EmitContextVerified(dev, sarea_priv->ContextState);
578                 sarea_priv->dirty &= ~I810_UPLOAD_CTX;
579         }
580
581         if (dirty & I810_UPLOAD_TEX0) {
582                 i810EmitTexVerified(dev, sarea_priv->TexState[0]);
583                 sarea_priv->dirty &= ~I810_UPLOAD_TEX0;
584         }
585
586         if (dirty & I810_UPLOAD_TEX1) {
587                 i810EmitTexVerified(dev, sarea_priv->TexState[1]);
588                 sarea_priv->dirty &= ~I810_UPLOAD_TEX1;
589         }
590 }
591
592 /* need to verify
593  */
594 static void i810_dma_dispatch_clear(struct drm_device *dev, int flags,
595                                     unsigned int clear_color,
596                                     unsigned int clear_zval)
597 {
598         drm_i810_private_t *dev_priv = dev->dev_private;
599         drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
600         int nbox = sarea_priv->nbox;
601         struct drm_clip_rect *pbox = sarea_priv->boxes;
602         int pitch = dev_priv->pitch;
603         int cpp = 2;
604         int i;
605         RING_LOCALS;
606
607         if (dev_priv->current_page == 1) {
608                 unsigned int tmp = flags;
609
610                 flags &= ~(I810_FRONT | I810_BACK);
611                 if (tmp & I810_FRONT)
612                         flags |= I810_BACK;
613                 if (tmp & I810_BACK)
614                         flags |= I810_FRONT;
615         }
616
617         i810_kernel_lost_context(dev);
618
619         if (nbox > I810_NR_SAREA_CLIPRECTS)
620                 nbox = I810_NR_SAREA_CLIPRECTS;
621
622         for (i = 0; i < nbox; i++, pbox++) {
623                 unsigned int x = pbox->x1;
624                 unsigned int y = pbox->y1;
625                 unsigned int width = (pbox->x2 - x) * cpp;
626                 unsigned int height = pbox->y2 - y;
627                 unsigned int start = y * pitch + x * cpp;
628
629                 if (pbox->x1 > pbox->x2 ||
630                     pbox->y1 > pbox->y2 ||
631                     pbox->x2 > dev_priv->w || pbox->y2 > dev_priv->h)
632                         continue;
633
634                 if (flags & I810_FRONT) {
635                         BEGIN_LP_RING(6);
636                         OUT_RING(BR00_BITBLT_CLIENT | BR00_OP_COLOR_BLT | 0x3);
637                         OUT_RING(BR13_SOLID_PATTERN | (0xF0 << 16) | pitch);
638                         OUT_RING((height << 16) | width);
639                         OUT_RING(start);
640                         OUT_RING(clear_color);
641                         OUT_RING(0);
642                         ADVANCE_LP_RING();
643                 }
644
645                 if (flags & I810_BACK) {
646                         BEGIN_LP_RING(6);
647                         OUT_RING(BR00_BITBLT_CLIENT | BR00_OP_COLOR_BLT | 0x3);
648                         OUT_RING(BR13_SOLID_PATTERN | (0xF0 << 16) | pitch);
649                         OUT_RING((height << 16) | width);
650                         OUT_RING(dev_priv->back_offset + start);
651                         OUT_RING(clear_color);
652                         OUT_RING(0);
653                         ADVANCE_LP_RING();
654                 }
655
656                 if (flags & I810_DEPTH) {
657                         BEGIN_LP_RING(6);
658                         OUT_RING(BR00_BITBLT_CLIENT | BR00_OP_COLOR_BLT | 0x3);
659                         OUT_RING(BR13_SOLID_PATTERN | (0xF0 << 16) | pitch);
660                         OUT_RING((height << 16) | width);
661                         OUT_RING(dev_priv->depth_offset + start);
662                         OUT_RING(clear_zval);
663                         OUT_RING(0);
664                         ADVANCE_LP_RING();
665                 }
666         }
667 }
668
669 static void i810_dma_dispatch_swap(struct drm_device *dev)
670 {
671         drm_i810_private_t *dev_priv = dev->dev_private;
672         drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
673         int nbox = sarea_priv->nbox;
674         struct drm_clip_rect *pbox = sarea_priv->boxes;
675         int pitch = dev_priv->pitch;
676         int cpp = 2;
677         int i;
678         RING_LOCALS;
679
680         DRM_DEBUG("swapbuffers\n");
681
682         i810_kernel_lost_context(dev);
683
684         if (nbox > I810_NR_SAREA_CLIPRECTS)
685                 nbox = I810_NR_SAREA_CLIPRECTS;
686
687         for (i = 0; i < nbox; i++, pbox++) {
688                 unsigned int w = pbox->x2 - pbox->x1;
689                 unsigned int h = pbox->y2 - pbox->y1;
690                 unsigned int dst = pbox->x1 * cpp + pbox->y1 * pitch;
691                 unsigned int start = dst;
692
693                 if (pbox->x1 > pbox->x2 ||
694                     pbox->y1 > pbox->y2 ||
695                     pbox->x2 > dev_priv->w || pbox->y2 > dev_priv->h)
696                         continue;
697
698                 BEGIN_LP_RING(6);
699                 OUT_RING(BR00_BITBLT_CLIENT | BR00_OP_SRC_COPY_BLT | 0x4);
700                 OUT_RING(pitch | (0xCC << 16));
701                 OUT_RING((h << 16) | (w * cpp));
702                 if (dev_priv->current_page == 0)
703                         OUT_RING(dev_priv->front_offset + start);
704                 else
705                         OUT_RING(dev_priv->back_offset + start);
706                 OUT_RING(pitch);
707                 if (dev_priv->current_page == 0)
708                         OUT_RING(dev_priv->back_offset + start);
709                 else
710                         OUT_RING(dev_priv->front_offset + start);
711                 ADVANCE_LP_RING();
712         }
713 }
714
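/* Emit a vertex buffer.  For each cliprect a scissor is programmed and the
 * buffer is executed as a protected batch buffer; when 'discard' is set the
 * buffer is handed to the hardware, which marks it free again via the
 * CMD_STORE_DWORD_IDX writes queued at the end.
 */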
715 static void i810_dma_dispatch_vertex(struct drm_device *dev,
716                                      struct drm_buf *buf, int discard, int used)
717 {
718         drm_i810_private_t *dev_priv = dev->dev_private;
719         drm_i810_buf_priv_t *buf_priv = buf->dev_private;
720         drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
721         struct drm_clip_rect *box = sarea_priv->boxes;
722         int nbox = sarea_priv->nbox;
723         unsigned long address = (unsigned long)buf->bus_address;
724         unsigned long start = address - dev->agp->base;
725         int i = 0;
726         RING_LOCALS;
727
728         i810_kernel_lost_context(dev);
729
730         if (nbox > I810_NR_SAREA_CLIPRECTS)
731                 nbox = I810_NR_SAREA_CLIPRECTS;
732
733         if (used > 4 * 1024)
734                 used = 0;
735
736         if (sarea_priv->dirty)
737                 i810EmitState(dev);
738
739         if (buf_priv->currently_mapped == I810_BUF_MAPPED) {
740                 unsigned int prim = (sarea_priv->vertex_prim & PR_MASK);
741
742                 *(u32 *) buf_priv->kernel_virtual =
743                     ((GFX_OP_PRIMITIVE | prim | ((used / 4) - 2)));
744
745                 if (used & 4) {
746                         *(u32 *) ((char *) buf_priv->kernel_virtual + used) = 0;
747                         used += 4;
748                 }
749
750                 i810_unmap_buffer(buf);
751         }
752
753         if (used) {
754                 do {
755                         if (i < nbox) {
756                                 BEGIN_LP_RING(4);
757                                 OUT_RING(GFX_OP_SCISSOR | SC_UPDATE_SCISSOR |
758                                          SC_ENABLE);
759                                 OUT_RING(GFX_OP_SCISSOR_INFO);
760                                 OUT_RING(box[i].x1 | (box[i].y1 << 16));
761                                 OUT_RING((box[i].x2 -
762                                           1) | ((box[i].y2 - 1) << 16));
763                                 ADVANCE_LP_RING();
764                         }
765
766                         BEGIN_LP_RING(4);
767                         OUT_RING(CMD_OP_BATCH_BUFFER);
768                         OUT_RING(start | BB1_PROTECTED);
769                         OUT_RING(start + used - 4);
770                         OUT_RING(0);
771                         ADVANCE_LP_RING();
772
773                 } while (++i < nbox);
774         }
775
776         if (discard) {
777                 dev_priv->counter++;
778
779                 (void)cmpxchg(buf_priv->in_use, I810_BUF_CLIENT,
780                               I810_BUF_HARDWARE);
781
782                 BEGIN_LP_RING(8);
783                 OUT_RING(CMD_STORE_DWORD_IDX);
784                 OUT_RING(20);
785                 OUT_RING(dev_priv->counter);
786                 OUT_RING(CMD_STORE_DWORD_IDX);
787                 OUT_RING(buf_priv->my_use_idx);
788                 OUT_RING(I810_BUF_FREE);
789                 OUT_RING(CMD_REPORT_HEAD);
790                 OUT_RING(0);
791                 ADVANCE_LP_RING();
792         }
793 }
794
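/* Flip front/back by pointing CMD_OP_FRONTBUFFER_INFO at the other buffer,
 * then wait for the plane A flip event and mirror the new current page into
 * the SAREA for the client.
 */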
795 static void i810_dma_dispatch_flip(struct drm_device *dev)
796 {
797         drm_i810_private_t *dev_priv = dev->dev_private;
798         int pitch = dev_priv->pitch;
799         RING_LOCALS;
800
801         DRM_DEBUG("page=%d pfCurrentPage=%d\n",
802                   dev_priv->current_page,
803                   dev_priv->sarea_priv->pf_current_page);
804
805         i810_kernel_lost_context(dev);
806
807         BEGIN_LP_RING(2);
808         OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
809         OUT_RING(0);
810         ADVANCE_LP_RING();
811
812         BEGIN_LP_RING(I810_DEST_SETUP_SIZE + 2);
813         /* On i815 at least ASYNC is buggy */
814         /* pitch<<5 is from 11.2.8 p158,
815            it's the pitch / 8 then left shifted 8,
816            so (pitch >> 3) << 8 */
817         OUT_RING(CMD_OP_FRONTBUFFER_INFO | (pitch << 5) /*| ASYNC_FLIP */ );
818         if (dev_priv->current_page == 0) {
819                 OUT_RING(dev_priv->back_offset);
820                 dev_priv->current_page = 1;
821         } else {
822                 OUT_RING(dev_priv->front_offset);
823                 dev_priv->current_page = 0;
824         }
825         OUT_RING(0);
826         ADVANCE_LP_RING();
827
828         BEGIN_LP_RING(2);
829         OUT_RING(CMD_OP_WAIT_FOR_EVENT | WAIT_FOR_PLANE_A_FLIP);
830         OUT_RING(0);
831         ADVANCE_LP_RING();
832
833         /* Publish the new current page in the SAREA.  The client-side 3D
834          * driver throttles its framerate by waiting on this value before
835          * performing the swapbuffer ioctl.
836          */
837         dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
838
839 }
840
841 static void i810_dma_quiescent(struct drm_device *dev)
842 {
843         drm_i810_private_t *dev_priv = dev->dev_private;
844         RING_LOCALS;
845
846         i810_kernel_lost_context(dev);
847
848         BEGIN_LP_RING(4);
849         OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
850         OUT_RING(CMD_REPORT_HEAD);
851         OUT_RING(0);
852         OUT_RING(0);
853         ADVANCE_LP_RING();
854
855         i810_wait_ring(dev, dev_priv->ring.Size - 8);
856 }
857
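/* Drain the ring, then reclaim any buffers the hardware has finished with
 * by flipping their in_use words from HARDWARE back to FREE.
 */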
858 static int i810_flush_queue(struct drm_device *dev)
859 {
860         drm_i810_private_t *dev_priv = dev->dev_private;
861         struct drm_device_dma *dma = dev->dma;
862         int i, ret = 0;
863         RING_LOCALS;
864
865         i810_kernel_lost_context(dev);
866
867         BEGIN_LP_RING(2);
868         OUT_RING(CMD_REPORT_HEAD);
869         OUT_RING(0);
870         ADVANCE_LP_RING();
871
872         i810_wait_ring(dev, dev_priv->ring.Size - 8);
873
874         for (i = 0; i < dma->buf_count; i++) {
875                 struct drm_buf *buf = dma->buflist[i];
876                 drm_i810_buf_priv_t *buf_priv = buf->dev_private;
877
878                 int used = cmpxchg(buf_priv->in_use, I810_BUF_HARDWARE,
879                                    I810_BUF_FREE);
880
881                 if (used == I810_BUF_HARDWARE)
882                         DRM_DEBUG("reclaimed from HARDWARE\n");
883                 if (used == I810_BUF_CLIENT)
884                         DRM_DEBUG("still on client\n");
885         }
886
887         return ret;
888 }
889
890 /* Must be called with the lock held */
891 static void i810_reclaim_buffers(struct drm_device *dev,
892                                  struct drm_file *file_priv)
893 {
894         struct drm_device_dma *dma = dev->dma;
895         int i;
896
897         if (!dma)
898                 return;
899         if (!dev->dev_private)
900                 return;
901         if (!dma->buflist)
902                 return;
903
904         i810_flush_queue(dev);
905
906         for (i = 0; i < dma->buf_count; i++) {
907                 struct drm_buf *buf = dma->buflist[i];
908                 drm_i810_buf_priv_t *buf_priv = buf->dev_private;
909
910                 if (buf->file_priv == file_priv && buf_priv) {
911                         int used = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT,
912                                            I810_BUF_FREE);
913
914                         if (used == I810_BUF_CLIENT)
915                                 DRM_DEBUG("reclaimed from client\n");
916                         if (buf_priv->currently_mapped == I810_BUF_MAPPED)
917                                 buf_priv->currently_mapped = I810_BUF_UNMAPPED;
918                 }
919         }
920 }
921
922 static int i810_flush_ioctl(struct drm_device *dev, void *data,
923                             struct drm_file *file_priv)
924 {
925         LOCK_TEST_WITH_RETURN(dev, file_priv);
926
927         i810_flush_queue(dev);
928         return 0;
929 }
930
931 static int i810_dma_vertex(struct drm_device *dev, void *data,
932                            struct drm_file *file_priv)
933 {
934         struct drm_device_dma *dma = dev->dma;
935         drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
936         u32 *hw_status = dev_priv->hw_status_page;
937         drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
938             dev_priv->sarea_priv;
939         drm_i810_vertex_t *vertex = data;
940
941         LOCK_TEST_WITH_RETURN(dev, file_priv);
942
943         DRM_DEBUG("idx %d used %d discard %d\n",
944                   vertex->idx, vertex->used, vertex->discard);
945
946         if (vertex->idx < 0 || vertex->idx >= dma->buf_count)
947                 return -EINVAL;
948
949         i810_dma_dispatch_vertex(dev,
950                                  dma->buflist[vertex->idx],
951                                  vertex->discard, vertex->used);
952
953         atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
954         atomic_inc(&dev->counts[_DRM_STAT_DMA]);
955         sarea_priv->last_enqueue = dev_priv->counter - 1;
956         sarea_priv->last_dispatch = (int)hw_status[5];
957
958         return 0;
959 }
960
961 static int i810_clear_bufs(struct drm_device *dev, void *data,
962                            struct drm_file *file_priv)
963 {
964         drm_i810_clear_t *clear = data;
965
966         LOCK_TEST_WITH_RETURN(dev, file_priv);
967
968         /* GH: Someone's doing nasty things... */
969         if (!dev->dev_private)
970                 return -EINVAL;
971
972         i810_dma_dispatch_clear(dev, clear->flags,
973                                 clear->clear_color, clear->clear_depth);
974         return 0;
975 }
976
977 static int i810_swap_bufs(struct drm_device *dev, void *data,
978                           struct drm_file *file_priv)
979 {
980         DRM_DEBUG("\n");
981
982         LOCK_TEST_WITH_RETURN(dev, file_priv);
983
984         i810_dma_dispatch_swap(dev);
985         return 0;
986 }
987
988 static int i810_getage(struct drm_device *dev, void *data,
989                        struct drm_file *file_priv)
990 {
991         drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
992         u32 *hw_status = dev_priv->hw_status_page;
993         drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
994             dev_priv->sarea_priv;
995
996         sarea_priv->last_dispatch = (int)hw_status[5];
997         return 0;
998 }
999
1000 static int i810_getbuf(struct drm_device *dev, void *data,
1001                        struct drm_file *file_priv)
1002 {
1003         int retcode = 0;
1004         drm_i810_dma_t *d = data;
1005         drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
1006         u32 *hw_status = dev_priv->hw_status_page;
1007         drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
1008             dev_priv->sarea_priv;
1009
1010         LOCK_TEST_WITH_RETURN(dev, file_priv);
1011
1012         d->granted = 0;
1013
1014         retcode = i810_dma_get_buffer(dev, d, file_priv);
1015
1016         DRM_DEBUG("i810_dma: %d returning %d, granted = %d\n",
1017                   task_pid_nr(current), retcode, d->granted);
1018
1019         sarea_priv->last_dispatch = (int)hw_status[5];
1020
1021         return retcode;
1022 }
1023
1024 static int i810_copybuf(struct drm_device *dev, void *data,
1025                         struct drm_file *file_priv)
1026 {
1027         /* Never copy - 2.4.x doesn't need it */
1028         return 0;
1029 }
1030
1031 static int i810_docopy(struct drm_device *dev, void *data,
1032                         struct drm_file *file_priv)
1033 {
1034         /* Never copy - 2.4.x doesn't need it */
1035         return 0;
1036 }
1037
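/* Dispatch a buffer submitted through the I810_MC ioctl as a protected batch
 * buffer, then have the hardware mark it free and store 'last_render' into
 * the status page (dword index 16).
 */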
1038 static void i810_dma_dispatch_mc(struct drm_device *dev, struct drm_buf *buf, int used,
1039                                  unsigned int last_render)
1040 {
1041         drm_i810_private_t *dev_priv = dev->dev_private;
1042         drm_i810_buf_priv_t *buf_priv = buf->dev_private;
1043         drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
1044         unsigned long address = (unsigned long)buf->bus_address;
1045         unsigned long start = address - dev->agp->base;
1046         int u;
1047         RING_LOCALS;
1048
1049         i810_kernel_lost_context(dev);
1050
1051         u = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, I810_BUF_HARDWARE);
1052         if (u != I810_BUF_CLIENT)
1053                 DRM_DEBUG("MC found buffer that isn't mine!\n");
1054
1055         if (used > 4 * 1024)
1056                 used = 0;
1057
1058         sarea_priv->dirty = 0x7f;
1059
1060         DRM_DEBUG("addr 0x%lx, used 0x%x\n", address, used);
1061
1062         dev_priv->counter++;
1063         DRM_DEBUG("dispatch counter : %ld\n", dev_priv->counter);
1064         DRM_DEBUG("start : %lx\n", start);
1065         DRM_DEBUG("used : %d\n", used);
1066         DRM_DEBUG("start + used - 4 : %ld\n", start + used - 4);
1067
1068         if (buf_priv->currently_mapped == I810_BUF_MAPPED) {
1069                 if (used & 4) {
1070                         *(u32 *) ((char *) buf_priv->virtual + used) = 0;
1071                         used += 4;
1072                 }
1073
1074                 i810_unmap_buffer(buf);
1075         }
1076         BEGIN_LP_RING(4);
1077         OUT_RING(CMD_OP_BATCH_BUFFER);
1078         OUT_RING(start | BB1_PROTECTED);
1079         OUT_RING(start + used - 4);
1080         OUT_RING(0);
1081         ADVANCE_LP_RING();
1082
1083         BEGIN_LP_RING(8);
1084         OUT_RING(CMD_STORE_DWORD_IDX);
1085         OUT_RING(buf_priv->my_use_idx);
1086         OUT_RING(I810_BUF_FREE);
1087         OUT_RING(0);
1088
1089         OUT_RING(CMD_STORE_DWORD_IDX);
1090         OUT_RING(16);
1091         OUT_RING(last_render);
1092         OUT_RING(0);
1093         ADVANCE_LP_RING();
1094 }
1095
1096 static int i810_dma_mc(struct drm_device *dev, void *data,
1097                        struct drm_file *file_priv)
1098 {
1099         struct drm_device_dma *dma = dev->dma;
1100         drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
1101         u32 *hw_status = dev_priv->hw_status_page;
1102         drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
1103             dev_priv->sarea_priv;
1104         drm_i810_mc_t *mc = data;
1105
1106         LOCK_TEST_WITH_RETURN(dev, file_priv);
1107
1108         if (mc->idx >= dma->buf_count || mc->idx < 0)
1109                 return -EINVAL;
1110
1111         i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
1112                              mc->last_render);
1113
1114         atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
1115         atomic_inc(&dev->counts[_DRM_STAT_DMA]);
1116         sarea_priv->last_enqueue = dev_priv->counter - 1;
1117         sarea_priv->last_dispatch = (int)hw_status[5];
1118
1119         return 0;
1120 }
1121
1122 static int i810_rstatus(struct drm_device *dev, void *data,
1123                         struct drm_file *file_priv)
1124 {
1125         drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
1126
1127         return (int)(((u32 *) (dev_priv->hw_status_page))[4]);
1128 }
1129
1130 static int i810_ov0_info(struct drm_device *dev, void *data,
1131                          struct drm_file *file_priv)
1132 {
1133         drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
1134         drm_i810_overlay_t *ov = data;
1135
1136         ov->offset = dev_priv->overlay_offset;
1137         ov->physical = dev_priv->overlay_physical;
1138
1139         return 0;
1140 }
1141
1142 static int i810_fstatus(struct drm_device *dev, void *data,
1143                         struct drm_file *file_priv)
1144 {
1145         drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
1146
1147         LOCK_TEST_WITH_RETURN(dev, file_priv);
1148         return I810_READ(0x30008);
1149 }
1150
1151 static int i810_ov0_flip(struct drm_device *dev, void *data,
1152                          struct drm_file *file_priv)
1153 {
1154         drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
1155
1156         LOCK_TEST_WITH_RETURN(dev, file_priv);
1157
1158         /* Tell the overlay to update */
1159         I810_WRITE(0x30000, dev_priv->overlay_physical | 0x80000000);
1160
1161         return 0;
1162 }
1163
1164 /* Not sure why this isn't set all the time:
1165  */
1166 static void i810_do_init_pageflip(struct drm_device *dev)
1167 {
1168         drm_i810_private_t *dev_priv = dev->dev_private;
1169
1170         DRM_DEBUG("\n");
1171         dev_priv->page_flipping = 1;
1172         dev_priv->current_page = 0;
1173         dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
1174 }
1175
1176 static int i810_do_cleanup_pageflip(struct drm_device *dev)
1177 {
1178         drm_i810_private_t *dev_priv = dev->dev_private;
1179
1180         DRM_DEBUG("\n");
1181         if (dev_priv->current_page != 0)
1182                 i810_dma_dispatch_flip(dev);
1183
1184         dev_priv->page_flipping = 0;
1185         return 0;
1186 }
1187
1188 static int i810_flip_bufs(struct drm_device *dev, void *data,
1189                           struct drm_file *file_priv)
1190 {
1191         drm_i810_private_t *dev_priv = dev->dev_private;
1192
1193         DRM_DEBUG("\n");
1194
1195         LOCK_TEST_WITH_RETURN(dev, file_priv);
1196
1197         if (!dev_priv->page_flipping)
1198                 i810_do_init_pageflip(dev);
1199
1200         i810_dma_dispatch_flip(dev);
1201         return 0;
1202 }
1203
1204 int i810_driver_load(struct drm_device *dev, unsigned long flags)
1205 {
1206         /* i810 has 4 more counters */
1207         dev->counters += 4;
1208         dev->types[6] = _DRM_STAT_IRQ;
1209         dev->types[7] = _DRM_STAT_PRIMARY;
1210         dev->types[8] = _DRM_STAT_SECONDARY;
1211         dev->types[9] = _DRM_STAT_DMA;
1212
1213         return 0;
1214 }
1215
1216 void i810_driver_lastclose(struct drm_device *dev)
1217 {
1218         i810_dma_cleanup(dev);
1219 }
1220
1221 void i810_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
1222 {
1223         if (dev->dev_private) {
1224                 drm_i810_private_t *dev_priv = dev->dev_private;
1225                 if (dev_priv->page_flipping)
1226                         i810_do_cleanup_pageflip(dev);
1227         }
1228 }
1229
1230 void i810_driver_reclaim_buffers_locked(struct drm_device *dev,
1231                                         struct drm_file *file_priv)
1232 {
1233         i810_reclaim_buffers(dev, file_priv);
1234 }
1235
1236 int i810_driver_dma_quiescent(struct drm_device *dev)
1237 {
1238         i810_dma_quiescent(dev);
1239         return 0;
1240 }
1241
1242 struct drm_ioctl_desc i810_ioctls[] = {
1243         DRM_IOCTL_DEF_DRV(I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
1244         DRM_IOCTL_DEF_DRV(I810_VERTEX, i810_dma_vertex, DRM_AUTH|DRM_UNLOCKED),
1245         DRM_IOCTL_DEF_DRV(I810_CLEAR, i810_clear_bufs, DRM_AUTH|DRM_UNLOCKED),
1246         DRM_IOCTL_DEF_DRV(I810_FLUSH, i810_flush_ioctl, DRM_AUTH|DRM_UNLOCKED),
1247         DRM_IOCTL_DEF_DRV(I810_GETAGE, i810_getage, DRM_AUTH|DRM_UNLOCKED),
1248         DRM_IOCTL_DEF_DRV(I810_GETBUF, i810_getbuf, DRM_AUTH|DRM_UNLOCKED),
1249         DRM_IOCTL_DEF_DRV(I810_SWAP, i810_swap_bufs, DRM_AUTH|DRM_UNLOCKED),
1250         DRM_IOCTL_DEF_DRV(I810_COPY, i810_copybuf, DRM_AUTH|DRM_UNLOCKED),
1251         DRM_IOCTL_DEF_DRV(I810_DOCOPY, i810_docopy, DRM_AUTH|DRM_UNLOCKED),
1252         DRM_IOCTL_DEF_DRV(I810_OV0INFO, i810_ov0_info, DRM_AUTH|DRM_UNLOCKED),
1253         DRM_IOCTL_DEF_DRV(I810_FSTATUS, i810_fstatus, DRM_AUTH|DRM_UNLOCKED),
1254         DRM_IOCTL_DEF_DRV(I810_OV0FLIP, i810_ov0_flip, DRM_AUTH|DRM_UNLOCKED),
1255         DRM_IOCTL_DEF_DRV(I810_MC, i810_dma_mc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
1256         DRM_IOCTL_DEF_DRV(I810_RSTATUS, i810_rstatus, DRM_AUTH|DRM_UNLOCKED),
1257         DRM_IOCTL_DEF_DRV(I810_FLIP, i810_flip_bufs, DRM_AUTH|DRM_UNLOCKED),
1258 };
1259
1260 int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls);
1261
1262 /**
1263  * Determine if the device really is AGP or not.
1264  *
1265  * All Intel graphics chipsets are treated as AGP, even if they are really
1266  * PCI-e.
1267  *
1268  * \param dev   The device to be tested.
1269  *
1270  * \returns
1271  * A value of 1 is always returned to indicate every i810 is AGP.
1272  */
1273 int i810_driver_device_is_agp(struct drm_device *dev)
1274 {
1275         return 1;
1276 }