/* mga_dma.c -- DMA support for mga g200/g400 -*- linux-c -*-
 * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * \file mga_dma.c
 * DMA support for MGA G200 / G400.
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Jeff Hartmann <jhartmann@valinux.com>
 * \author Keith Whitwell <keith@tungstengraphics.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

#include "drmP.h"
#include "drm.h"
#include "drm_sarea.h"
#include "mga_drm.h"
#include "mga_drv.h"

#define MGA_DEFAULT_USEC_TIMEOUT        10000
#define MGA_FREELIST_DEBUG              0

#define MINIMAL_CLEANUP 0
#define FULL_CLEANUP 1
static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup);

/* ================================================================
 * Engine control
 */

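/* Poll the STATUS register until the drawing engine and primary DMA
 * stream report idle, or until the timeout expires.  Returns 0 on
 * success and -EBUSY on timeout.
 */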
int mga_do_wait_for_idle(drm_mga_private_t *dev_priv)
{
        u32 status = 0;
        int i;
        DRM_DEBUG("\n");

        for (i = 0; i < dev_priv->usec_timeout; i++) {
                status = MGA_READ(MGA_STATUS) & MGA_ENGINE_IDLE_MASK;
                if (status == MGA_ENDPRDMASTS) {
                        MGA_WRITE8(MGA_CRTC_INDEX, 0);
                        return 0;
                }
                DRM_UDELAY(1);
        }

#if MGA_DMA_DEBUG
        DRM_ERROR("failed!\n");
        DRM_INFO("   status=0x%08x\n", status);
#endif
        return -EBUSY;
}

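/* Reset the software state of the primary DMA stream.  Note that the
 * hardware itself is not touched here; see the FIXMEs below.
 */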
static int mga_do_dma_reset(drm_mga_private_t *dev_priv)
{
        drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
        drm_mga_primary_buffer_t *primary = &dev_priv->prim;

        DRM_DEBUG("\n");

        /* The primary DMA stream should look like new right about now.
         */
        primary->tail = 0;
        primary->space = primary->size;
        primary->last_flush = 0;

        sarea_priv->last_wrap = 0;

        /* FIXME: Reset counters, buffer ages etc...
         */

        /* FIXME: What else do we need to reinitialize?  WARP stuff?
         */

        return 0;
}

/* ================================================================
 * Primary DMA stream
 */

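/* Kick the commands accumulated in the primary DMA stream off to the
 * hardware by advancing PRIMEND, and recompute the remaining space
 * from the current hardware read pointer (PRIMADDRESS).
 */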
void mga_do_dma_flush(drm_mga_private_t *dev_priv)
{
        drm_mga_primary_buffer_t *primary = &dev_priv->prim;
        u32 head, tail;
        u32 status = 0;
        int i;
        DMA_LOCALS;
        DRM_DEBUG("\n");

        /* We need to wait so that we can do a safe flush */
        for (i = 0; i < dev_priv->usec_timeout; i++) {
                status = MGA_READ(MGA_STATUS) & MGA_ENGINE_IDLE_MASK;
                if (status == MGA_ENDPRDMASTS)
                        break;
                DRM_UDELAY(1);
        }

        if (primary->tail == primary->last_flush) {
                DRM_DEBUG("   bailing out...\n");
                return;
        }

        tail = primary->tail + dev_priv->primary->offset;

        /* We need to pad the stream between flushes, as the card
         * actually (partially?) reads the first of these commands.
         * See page 4-16 in the G400 manual, middle of the page or so.
         */
        BEGIN_DMA(1);

        DMA_BLOCK(MGA_DMAPAD, 0x00000000,
                  MGA_DMAPAD, 0x00000000,
                  MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000);

        ADVANCE_DMA();

        primary->last_flush = primary->tail;

        head = MGA_READ(MGA_PRIMADDRESS);

        if (head <= tail)
                primary->space = primary->size - primary->tail;
        else
                primary->space = head - tail;

        DRM_DEBUG("   head = 0x%06lx\n", (unsigned long)(head - dev_priv->primary->offset));
        DRM_DEBUG("   tail = 0x%06lx\n", (unsigned long)(tail - dev_priv->primary->offset));
        DRM_DEBUG("  space = 0x%06x\n", primary->space);

        mga_flush_write_combine();
        MGA_WRITE(MGA_PRIMEND, tail | dev_priv->dma_access);

        DRM_DEBUG("done.\n");
}

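/* Flush the remaining commands and rewind the software tail to the
 * start of the primary buffer.  The wrap does not become visible to
 * the hardware until mga_do_dma_wrap_end() resets PRIMADDRESS.
 */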
void mga_do_dma_wrap_start(drm_mga_private_t *dev_priv)
{
        drm_mga_primary_buffer_t *primary = &dev_priv->prim;
        u32 head, tail;
        DMA_LOCALS;
        DRM_DEBUG("\n");

        BEGIN_DMA_WRAP();

        DMA_BLOCK(MGA_DMAPAD, 0x00000000,
                  MGA_DMAPAD, 0x00000000,
                  MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000);

        ADVANCE_DMA();

        tail = primary->tail + dev_priv->primary->offset;

        primary->tail = 0;
        primary->last_flush = 0;
        primary->last_wrap++;

        head = MGA_READ(MGA_PRIMADDRESS);

        if (head == dev_priv->primary->offset)
                primary->space = primary->size;
        else
                primary->space = head - dev_priv->primary->offset;

        DRM_DEBUG("   head = 0x%06lx\n", (unsigned long)(head - dev_priv->primary->offset));
        DRM_DEBUG("   tail = 0x%06x\n", primary->tail);
        DRM_DEBUG("   wrap = %d\n", primary->last_wrap);
        DRM_DEBUG("  space = 0x%06x\n", primary->space);

        mga_flush_write_combine();
        MGA_WRITE(MGA_PRIMEND, tail | dev_priv->dma_access);

        set_bit(0, &primary->wrapped);
        DRM_DEBUG("done.\n");
}

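/* Complete a wrap of the primary DMA stream by pointing the hardware
 * back at the start of the buffer and clearing the wrapped flag.
 */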
void mga_do_dma_wrap_end(drm_mga_private_t *dev_priv)
{
        drm_mga_primary_buffer_t *primary = &dev_priv->prim;
        drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
        u32 head = dev_priv->primary->offset;
        DRM_DEBUG("\n");

        sarea_priv->last_wrap++;
        DRM_DEBUG("   wrap = %d\n", sarea_priv->last_wrap);

        mga_flush_write_combine();
        MGA_WRITE(MGA_PRIMADDRESS, head | MGA_DMA_GENERAL);

        clear_bit(0, &primary->wrapped);
        DRM_DEBUG("done.\n");
}

/* ================================================================
 * Freelist management
 */

#define MGA_BUFFER_USED         (~0)
#define MGA_BUFFER_FREE         0

#if MGA_FREELIST_DEBUG
static void mga_freelist_print(struct drm_device *dev)
{
        drm_mga_private_t *dev_priv = dev->dev_private;
        drm_mga_freelist_t *entry;

        DRM_INFO("\n");
        DRM_INFO("current dispatch: last=0x%x done=0x%x\n",
                 dev_priv->sarea_priv->last_dispatch,
                 (unsigned int)(MGA_READ(MGA_PRIMADDRESS) -
                                dev_priv->primary->offset));
        DRM_INFO("current freelist:\n");

        for (entry = dev_priv->head->next; entry; entry = entry->next) {
                DRM_INFO("   %p   idx=%2d  age=0x%x 0x%06lx\n",
                         entry, entry->buf->idx, entry->age.head,
                         (unsigned long)(entry->age.head - dev_priv->primary->offset));
        }
        DRM_INFO("\n");
}
#endif

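/* Build the buffer freelist: a doubly-linked list with a sentinel head
 * entry and one entry per DMA buffer, all initially marked free.
 */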
static int mga_freelist_init(struct drm_device *dev, drm_mga_private_t *dev_priv)
{
        struct drm_device_dma *dma = dev->dma;
        struct drm_buf *buf;
        drm_mga_buf_priv_t *buf_priv;
        drm_mga_freelist_t *entry;
        int i;
        DRM_DEBUG("count=%d\n", dma->buf_count);

        dev_priv->head = kzalloc(sizeof(drm_mga_freelist_t), GFP_KERNEL);
        if (dev_priv->head == NULL)
                return -ENOMEM;

        SET_AGE(&dev_priv->head->age, MGA_BUFFER_USED, 0);

        for (i = 0; i < dma->buf_count; i++) {
                buf = dma->buflist[i];
                buf_priv = buf->dev_private;

                entry = kzalloc(sizeof(drm_mga_freelist_t), GFP_KERNEL);
                if (entry == NULL)
                        return -ENOMEM;

                entry->next = dev_priv->head->next;
                entry->prev = dev_priv->head;
                SET_AGE(&entry->age, MGA_BUFFER_FREE, 0);
                entry->buf = buf;

                if (dev_priv->head->next != NULL)
                        dev_priv->head->next->prev = entry;
                if (entry->next == NULL)
                        dev_priv->tail = entry;

                buf_priv->list_entry = entry;
                buf_priv->discard = 0;
                buf_priv->dispatched = 0;

                dev_priv->head->next = entry;
        }

        return 0;
}

static void mga_freelist_cleanup(struct drm_device *dev)
{
        drm_mga_private_t *dev_priv = dev->dev_private;
        drm_mga_freelist_t *entry;
        drm_mga_freelist_t *next;
        DRM_DEBUG("\n");

        entry = dev_priv->head;
        while (entry) {
                next = entry->next;
                kfree(entry);
                entry = next;
        }

        dev_priv->head = dev_priv->tail = NULL;
}

#if 0
/* FIXME: Still needed?
 */
static void mga_freelist_reset(struct drm_device *dev)
{
        struct drm_device_dma *dma = dev->dma;
        struct drm_buf *buf;
        drm_mga_buf_priv_t *buf_priv;
        int i;

        for (i = 0; i < dma->buf_count; i++) {
                buf = dma->buflist[i];
                buf_priv = buf->dev_private;
                SET_AGE(&buf_priv->list_entry->age, MGA_BUFFER_FREE, 0);
        }
}
#endif

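/* Take the least recently used buffer from the tail of the freelist,
 * but only if the hardware has already processed past its age.
 */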
static struct drm_buf *mga_freelist_get(struct drm_device * dev)
{
        drm_mga_private_t *dev_priv = dev->dev_private;
        drm_mga_freelist_t *next;
        drm_mga_freelist_t *prev;
        drm_mga_freelist_t *tail = dev_priv->tail;
        u32 head, wrap;
        DRM_DEBUG("\n");

        head = MGA_READ(MGA_PRIMADDRESS);
        wrap = dev_priv->sarea_priv->last_wrap;

        DRM_DEBUG("   tail=0x%06lx %d\n",
                  tail->age.head ?
                  (unsigned long)(tail->age.head - dev_priv->primary->offset) : 0,
                  tail->age.wrap);
        DRM_DEBUG("   head=0x%06lx %d\n",
                  (unsigned long)(head - dev_priv->primary->offset), wrap);

        if (TEST_AGE(&tail->age, head, wrap)) {
                prev = dev_priv->tail->prev;
                next = dev_priv->tail;
                prev->next = NULL;
                next->prev = next->next = NULL;
                dev_priv->tail = prev;
                SET_AGE(&next->age, MGA_BUFFER_USED, 0);
                return next->buf;
        }

        DRM_DEBUG("returning NULL!\n");
        return NULL;
}

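/* Return a buffer to the freelist.  Buffers that were never dispatched
 * go back to the tail as immediately reusable; dispatched buffers are
 * queued at the head, behind the sentinel, so they have time to age.
 */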
int mga_freelist_put(struct drm_device *dev, struct drm_buf *buf)
{
        drm_mga_private_t *dev_priv = dev->dev_private;
        drm_mga_buf_priv_t *buf_priv = buf->dev_private;
        drm_mga_freelist_t *head, *entry, *prev;

        DRM_DEBUG("age=0x%06lx wrap=%d\n",
                  (unsigned long)(buf_priv->list_entry->age.head -
                                  dev_priv->primary->offset),
                  buf_priv->list_entry->age.wrap);

        entry = buf_priv->list_entry;
        head = dev_priv->head;

        if (buf_priv->list_entry->age.head == MGA_BUFFER_USED) {
                SET_AGE(&entry->age, MGA_BUFFER_FREE, 0);
                prev = dev_priv->tail;
                prev->next = entry;
                entry->prev = prev;
                entry->next = NULL;
        } else {
                prev = head->next;
                head->next = entry;
                prev->prev = entry;
                entry->prev = head;
                entry->next = prev;
        }

        return 0;
}

/* ================================================================
 * DMA initialization, cleanup
 */

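/* Per-device setup at load time: allocate the private structure, record
 * the chipset and MMIO BAR, add the driver statistics counters and set
 * up vblank handling.
 */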
int mga_driver_load(struct drm_device *dev, unsigned long flags)
{
        drm_mga_private_t *dev_priv;
        int ret;

        dev_priv = kzalloc(sizeof(drm_mga_private_t), GFP_KERNEL);
        if (!dev_priv)
                return -ENOMEM;

        dev->dev_private = (void *)dev_priv;

        dev_priv->usec_timeout = MGA_DEFAULT_USEC_TIMEOUT;
        dev_priv->chipset = flags;

        dev_priv->mmio_base = pci_resource_start(dev->pdev, 1);
        dev_priv->mmio_size = pci_resource_len(dev->pdev, 1);

        dev->counters += 3;
        dev->types[6] = _DRM_STAT_IRQ;
        dev->types[7] = _DRM_STAT_PRIMARY;
        dev->types[8] = _DRM_STAT_SECONDARY;

        ret = drm_vblank_init(dev, 1);

        if (ret) {
                (void) mga_driver_unload(dev);
                return ret;
        }

        return 0;
}

#if __OS_HAS_AGP
/**
 * Bootstrap the driver for AGP DMA.
 *
 * \todo
 * Investigate whether there is any benefit to storing the WARP microcode in
 * AGP memory.  If not, the microcode may as well always be put in PCI
 * memory.
 *
 * \todo
 * This routine needs to set dma_bs->agp_mode to the mode actually configured
 * in the hardware.  Looking just at the Linux AGP driver code, I don't see
 * an easy way to determine this.
 *
 * \sa mga_do_dma_bootstrap, mga_do_pci_dma_bootstrap
 */
static int mga_do_agp_dma_bootstrap(struct drm_device *dev,
                                    drm_mga_dma_bootstrap_t *dma_bs)
{
        drm_mga_private_t *const dev_priv =
            (drm_mga_private_t *) dev->dev_private;
        unsigned int warp_size = MGA_WARP_UCODE_SIZE;
        int err;
        unsigned offset;
        const unsigned secondary_size = dma_bs->secondary_bin_count
            * dma_bs->secondary_bin_size;
        const unsigned agp_size = (dma_bs->agp_size << 20);
        struct drm_buf_desc req;
        struct drm_agp_mode mode;
        struct drm_agp_info info;
        struct drm_agp_buffer agp_req;
        struct drm_agp_binding bind_req;

        /* Acquire AGP. */
        err = drm_agp_acquire(dev);
        if (err) {
                DRM_ERROR("Unable to acquire AGP: %d\n", err);
                return err;
        }

        err = drm_agp_info(dev, &info);
        if (err) {
                DRM_ERROR("Unable to get AGP info: %d\n", err);
                return err;
        }

        mode.mode = (info.mode & ~0x07) | dma_bs->agp_mode;
        err = drm_agp_enable(dev, mode);
        if (err) {
                DRM_ERROR("Unable to enable AGP (mode = 0x%lx)\n", mode.mode);
                return err;
        }

        /* In addition to the usual AGP mode configuration, the G200 AGP cards
         * need to have the AGP mode "manually" set.
         */

        if (dev_priv->chipset == MGA_CARD_TYPE_G200) {
                if (mode.mode & 0x02)
                        MGA_WRITE(MGA_AGP_PLL, MGA_AGP2XPLL_ENABLE);
                else
                        MGA_WRITE(MGA_AGP_PLL, MGA_AGP2XPLL_DISABLE);
        }

        /* Allocate and bind AGP memory. */
        agp_req.size = agp_size;
        agp_req.type = 0;
        err = drm_agp_alloc(dev, &agp_req);
        if (err) {
                dev_priv->agp_size = 0;
                DRM_ERROR("Unable to allocate %uMB AGP memory\n",
                          dma_bs->agp_size);
                return err;
        }

        dev_priv->agp_size = agp_size;
        dev_priv->agp_handle = agp_req.handle;

        bind_req.handle = agp_req.handle;
        bind_req.offset = 0;
        err = drm_agp_bind(dev, &bind_req);
        if (err) {
                DRM_ERROR("Unable to bind AGP memory: %d\n", err);
                return err;
        }

        /* Make drm_addbufs happy by not trying to create a mapping for less
         * than a page.
         */
        if (warp_size < PAGE_SIZE)
                warp_size = PAGE_SIZE;

        offset = 0;
        err = drm_addmap(dev, offset, warp_size,
                         _DRM_AGP, _DRM_READ_ONLY, &dev_priv->warp);
        if (err) {
                DRM_ERROR("Unable to map WARP microcode: %d\n", err);
                return err;
        }

        offset += warp_size;
        err = drm_addmap(dev, offset, dma_bs->primary_size,
                         _DRM_AGP, _DRM_READ_ONLY, &dev_priv->primary);
        if (err) {
                DRM_ERROR("Unable to map primary DMA region: %d\n", err);
                return err;
        }

        offset += dma_bs->primary_size;
        err = drm_addmap(dev, offset, secondary_size,
                         _DRM_AGP, 0, &dev->agp_buffer_map);
        if (err) {
                DRM_ERROR("Unable to map secondary DMA region: %d\n", err);
                return err;
        }

        (void)memset(&req, 0, sizeof(req));
        req.count = dma_bs->secondary_bin_count;
        req.size = dma_bs->secondary_bin_size;
        req.flags = _DRM_AGP_BUFFER;
        req.agp_start = offset;

        err = drm_addbufs_agp(dev, &req);
        if (err) {
                DRM_ERROR("Unable to add secondary DMA buffers: %d\n", err);
                return err;
        }

        {
                struct drm_map_list *_entry;
                unsigned long agp_token = 0;

                list_for_each_entry(_entry, &dev->maplist, head) {
                        if (_entry->map == dev->agp_buffer_map)
                                agp_token = _entry->user_token;
                }
                if (!agp_token)
                        return -EFAULT;

                dev->agp_buffer_token = agp_token;
        }

        offset += secondary_size;
        err = drm_addmap(dev, offset, agp_size - offset,
                         _DRM_AGP, 0, &dev_priv->agp_textures);
        if (err) {
                DRM_ERROR("Unable to map AGP texture region %d\n", err);
                return err;
        }

        drm_core_ioremap(dev_priv->warp, dev);
        drm_core_ioremap(dev_priv->primary, dev);
        drm_core_ioremap(dev->agp_buffer_map, dev);

        if (!dev_priv->warp->handle ||
            !dev_priv->primary->handle || !dev->agp_buffer_map->handle) {
                DRM_ERROR("failed to ioremap agp regions! (%p, %p, %p)\n",
                          dev_priv->warp->handle, dev_priv->primary->handle,
                          dev->agp_buffer_map->handle);
                return -ENOMEM;
        }

        dev_priv->dma_access = MGA_PAGPXFER;
        dev_priv->wagp_enable = MGA_WAGP_ENABLE;

        DRM_INFO("Initialized card for AGP DMA.\n");
        return 0;
}
#else
static int mga_do_agp_dma_bootstrap(struct drm_device *dev,
                                    drm_mga_dma_bootstrap_t *dma_bs)
{
        return -EINVAL;
}
#endif

/**
 * Bootstrap the driver for PCI DMA.
 *
 * \todo
 * The algorithm for decreasing the size of the primary DMA buffer could be
 * better.  The size should be rounded up to the nearest page size, then
 * decrease the request size by a single page each pass through the loop.
 *
 * \todo
 * Determine whether the maximum address passed to drm_pci_alloc is correct.
 * The same goes for drm_addbufs_pci.
 *
 * \sa mga_do_dma_bootstrap, mga_do_agp_dma_bootstrap
 */
static int mga_do_pci_dma_bootstrap(struct drm_device *dev,
                                    drm_mga_dma_bootstrap_t *dma_bs)
{
        drm_mga_private_t *const dev_priv =
            (drm_mga_private_t *) dev->dev_private;
        unsigned int warp_size = MGA_WARP_UCODE_SIZE;
        unsigned int primary_size;
        unsigned int bin_count;
        int err;
        struct drm_buf_desc req;

        if (dev->dma == NULL) {
                DRM_ERROR("dev->dma is NULL\n");
                return -EFAULT;
        }

        /* Make drm_addbufs happy by not trying to create a mapping for less
         * than a page.
         */
        if (warp_size < PAGE_SIZE)
                warp_size = PAGE_SIZE;

        /* The proper alignment is 0x100 for this mapping */
        err = drm_addmap(dev, 0, warp_size, _DRM_CONSISTENT,
                         _DRM_READ_ONLY, &dev_priv->warp);
        if (err != 0) {
                DRM_ERROR("Unable to create mapping for WARP microcode: %d\n",
                          err);
                return err;
        }

        /* Other than the bottom two bits being used to encode other
         * information, there don't appear to be any restrictions on the
         * alignment of the primary or secondary DMA buffers.
         */

        for (primary_size = dma_bs->primary_size; primary_size != 0;
             primary_size >>= 1) {
                /* The proper alignment for this mapping is 0x04 */
                err = drm_addmap(dev, 0, primary_size, _DRM_CONSISTENT,
                                 _DRM_READ_ONLY, &dev_priv->primary);
                if (!err)
                        break;
        }

        if (err != 0) {
                DRM_ERROR("Unable to allocate primary DMA region: %d\n", err);
                return -ENOMEM;
        }

        if (dev_priv->primary->size != dma_bs->primary_size) {
                DRM_INFO("Primary DMA buffer size reduced from %u to %u.\n",
                         dma_bs->primary_size,
                         (unsigned)dev_priv->primary->size);
                dma_bs->primary_size = dev_priv->primary->size;
        }

        for (bin_count = dma_bs->secondary_bin_count; bin_count > 0;
             bin_count--) {
                (void)memset(&req, 0, sizeof(req));
                req.count = bin_count;
                req.size = dma_bs->secondary_bin_size;

                err = drm_addbufs_pci(dev, &req);
                if (!err)
                        break;
        }

        if (bin_count == 0) {
                DRM_ERROR("Unable to add secondary DMA buffers: %d\n", err);
                return err;
        }

        if (bin_count != dma_bs->secondary_bin_count) {
                DRM_INFO("Secondary PCI DMA buffer bin count reduced from %u "
                         "to %u.\n", dma_bs->secondary_bin_count, bin_count);

                dma_bs->secondary_bin_count = bin_count;
        }

        dev_priv->dma_access = 0;
        dev_priv->wagp_enable = 0;

        dma_bs->agp_mode = 0;

        DRM_INFO("Initialized card for PCI DMA.\n");
        return 0;
}

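/* Common bootstrap path: map the MMIO and status regions, then try AGP
 * DMA first (when requested and available) and fall back to PCI DMA if
 * that fails.
 */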
static int mga_do_dma_bootstrap(struct drm_device *dev,
                                drm_mga_dma_bootstrap_t *dma_bs)
{
        const int is_agp = (dma_bs->agp_mode != 0) && drm_pci_device_is_agp(dev);
        int err;
        drm_mga_private_t *const dev_priv =
            (drm_mga_private_t *) dev->dev_private;

        dev_priv->used_new_dma_init = 1;

        /* The first steps are the same for both PCI and AGP based DMA.  Map
         * the card's MMIO registers and map a status page.
         */
        err = drm_addmap(dev, dev_priv->mmio_base, dev_priv->mmio_size,
                         _DRM_REGISTERS, _DRM_READ_ONLY, &dev_priv->mmio);
        if (err) {
                DRM_ERROR("Unable to map MMIO region: %d\n", err);
                return err;
        }

        err = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM,
                         _DRM_READ_ONLY | _DRM_LOCKED | _DRM_KERNEL,
                         &dev_priv->status);
        if (err) {
                DRM_ERROR("Unable to map status region: %d\n", err);
                return err;
        }

        /* The DMA initialization procedure is slightly different for PCI and
         * AGP cards.  AGP cards just allocate a large block of AGP memory and
         * carve off portions of it for internal uses.  The remaining memory
         * is returned to user-mode to be used for AGP textures.
         */
        if (is_agp)
                err = mga_do_agp_dma_bootstrap(dev, dma_bs);

        /* If we attempted to initialize the card for AGP DMA but failed,
         * clean-up any mess that may have been created.
         */

        if (err)
                mga_do_cleanup_dma(dev, MINIMAL_CLEANUP);

        /* Not only do we want to try to initialize PCI cards for PCI DMA,
         * but we also try to initialize AGP cards that could not be
         * initialized for AGP DMA.  This covers the case where we have an AGP
         * card in a system with an unsupported AGP chipset.  In that case the
         * card will be detected as AGP, but we won't be able to allocate any
         * AGP memory, etc.
         */

        if (!is_agp || err)
                err = mga_do_pci_dma_bootstrap(dev, dma_bs);

        return err;
}

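/* Ioctl handler for DMA bootstrap: run the bootstrap and report the
 * resulting texture region and effective AGP mode back to user space.
 */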
int mga_dma_bootstrap(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
{
        drm_mga_dma_bootstrap_t *bootstrap = data;
        int err;
        static const int modes[] = { 0, 1, 2, 2, 4, 4, 4, 4 };
        const drm_mga_private_t *const dev_priv =
                (drm_mga_private_t *) dev->dev_private;

        err = mga_do_dma_bootstrap(dev, bootstrap);
        if (err) {
                mga_do_cleanup_dma(dev, FULL_CLEANUP);
                return err;
        }

        if (dev_priv->agp_textures != NULL) {
                bootstrap->texture_handle = dev_priv->agp_textures->offset;
                bootstrap->texture_size = dev_priv->agp_textures->size;
        } else {
                bootstrap->texture_handle = 0;
                bootstrap->texture_size = 0;
        }

        bootstrap->agp_mode = modes[bootstrap->agp_mode & 0x07];

        return err;
}

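/* Finish DMA initialization from the INIT ioctl: record the framebuffer
 * and depth buffer layout, locate (and, for the old init path, map) the
 * DRM regions, load the WARP microcode and prime the primary DMA stream.
 */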
static int mga_do_init_dma(struct drm_device *dev, drm_mga_init_t *init)
{
        drm_mga_private_t *dev_priv;
        int ret;
        DRM_DEBUG("\n");

        dev_priv = dev->dev_private;

        if (init->sgram)
                dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_BLK;
        else
                dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_RSTR;
        dev_priv->maccess = init->maccess;

        dev_priv->fb_cpp = init->fb_cpp;
        dev_priv->front_offset = init->front_offset;
        dev_priv->front_pitch = init->front_pitch;
        dev_priv->back_offset = init->back_offset;
        dev_priv->back_pitch = init->back_pitch;

        dev_priv->depth_cpp = init->depth_cpp;
        dev_priv->depth_offset = init->depth_offset;
        dev_priv->depth_pitch = init->depth_pitch;

        /* FIXME: Need to support AGP textures...
         */
        dev_priv->texture_offset = init->texture_offset[0];
        dev_priv->texture_size = init->texture_size[0];

        dev_priv->sarea = drm_getsarea(dev);
        if (!dev_priv->sarea) {
                DRM_ERROR("failed to find sarea!\n");
                return -EINVAL;
        }

        if (!dev_priv->used_new_dma_init) {

                dev_priv->dma_access = MGA_PAGPXFER;
                dev_priv->wagp_enable = MGA_WAGP_ENABLE;

                dev_priv->status = drm_core_findmap(dev, init->status_offset);
                if (!dev_priv->status) {
                        DRM_ERROR("failed to find status page!\n");
                        return -EINVAL;
                }
                dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
                if (!dev_priv->mmio) {
                        DRM_ERROR("failed to find mmio region!\n");
                        return -EINVAL;
                }
                dev_priv->warp = drm_core_findmap(dev, init->warp_offset);
                if (!dev_priv->warp) {
                        DRM_ERROR("failed to find warp microcode region!\n");
                        return -EINVAL;
                }
                dev_priv->primary = drm_core_findmap(dev, init->primary_offset);
                if (!dev_priv->primary) {
                        DRM_ERROR("failed to find primary dma region!\n");
                        return -EINVAL;
                }
                dev->agp_buffer_token = init->buffers_offset;
                dev->agp_buffer_map =
                    drm_core_findmap(dev, init->buffers_offset);
                if (!dev->agp_buffer_map) {
                        DRM_ERROR("failed to find dma buffer region!\n");
                        return -EINVAL;
                }

                drm_core_ioremap(dev_priv->warp, dev);
                drm_core_ioremap(dev_priv->primary, dev);
                drm_core_ioremap(dev->agp_buffer_map, dev);
        }

        dev_priv->sarea_priv =
            (drm_mga_sarea_t *) ((u8 *) dev_priv->sarea->handle +
                                 init->sarea_priv_offset);

        if (!dev_priv->warp->handle ||
            !dev_priv->primary->handle ||
            ((dev_priv->dma_access != 0) &&
             ((dev->agp_buffer_map == NULL) ||
              (dev->agp_buffer_map->handle == NULL)))) {
                DRM_ERROR("failed to ioremap agp regions!\n");
                return -ENOMEM;
        }

        ret = mga_warp_install_microcode(dev_priv);
        if (ret < 0) {
                DRM_ERROR("failed to install WARP ucode!: %d\n", ret);
                return ret;
        }

        ret = mga_warp_init(dev_priv);
        if (ret < 0) {
                DRM_ERROR("failed to init WARP engine!: %d\n", ret);
                return ret;
        }

        dev_priv->prim.status = (u32 *) dev_priv->status->handle;

        mga_do_wait_for_idle(dev_priv);

        /* Init the primary DMA registers.
         */
        MGA_WRITE(MGA_PRIMADDRESS, dev_priv->primary->offset | MGA_DMA_GENERAL);
#if 0
        MGA_WRITE(MGA_PRIMPTR, virt_to_bus((void *)dev_priv->prim.status) | MGA_PRIMPTREN0 |    /* Soft trap, SECEND, SETUPEND */
                  MGA_PRIMPTREN1);      /* DWGSYNC */
#endif

        dev_priv->prim.start = (u8 *) dev_priv->primary->handle;
        dev_priv->prim.end = ((u8 *) dev_priv->primary->handle
                              + dev_priv->primary->size);
        dev_priv->prim.size = dev_priv->primary->size;

        dev_priv->prim.tail = 0;
        dev_priv->prim.space = dev_priv->prim.size;
        dev_priv->prim.wrapped = 0;

        dev_priv->prim.last_flush = 0;
        dev_priv->prim.last_wrap = 0;

        dev_priv->prim.high_mark = 256 * DMA_BLOCK_SIZE;

        dev_priv->prim.status[0] = dev_priv->primary->offset;
        dev_priv->prim.status[1] = 0;

        dev_priv->sarea_priv->last_wrap = 0;
        dev_priv->sarea_priv->last_frame.head = 0;
        dev_priv->sarea_priv->last_frame.wrap = 0;

        if (mga_freelist_init(dev, dev_priv) < 0) {
                DRM_ERROR("could not initialize freelist\n");
                return -ENOMEM;
        }

        return 0;
}

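/* Undo as much of the DMA setup as has been completed.  FULL_CLEANUP
 * additionally drops the MMIO and status mappings; MINIMAL_CLEANUP keeps
 * them so the PCI fallback in mga_do_dma_bootstrap() can reuse them.
 */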
static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup)
{
        int err = 0;
        DRM_DEBUG("\n");

        /* Make sure interrupts are disabled here because the uninstall ioctl
         * may not have been called from userspace and after dev_private
         * is freed, it's too late.
         */
        if (dev->irq_enabled)
                drm_irq_uninstall(dev);

        if (dev->dev_private) {
                drm_mga_private_t *dev_priv = dev->dev_private;

                if ((dev_priv->warp != NULL)
                    && (dev_priv->warp->type != _DRM_CONSISTENT))
                        drm_core_ioremapfree(dev_priv->warp, dev);

                if ((dev_priv->primary != NULL)
                    && (dev_priv->primary->type != _DRM_CONSISTENT))
                        drm_core_ioremapfree(dev_priv->primary, dev);

                if (dev->agp_buffer_map != NULL)
                        drm_core_ioremapfree(dev->agp_buffer_map, dev);

                if (dev_priv->used_new_dma_init) {
#if __OS_HAS_AGP
                        if (dev_priv->agp_handle != 0) {
                                struct drm_agp_binding unbind_req;
                                struct drm_agp_buffer free_req;

                                unbind_req.handle = dev_priv->agp_handle;
                                drm_agp_unbind(dev, &unbind_req);

                                free_req.handle = dev_priv->agp_handle;
                                drm_agp_free(dev, &free_req);

                                dev_priv->agp_textures = NULL;
                                dev_priv->agp_size = 0;
                                dev_priv->agp_handle = 0;
                        }

                        if ((dev->agp != NULL) && dev->agp->acquired)
                                err = drm_agp_release(dev);
#endif
                }

                dev_priv->warp = NULL;
                dev_priv->primary = NULL;
                dev_priv->sarea = NULL;
                dev_priv->sarea_priv = NULL;
                dev->agp_buffer_map = NULL;

                if (full_cleanup) {
                        dev_priv->mmio = NULL;
                        dev_priv->status = NULL;
                        dev_priv->used_new_dma_init = 0;
                }

                memset(&dev_priv->prim, 0, sizeof(dev_priv->prim));
                dev_priv->warp_pipe = 0;
                memset(dev_priv->warp_pipe_phys, 0,
                       sizeof(dev_priv->warp_pipe_phys));

                if (dev_priv->head != NULL)
                        mga_freelist_cleanup(dev);
        }

        return err;
}

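/* Ioctl handler for DMA init/cleanup requests from user space. */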
int mga_dma_init(struct drm_device *dev, void *data,
                 struct drm_file *file_priv)
{
        drm_mga_init_t *init = data;
        int err;

        LOCK_TEST_WITH_RETURN(dev, file_priv);

        switch (init->func) {
        case MGA_INIT_DMA:
                err = mga_do_init_dma(dev, init);
                if (err)
                        (void)mga_do_cleanup_dma(dev, FULL_CLEANUP);
                return err;
        case MGA_CLEANUP_DMA:
                return mga_do_cleanup_dma(dev, FULL_CLEANUP);
        }

        return -EINVAL;
}

/* ================================================================
 * Primary DMA stream management
 */

int mga_dma_flush(struct drm_device *dev, void *data,
                  struct drm_file *file_priv)
{
        drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
        struct drm_lock *lock = data;

        LOCK_TEST_WITH_RETURN(dev, file_priv);

        DRM_DEBUG("%s%s%s\n",
                  (lock->flags & _DRM_LOCK_FLUSH) ? "flush, " : "",
                  (lock->flags & _DRM_LOCK_FLUSH_ALL) ? "flush all, " : "",
                  (lock->flags & _DRM_LOCK_QUIESCENT) ? "idle, " : "");

        WRAP_WAIT_WITH_RETURN(dev_priv);

        if (lock->flags & (_DRM_LOCK_FLUSH | _DRM_LOCK_FLUSH_ALL))
                mga_do_dma_flush(dev_priv);

        if (lock->flags & _DRM_LOCK_QUIESCENT) {
#if MGA_DMA_DEBUG
                int ret = mga_do_wait_for_idle(dev_priv);
                if (ret < 0)
                        DRM_INFO("-EBUSY\n");
                return ret;
#else
                return mga_do_wait_for_idle(dev_priv);
#endif
        } else {
                return 0;
        }
}

int mga_dma_reset(struct drm_device *dev, void *data,
                  struct drm_file *file_priv)
{
        drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;

        LOCK_TEST_WITH_RETURN(dev, file_priv);

        return mga_do_dma_reset(dev_priv);
}

/* ================================================================
 * DMA buffer management
 */

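/* Hand out up to the requested number of free DMA buffers, copying the
 * index and size of each one back to user space.
 */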
static int mga_dma_get_buffers(struct drm_device *dev,
                               struct drm_file *file_priv, struct drm_dma *d)
{
        struct drm_buf *buf;
        int i;

        for (i = d->granted_count; i < d->request_count; i++) {
                buf = mga_freelist_get(dev);
                if (!buf)
                        return -EAGAIN;

                buf->file_priv = file_priv;

                if (DRM_COPY_TO_USER(&d->request_indices[i],
                                     &buf->idx, sizeof(buf->idx)))
                        return -EFAULT;
                if (DRM_COPY_TO_USER(&d->request_sizes[i],
                                     &buf->total, sizeof(buf->total)))
                        return -EFAULT;

                d->granted_count++;
        }
        return 0;
}

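/* Ioctl handler for buffer requests.  Sending buffers through this
 * interface is not supported; only buffer allocation requests are
 * honoured.
 */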
int mga_dma_buffers(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        struct drm_device_dma *dma = dev->dma;
        drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
        struct drm_dma *d = data;
        int ret = 0;

        LOCK_TEST_WITH_RETURN(dev, file_priv);

        /* Please don't send us buffers.
         */
        if (d->send_count != 0) {
                DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
                          DRM_CURRENTPID, d->send_count);
                return -EINVAL;
        }

        /* We'll send you buffers.
         */
        if (d->request_count < 0 || d->request_count > dma->buf_count) {
                DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
                          DRM_CURRENTPID, d->request_count, dma->buf_count);
                return -EINVAL;
        }

        WRAP_TEST_WITH_RETURN(dev_priv);

        d->granted_count = 0;

        if (d->request_count)
                ret = mga_dma_get_buffers(dev, file_priv, d);

        return ret;
}

/**
 * Called just before the module is unloaded.
 */
int mga_driver_unload(struct drm_device *dev)
{
        kfree(dev->dev_private);
        dev->dev_private = NULL;

        return 0;
}

/**
 * Called when the last opener of the device is closed.
 */
void mga_driver_lastclose(struct drm_device *dev)
{
        mga_do_cleanup_dma(dev, FULL_CLEANUP);
}

int mga_driver_dma_quiescent(struct drm_device *dev)
{
        drm_mga_private_t *dev_priv = dev->dev_private;
        return mga_do_wait_for_idle(dev_priv);
}