Pull novell-bugzilla-156426 into release branch

diff --git a/drivers/char/drm/drm_bufs.c b/drivers/char/drm/drm_bufs.c
index 319bdea..006b06d 100644
--- a/drivers/char/drm/drm_bufs.c
+++ b/drivers/char/drm/drm_bufs.c
 #include <linux/vmalloc.h>
 #include "drmP.h"
 
-unsigned long drm_get_resource_start(drm_device_t * dev, unsigned int resource)
+unsigned long drm_get_resource_start(drm_device_t *dev, unsigned int resource)
 {
        return pci_resource_start(dev->pdev, resource);
 }
-
 EXPORT_SYMBOL(drm_get_resource_start);
 
-unsigned long drm_get_resource_len(drm_device_t * dev, unsigned int resource)
+unsigned long drm_get_resource_len(drm_device_t *dev, unsigned int resource)
 {
        return pci_resource_len(dev->pdev, resource);
 }
 
 EXPORT_SYMBOL(drm_get_resource_len);
 
-static drm_map_list_t *drm_find_matching_map(drm_device_t * dev,
-                                            drm_local_map_t * map)
+static drm_map_list_t *drm_find_matching_map(drm_device_t *dev,
+                                            drm_local_map_t *map)
 {
        struct list_head *list;
 
@@ -74,7 +73,7 @@ static drm_map_list_t *drm_find_matching_map(drm_device_t * dev,
 
 #ifdef _LP64
 static __inline__ unsigned int HandleID(unsigned long lhandle,
-                                       drm_device_t * dev)
+                                       drm_device_t *dev)
 {
        static unsigned int map32_handle = START_RANGE;
        unsigned int hash;
@@ -155,7 +154,7 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
        case _DRM_REGISTERS:
        case _DRM_FRAME_BUFFER:
 #if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__)
-               if (map->offset + map->size < map->offset ||
+               if (map->offset + (map->size-1) < map->offset ||
                    map->offset < virt_to_phys(high_memory)) {
                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                        return -EINVAL;
@@ -256,14 +255,14 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
        memset(list, 0, sizeof(*list));
        list->map = map;
 
-       down(&dev->struct_sem);
+       mutex_lock(&dev->struct_mutex);
        list_add(&list->head, &dev->maplist->head);
        /* Assign a 32-bit handle */
-       /* We do it here so that dev->struct_sem protects the increment */
+       /* We do it here so that dev->struct_mutex protects the increment */
        list->user_token = HandleID(map->type == _DRM_SHM
                                    ? (unsigned long)map->handle
                                    : map->offset, dev);
-       up(&dev->struct_sem);
+       mutex_unlock(&dev->struct_mutex);
 
        *maplist = list;
        return 0;
@@ -301,6 +300,9 @@ int drm_addmap_ioctl(struct inode *inode, struct file *filp,
                return -EFAULT;
        }
 
+       if (!(capable(CAP_SYS_ADMIN) || map.type == _DRM_AGP))
+               return -EPERM;
+
        err = drm_addmap_core(dev, map.offset, map.size, map.type, map.flags,
                              &maplist);
 
@@ -332,7 +334,7 @@ int drm_addmap_ioctl(struct inode *inode, struct file *filp,
  *
  * \sa drm_addmap
  */
-int drm_rmmap_locked(drm_device_t * dev, drm_local_map_t * map)
+int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map)
 {
        struct list_head *list;
        drm_map_list_t *r_list = NULL;
@@ -385,21 +387,17 @@ int drm_rmmap_locked(drm_device_t * dev, drm_local_map_t * map)
        return 0;
 }
 
-EXPORT_SYMBOL(drm_rmmap_locked);
-
-int drm_rmmap(drm_device_t * dev, drm_local_map_t * map)
+int drm_rmmap(drm_device_t *dev, drm_local_map_t *map)
 {
        int ret;
 
-       down(&dev->struct_sem);
+       mutex_lock(&dev->struct_mutex);
        ret = drm_rmmap_locked(dev, map);
-       up(&dev->struct_sem);
+       mutex_unlock(&dev->struct_mutex);
 
        return ret;
 }
 
-EXPORT_SYMBOL(drm_rmmap);
-
 /* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
  * the last close of the device, and this is necessary for cleanup when things
  * exit uncleanly.  Therefore, having userland manually remove mappings seems
@@ -423,7 +421,7 @@ int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
                return -EFAULT;
        }
 
-       down(&dev->struct_sem);
+       mutex_lock(&dev->struct_mutex);
        list_for_each(list, &dev->maplist->head) {
                drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head);
 
@@ -439,7 +437,7 @@ int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
         * find anything.
         */
        if (list == (&dev->maplist->head)) {
-               up(&dev->struct_sem);
+               mutex_unlock(&dev->struct_mutex);
                return -EINVAL;
        }
 
@@ -448,13 +446,13 @@ int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
 
        /* Register and framebuffer maps are permanent */
        if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
-               up(&dev->struct_sem);
+               mutex_unlock(&dev->struct_mutex);
                return 0;
        }
 
        ret = drm_rmmap_locked(dev, map);
 
-       up(&dev->struct_sem);
+       mutex_unlock(&dev->struct_mutex);
 
        return ret;
 }
@@ -474,8 +472,7 @@ static void drm_cleanup_buf_error(drm_device_t * dev, drm_buf_entry_t * entry)
        if (entry->seg_count) {
                for (i = 0; i < entry->seg_count; i++) {
                        if (entry->seglist[i]) {
-                               drm_free_pages(entry->seglist[i],
-                                              entry->page_order, DRM_MEM_DMA);
+                               drm_pci_free(dev, entry->seglist[i]);
                        }
                }
                drm_free(entry->seglist,
@@ -548,7 +545,7 @@ int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
        DRM_DEBUG("count:      %d\n", count);
        DRM_DEBUG("order:      %d\n", order);
        DRM_DEBUG("size:       %d\n", size);
-       DRM_DEBUG("agp_offset: %lu\n", agp_offset);
+       DRM_DEBUG("agp_offset: %lx\n", agp_offset);
        DRM_DEBUG("alignment:  %d\n", alignment);
        DRM_DEBUG("page_order: %d\n", page_order);
        DRM_DEBUG("total:      %d\n", total);
@@ -566,16 +563,16 @@ int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
        atomic_inc(&dev->buf_alloc);
        spin_unlock(&dev->count_lock);
 
-       down(&dev->struct_sem);
+       mutex_lock(&dev->struct_mutex);
        entry = &dma->bufs[order];
        if (entry->buf_count) {
-               up(&dev->struct_sem);
+               mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM; /* May only call once for each order */
        }
 
        if (count < 0 || count > 4096) {
-               up(&dev->struct_sem);
+               mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -EINVAL;
        }
@@ -583,7 +580,7 @@ int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
        entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
                                   DRM_MEM_BUFS);
        if (!entry->buflist) {
-               up(&dev->struct_sem);
+               mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
@@ -616,7 +613,7 @@ int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        drm_cleanup_buf_error(dev, entry);
-                       up(&dev->struct_sem);
+                       mutex_unlock(&dev->struct_mutex);
                        atomic_dec(&dev->buf_alloc);
                        return -ENOMEM;
                }
@@ -638,7 +635,7 @@ int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
        if (!temp_buflist) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev, entry);
-               up(&dev->struct_sem);
+               mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
@@ -649,12 +646,14 @@ int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
        }
 
        dma->buf_count += entry->buf_count;
+       dma->seg_count += entry->seg_count;
+       dma->page_count += byte_count >> PAGE_SHIFT;
        dma->byte_count += byte_count;
 
        DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
        DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
 
-       up(&dev->struct_sem);
+       mutex_unlock(&dev->struct_mutex);
 
        request->count = entry->buf_count;
        request->size = size;
@@ -664,7 +663,6 @@ int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
        atomic_dec(&dev->buf_alloc);
        return 0;
 }
-
 EXPORT_SYMBOL(drm_addbufs_agp);
 #endif                         /* __OS_HAS_AGP */
 
@@ -677,7 +675,7 @@ int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
        int total;
        int page_order;
        drm_buf_entry_t *entry;
-       unsigned long page;
+       drm_dma_handle_t *dmah;
        drm_buf_t *buf;
        int alignment;
        unsigned long offset;
@@ -689,9 +687,13 @@ int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
 
        if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
                return -EINVAL;
+
        if (!dma)
                return -EINVAL;
 
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
        count = request->count;
        order = drm_order(request->size);
        size = 1 << order;
@@ -717,16 +719,16 @@ int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
        atomic_inc(&dev->buf_alloc);
        spin_unlock(&dev->count_lock);
 
-       down(&dev->struct_sem);
+       mutex_lock(&dev->struct_mutex);
        entry = &dma->bufs[order];
        if (entry->buf_count) {
-               up(&dev->struct_sem);
+               mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM; /* May only call once for each order */
        }
 
        if (count < 0 || count > 4096) {
-               up(&dev->struct_sem);
+               mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -EINVAL;
        }
@@ -734,7 +736,7 @@ int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
        entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
                                   DRM_MEM_BUFS);
        if (!entry->buflist) {
-               up(&dev->struct_sem);
+               mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
@@ -745,7 +747,7 @@ int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
        if (!entry->seglist) {
                drm_free(entry->buflist,
                         count * sizeof(*entry->buflist), DRM_MEM_BUFS);
-               up(&dev->struct_sem);
+               mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
@@ -761,7 +763,7 @@ int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
                         count * sizeof(*entry->buflist), DRM_MEM_BUFS);
                drm_free(entry->seglist,
                         count * sizeof(*entry->seglist), DRM_MEM_SEGS);
-               up(&dev->struct_sem);
+               mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
@@ -776,8 +778,10 @@ int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
        page_count = 0;
 
        while (entry->buf_count < count) {
-               page = drm_alloc_pages(page_order, DRM_MEM_DMA);
-               if (!page) {
+
+               dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000, 0xfffffffful);
+
+               if (!dmah) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        entry->seg_count = count;
@@ -785,17 +789,17 @@ int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
                        drm_free(temp_pagelist,
                                 (dma->page_count + (count << page_order))
                                 * sizeof(*dma->pagelist), DRM_MEM_PAGES);
-                       up(&dev->struct_sem);
+                       mutex_unlock(&dev->struct_mutex);
                        atomic_dec(&dev->buf_alloc);
                        return -ENOMEM;
                }
-               entry->seglist[entry->seg_count++] = page;
+               entry->seglist[entry->seg_count++] = dmah;
                for (i = 0; i < (1 << page_order); i++) {
                        DRM_DEBUG("page %d @ 0x%08lx\n",
                                  dma->page_count + page_count,
-                                 page + PAGE_SIZE * i);
+                                 (unsigned long)dmah->vaddr + PAGE_SIZE * i);
                        temp_pagelist[dma->page_count + page_count++]
-                           = page + PAGE_SIZE * i;
+                               = (unsigned long)dmah->vaddr + PAGE_SIZE * i;
                }
                for (offset = 0;
                     offset + size <= total && entry->buf_count < count;
@@ -806,7 +810,8 @@ int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
                        buf->order = order;
                        buf->used = 0;
                        buf->offset = (dma->byte_count + byte_count + offset);
-                       buf->address = (void *)(page + offset);
+                       buf->address = (void *)(dmah->vaddr + offset);
+                       buf->bus_address = dmah->busaddr + offset;
                        buf->next = NULL;
                        buf->waiting = 0;
                        buf->pending = 0;
@@ -826,7 +831,7 @@ int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
                                          (count << page_order))
                                         * sizeof(*dma->pagelist),
                                         DRM_MEM_PAGES);
-                               up(&dev->struct_sem);
+                               mutex_unlock(&dev->struct_mutex);
                                atomic_dec(&dev->buf_alloc);
                                return -ENOMEM;
                        }
@@ -848,7 +853,7 @@ int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
                drm_free(temp_pagelist,
                         (dma->page_count + (count << page_order))
                         * sizeof(*dma->pagelist), DRM_MEM_PAGES);
-               up(&dev->struct_sem);
+               mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
@@ -873,7 +878,7 @@ int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
        dma->page_count += entry->seg_count << page_order;
        dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
 
-       up(&dev->struct_sem);
+       mutex_unlock(&dev->struct_mutex);
 
        request->count = entry->buf_count;
        request->size = size;
@@ -882,7 +887,6 @@ int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
        return 0;
 
 }
-
 EXPORT_SYMBOL(drm_addbufs_pci);
 
 static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
@@ -908,6 +912,9 @@ static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
        if (!dma)
                return -EINVAL;
 
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
        count = request->count;
        order = drm_order(request->size);
        size = 1 << order;
@@ -941,16 +948,16 @@ static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
        atomic_inc(&dev->buf_alloc);
        spin_unlock(&dev->count_lock);
 
-       down(&dev->struct_sem);
+       mutex_lock(&dev->struct_mutex);
        entry = &dma->bufs[order];
        if (entry->buf_count) {
-               up(&dev->struct_sem);
+               mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM; /* May only call once for each order */
        }
 
        if (count < 0 || count > 4096) {
-               up(&dev->struct_sem);
+               mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -EINVAL;
        }
@@ -958,7 +965,7 @@ static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
        entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
                                   DRM_MEM_BUFS);
        if (!entry->buflist) {
-               up(&dev->struct_sem);
+               mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
@@ -992,7 +999,7 @@ static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        drm_cleanup_buf_error(dev, entry);
-                       up(&dev->struct_sem);
+                       mutex_unlock(&dev->struct_mutex);
                        atomic_dec(&dev->buf_alloc);
                        return -ENOMEM;
                }
@@ -1015,7 +1022,7 @@ static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
        if (!temp_buflist) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev, entry);
-               up(&dev->struct_sem);
+               mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
@@ -1026,12 +1033,14 @@ static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
        }
 
        dma->buf_count += entry->buf_count;
+       dma->seg_count += entry->seg_count;
+       dma->page_count += byte_count >> PAGE_SHIFT;
        dma->byte_count += byte_count;
 
        DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
        DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
 
-       up(&dev->struct_sem);
+       mutex_unlock(&dev->struct_mutex);
 
        request->count = entry->buf_count;
        request->size = size;
@@ -1065,6 +1074,9 @@ static int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
        if (!dma)
                return -EINVAL;
 
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
        count = request->count;
        order = drm_order(request->size);
        size = 1 << order;
@@ -1098,16 +1110,16 @@ static int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
        atomic_inc(&dev->buf_alloc);
        spin_unlock(&dev->count_lock);
 
-       down(&dev->struct_sem);
+       mutex_lock(&dev->struct_mutex);
        entry = &dma->bufs[order];
        if (entry->buf_count) {
-               up(&dev->struct_sem);
+               mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM; /* May only call once for each order */
        }
 
        if (count < 0 || count > 4096) {
-               up(&dev->struct_sem);
+               mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -EINVAL;
        }
@@ -1115,7 +1127,7 @@ static int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
        entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
                                   DRM_MEM_BUFS);
        if (!entry->buflist) {
-               up(&dev->struct_sem);
+               mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
@@ -1148,7 +1160,7 @@ static int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        drm_cleanup_buf_error(dev, entry);
-                       up(&dev->struct_sem);
+                       mutex_unlock(&dev->struct_mutex);
                        atomic_dec(&dev->buf_alloc);
                        return -ENOMEM;
                }
@@ -1170,7 +1182,7 @@ static int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
        if (!temp_buflist) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev, entry);
-               up(&dev->struct_sem);
+               mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
@@ -1181,12 +1193,14 @@ static int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
        }
 
        dma->buf_count += entry->buf_count;
+       dma->seg_count += entry->seg_count;
+       dma->page_count += byte_count >> PAGE_SHIFT;
        dma->byte_count += byte_count;
 
        DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
        DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
 
-       up(&dev->struct_sem);
+       mutex_unlock(&dev->struct_mutex);
 
        request->count = entry->buf_count;
        request->size = size;
@@ -1197,6 +1211,7 @@ static int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
        return 0;
 }
 
+
 /**
  * Add buffers for DMA transfers (ioctl).
  *
@@ -1577,5 +1592,6 @@ int drm_order(unsigned long size)
 
        return order;
 }
-
 EXPORT_SYMBOL(drm_order);
+
+