mm: dmapool: use provided gfp flags for all dma_alloc_coherent() calls
diff --git a/mm/dmapool.c b/mm/dmapool.c
index c5ab33b..da1b0f0 100644
--- a/mm/dmapool.c
+++ b/mm/dmapool.c
@@ -50,7 +50,6 @@ struct dma_pool {             /* the pool */
        size_t allocation;
        size_t boundary;
        char name[32];
-       wait_queue_head_t waitq;
        struct list_head pools;
 };
 
@@ -62,8 +61,6 @@ struct dma_page {             /* cacheable header for 'allocation' bytes */
        unsigned int offset;
 };
 
-#define        POOL_TIMEOUT_JIFFIES    ((100 /* msec */ * HZ) / 1000)
-
 static DEFINE_MUTEX(pools_lock);
 
 static ssize_t
@@ -172,7 +169,6 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev,
        retval->size = size;
        retval->boundary = boundary;
        retval->allocation = allocation;
-       init_waitqueue_head(&retval->waitq);
 
        if (dev) {
                int ret;
@@ -227,7 +223,6 @@ static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
                memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
 #endif
                pool_initialise_page(pool, page);
-               list_add(&page->page_list, &pool->page_list);
                page->in_use = 0;
                page->offset = 0;
        } else {
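
(Annotation, not part of the patch: with the list_add() dropped here, pool_alloc_page() no longer touches pool->page_list, so it can safely run without pool->lock held. A rough reconstruction of the function as it reads after this hunk, assuming the surrounding lines are unchanged from kernels of this era:)

static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;

	page = kmalloc(sizeof(*page), mem_flags);
	if (!page)
		return NULL;
	/* the backing memory is allocated with the caller's gfp flags */
	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
					 &page->dma, mem_flags);
	if (page->vaddr) {
#ifdef	DMAPOOL_DEBUG
		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
		pool_initialise_page(pool, page);
		page->in_use = 0;
		page->offset = 0;
	} else {
		kfree(page);
		page = NULL;
	}
	return page;
}
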
@@ -315,30 +310,21 @@ void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
        might_sleep_if(mem_flags & __GFP_WAIT);
 
        spin_lock_irqsave(&pool->lock, flags);
- restart:
        list_for_each_entry(page, &pool->page_list, page_list) {
                if (page->offset < pool->allocation)
                        goto ready;
        }
-       page = pool_alloc_page(pool, GFP_ATOMIC);
-       if (!page) {
-               if (mem_flags & __GFP_WAIT) {
-                       DECLARE_WAITQUEUE(wait, current);
 
-                       __set_current_state(TASK_UNINTERRUPTIBLE);
-                       __add_wait_queue(&pool->waitq, &wait);
-                       spin_unlock_irqrestore(&pool->lock, flags);
+       /* pool_alloc_page() might sleep, so temporarily drop &pool->lock */
+       spin_unlock_irqrestore(&pool->lock, flags);
 
-                       schedule_timeout(POOL_TIMEOUT_JIFFIES);
+       page = pool_alloc_page(pool, mem_flags);
+       if (!page)
+               return NULL;
 
-                       spin_lock_irqsave(&pool->lock, flags);
-                       __remove_wait_queue(&pool->waitq, &wait);
-                       goto restart;
-               }
-               retval = NULL;
-               goto done;
-       }
+       spin_lock_irqsave(&pool->lock, flags);
 
+       list_add(&page->page_list, &pool->page_list);
  ready:
        page->in_use++;
        offset = page->offset;
@@ -348,7 +334,6 @@ void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
 #ifdef DMAPOOL_DEBUG
        memset(retval, POOL_POISON_ALLOCATED, pool->size);
 #endif
- done:
        spin_unlock_irqrestore(&pool->lock, flags);
        return retval;
 }
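
(Annotation, not part of the patch: dropping pool->lock around the possibly-sleeping pool_alloc_page() call opens a small window in which two tasks can both miss in the page scan and both allocate a fresh page. Because the relocated list_add() runs only after the lock is re-taken, that race is benign, as the following interleaving sketch with hypothetical CPU labels shows:)

/*
 * CPU0					CPU1
 * ----					----
 * scan finds no free block
 *					scan finds no free block
 * spin_unlock_irqrestore()
 *					spin_unlock_irqrestore()
 * pool_alloc_page()			pool_alloc_page()
 * spin_lock_irqsave()
 * list_add(page A)
 * spin_unlock_irqrestore()
 *					spin_lock_irqsave()
 *					list_add(page B)
 *
 * Both pages end up on pool->page_list.  The cost is at most one
 * extra, mostly idle page, never a corrupted list; dma_pool_free()
 * deliberately keeps empty pages around anyway (see its comment below).
 */
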
@@ -435,8 +420,6 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
        page->in_use--;
        *(int *)vaddr = page->offset;
        page->offset = offset;
-       if (waitqueue_active(&pool->waitq))
-               wake_up_locked(&pool->waitq);
        /*
         * Resist a temptation to do
         *    if (!is_page_busy(page)) pool_free_page(pool, page);
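
(Annotation, not part of the patch: with the waitqueue and its wake-up gone, blocking behaviour is delegated to the allocator itself. A caller passing a sleeping gfp mask now waits inside dma_alloc_coherent() until memory is available; a GFP_ATOMIC caller gets NULL back immediately instead of ever having polled. A minimal, hypothetical caller sketch against the unchanged dmapool API; the device and function names are illustrative only:)

#include <linux/device.h>
#include <linux/dmapool.h>

static int example_setup(struct device *dev)	/* e.g. &pdev->dev */
{
	struct dma_pool *pool;
	dma_addr_t dma;
	void *buf;

	pool = dma_pool_create("example_desc", dev, 64, 64, 0);
	if (!pool)
		return -ENOMEM;

	/* GFP_KERNEL may now sleep inside dma_alloc_coherent() rather
	 * than on a dmapool waitqueue; GFP_ATOMIC fails fast instead. */
	buf = dma_pool_alloc(pool, GFP_KERNEL, &dma);
	if (!buf) {
		dma_pool_destroy(pool);
		return -ENOMEM;
	}

	/* ... hand 'dma' to the hardware, access 'buf' from the CPU ... */

	dma_pool_free(pool, buf, dma);
	dma_pool_destroy(pool);
	return 0;
}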