mm, mempool: do not allow atomic resizing
author David Rientjes <rientjes@google.com>
Tue, 14 Apr 2015 22:48:21 +0000 (15:48 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Tue, 14 Apr 2015 23:49:06 +0000 (16:49 -0700)
Allocating a large number of elements in atomic context could quickly
deplete memory reserves, so just disallow atomic resizing entirely.

Nothing currently uses mempool_resize() with anything other than
GFP_KERNEL, so convert existing callers to drop the gfp_mask.

[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: David Rientjes <rientjes@google.com>
Acked-by: Steffen Maier <maier@linux.vnet.ibm.com> [zfcp]
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Steve French <sfrench@samba.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
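
For reference, a minimal sketch of the new calling convention (hypothetical driver code, not part of this patch): mempool_resize() now takes only the pool and the new minimum element count, must be called from a context that may sleep, and allocates internally with GFP_KERNEL.

    /* Hypothetical example, not part of this patch: growing a pool
     * from process context with the new two-argument interface.
     */
    #include <linux/mempool.h>

    static mempool_t *example_pool;	/* assumed to be created elsewhere */

    static int example_grow(int new_min_nr)
    {
    	int err;

    	/* May sleep: mempool_resize() now always uses GFP_KERNEL internally. */
    	err = mempool_resize(example_pool, new_min_nr);
    	if (err)
    		return err;	/* -ENOMEM if the element array could not be grown */
    	return 0;
    }
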
drivers/s390/scsi/zfcp_erp.c
fs/cifs/connect.c
include/linux/mempool.h
mm/mempool.c

diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 2c5d456..acde3f5 100644
@@ -738,11 +738,11 @@ static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *act)
                return ZFCP_ERP_FAILED;
 
        if (mempool_resize(act->adapter->pool.sr_data,
-                          act->adapter->stat_read_buf_num, GFP_KERNEL))
+                          act->adapter->stat_read_buf_num))
                return ZFCP_ERP_FAILED;
 
        if (mempool_resize(act->adapter->pool.status_read_req,
-                          act->adapter->stat_read_buf_num, GFP_KERNEL))
+                          act->adapter->stat_read_buf_num))
                return ZFCP_ERP_FAILED;
 
        atomic_set(&act->adapter->stat_miss, act->adapter->stat_read_buf_num);
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 480cf9c..f3bfe08 100644
@@ -773,8 +773,7 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server)
 
        length = atomic_dec_return(&tcpSesAllocCount);
        if (length > 0)
-               mempool_resize(cifs_req_poolp, length + cifs_min_rcv,
-                               GFP_KERNEL);
+               mempool_resize(cifs_req_poolp, length + cifs_min_rcv);
 }
 
 static int
@@ -848,8 +847,7 @@ cifs_demultiplex_thread(void *p)
 
        length = atomic_inc_return(&tcpSesAllocCount);
        if (length > 1)
-               mempool_resize(cifs_req_poolp, length + cifs_min_rcv,
-                               GFP_KERNEL);
+               mempool_resize(cifs_req_poolp, length + cifs_min_rcv);
 
        set_freezable();
        while (server->tcpStatus != CifsExiting) {
diff --git a/include/linux/mempool.h b/include/linux/mempool.h
index 39ed62a..b19b302 100644
@@ -29,7 +29,7 @@ extern mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
                        mempool_free_t *free_fn, void *pool_data,
                        gfp_t gfp_mask, int nid);
 
-extern int mempool_resize(mempool_t *pool, int new_min_nr, gfp_t gfp_mask);
+extern int mempool_resize(mempool_t *pool, int new_min_nr);
 extern void mempool_destroy(mempool_t *pool);
 extern void * mempool_alloc(mempool_t *pool, gfp_t gfp_mask);
 extern void mempool_free(void *element, mempool_t *pool);
diff --git a/mm/mempool.c b/mm/mempool.c
index e209c98..949970d 100644
@@ -113,23 +113,24 @@ EXPORT_SYMBOL(mempool_create_node);
  *              mempool_create().
  * @new_min_nr: the new minimum number of elements guaranteed to be
  *              allocated for this pool.
- * @gfp_mask:   the usual allocation bitmask.
  *
  * This function shrinks/grows the pool. In the case of growing,
  * it cannot be guaranteed that the pool will be grown to the new
  * size immediately, but new mempool_free() calls will refill it.
+ * This function may sleep.
  *
  * Note, the caller must guarantee that no mempool_destroy is called
  * while this function is running. mempool_alloc() & mempool_free()
  * might be called (eg. from IRQ contexts) while this function executes.
  */
-int mempool_resize(mempool_t *pool, int new_min_nr, gfp_t gfp_mask)
+int mempool_resize(mempool_t *pool, int new_min_nr)
 {
        void *element;
        void **new_elements;
        unsigned long flags;
 
        BUG_ON(new_min_nr <= 0);
+       might_sleep();
 
        spin_lock_irqsave(&pool->lock, flags);
        if (new_min_nr <= pool->min_nr) {
@@ -145,7 +146,8 @@ int mempool_resize(mempool_t *pool, int new_min_nr, gfp_t gfp_mask)
        spin_unlock_irqrestore(&pool->lock, flags);
 
        /* Grow the pool */
-       new_elements = kmalloc(new_min_nr * sizeof(*new_elements), gfp_mask);
+       new_elements = kmalloc_array(new_min_nr, sizeof(*new_elements),
+                                    GFP_KERNEL);
        if (!new_elements)
                return -ENOMEM;
 
@@ -164,7 +166,7 @@ int mempool_resize(mempool_t *pool, int new_min_nr, gfp_t gfp_mask)
 
        while (pool->curr_nr < pool->min_nr) {
                spin_unlock_irqrestore(&pool->lock, flags);
-               element = pool->alloc(gfp_mask, pool->pool_data);
+               element = pool->alloc(GFP_KERNEL, pool->pool_data);
                if (!element)
                        goto out;
                spin_lock_irqsave(&pool->lock, flags);
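
Because atomic resizing is no longer supported, any future caller that decides it needs to grow a pool while in atomic context would have to defer the resize to process context. A hypothetical sketch of one way to do that with a workqueue (the names and structure are illustrative only, not from this patch):

    /* Hypothetical sketch, not part of this patch: deferring a pool
     * resize out of atomic context so mempool_resize() can sleep.
     */
    #include <linux/mempool.h>
    #include <linux/workqueue.h>
    #include <linux/slab.h>
    #include <linux/printk.h>

    struct resize_request {
    	struct work_struct work;
    	mempool_t *pool;
    	int new_min_nr;
    };

    static void resize_worker(struct work_struct *work)
    {
    	struct resize_request *req = container_of(work, struct resize_request, work);

    	/* Process context: the GFP_KERNEL allocations inside mempool_resize()
    	 * are safe here.
    	 */
    	if (mempool_resize(req->pool, req->new_min_nr))
    		pr_warn("mempool resize to %d elements failed\n", req->new_min_nr);
    	kfree(req);
    }

    /* May be called from atomic context (e.g. under a spinlock or in an
     * interrupt handler); the actual resize runs later in process context.
     */
    static void request_resize(mempool_t *pool, int new_min_nr)
    {
    	struct resize_request *req = kzalloc(sizeof(*req), GFP_ATOMIC);

    	if (!req)
    		return;
    	req->pool = pool;
    	req->new_min_nr = new_min_nr;
    	INIT_WORK(&req->work, resize_worker);
    	schedule_work(&req->work);
    }
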