static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
enum data_mode *data_mode)
{
+ unsigned noio_flag;
+ void *ptr;
+
if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) {
*data_mode = DATA_MODE_SLAB;
return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask);
}
*data_mode = DATA_MODE_VMALLOC;
- return __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
+
+ /*
+ * __vmalloc allocates the data pages and auxiliary structures with
+ * gfp_flags that were specified, but pagetables are always allocated
+ * with GFP_KERNEL, no matter what was specified as gfp_mask.
+ *
+ * Consequently, we must set the per-process flag PF_MEMALLOC so that
+ * all allocations done by this process (including pagetables) skip
+ * direct reclaim and therefore cannot issue any I/O.
+ */
+
+ if (gfp_mask & __GFP_NORETRY) {
+ noio_flag = current->flags & PF_MEMALLOC;
+ current->flags |= PF_MEMALLOC;
+ }
+
+ ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
+
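+ /* Restore the caller's original PF_MEMALLOC state saved above. */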
+ if (gfp_mask & __GFP_NORETRY)
+ current->flags = (current->flags & ~PF_MEMALLOC) | noio_flag;
+
+ return ptr;
}
/*
b->list_mode = dirty;
list_del(&b->lru_list);
list_add(&b->lru_list, &c->lru[dirty]);
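+ /* Record the access time so the shrinker does not treat a recently used buffer as old. */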
+ b->last_accessed = jiffies;
}
/*----------------------------------------------------------------
buffers = DM_BUFIO_MIN_BUFFERS;
*limit_buffers = buffers;
- *threshold_buffers = buffers * DM_BUFIO_WRITEBACK_PERCENT / 100;
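+ /* mult_frac() avoids overflow in the intermediate multiplication. */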
+ *threshold_buffers = mult_frac(buffers,
+ DM_BUFIO_WRITEBACK_PERCENT, 100);
}
/*
* Test if the buffer is unused and too old, and commit it.
- * At if noio is set, we must not do any I/O because we hold
- * dm_bufio_clients_lock and we would risk deadlock if the I/O gets rerouted to
- * different bufio client.
+ * And if GFP_NOFS is used, we must not do any I/O because we hold
+ * dm_bufio_clients_lock and we would risk deadlock if the I/O gets
+ * rerouted to different bufio client.
*/
static int __cleanup_old_buffer(struct dm_buffer *b, gfp_t gfp,
unsigned long max_jiffies)
if (jiffies - b->last_accessed < max_jiffies)
return 1;
- if (!(gfp & __GFP_IO)) {
+ if (!(gfp & __GFP_FS)) {
if (test_bit(B_READING, &b->state) ||
test_bit(B_WRITING, &b->state) ||
test_bit(B_DIRTY, &b->state))
unsigned long r;
unsigned long nr_to_scan = sc->nr_to_scan;
- if (sc->gfp_mask & __GFP_IO)
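+ /* Without __GFP_FS we must not block on the client lock or we risk deadlock; just try it and give up if contended. */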
+ if (sc->gfp_mask & __GFP_FS)
dm_bufio_lock(c);
else if (!dm_bufio_trylock(c))
return !nr_to_scan ? 0 : -1;
{
__u64 mem;
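+ /* Explicitly start the global allocation statistics from zero. */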
+ dm_bufio_allocated_kmem_cache = 0;
+ dm_bufio_allocated_get_free_pages = 0;
+ dm_bufio_allocated_vmalloc = 0;
+ dm_bufio_current_allocated = 0;
+
memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches);
memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names);
- mem = (__u64)((totalram_pages - totalhigh_pages) *
- DM_BUFIO_MEMORY_PERCENT / 100) << PAGE_SHIFT;
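+ /* mult_frac() avoids overflow in the percentage calculation. */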
+ mem = (__u64)mult_frac(totalram_pages - totalhigh_pages,
+ DM_BUFIO_MEMORY_PERCENT, 100) << PAGE_SHIFT;
if (mem > ULONG_MAX)
mem = ULONG_MAX;
#ifdef CONFIG_MMU
- /*
- * Get the size of vmalloc space the same way as VMALLOC_TOTAL
- * in fs/proc/internal.h
- */
- if (mem > (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100)
- mem = (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100;
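+ /*
+ * VMALLOC_END - VMALLOC_START is the size of vmalloc space, as computed
+ * by VMALLOC_TOTAL in fs/proc/internal.h; mult_frac() avoids overflow.
+ */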
+ if (mem > mult_frac(VMALLOC_END - VMALLOC_START, DM_BUFIO_VMALLOC_PERCENT, 100))
+ mem = mult_frac(VMALLOC_END - VMALLOC_START, DM_BUFIO_VMALLOC_PERCENT, 100);
#endif
dm_bufio_default_cache_size = mem;