/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *               2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the licence that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#ifdef CONFIG_ZRAM_DEBUG
#define DEBUG
#endif

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/err.h>

#include "zram_drv.h"

static int zram_major;
static struct zram *zram_devices;
static const char *default_compressor = "lzo";

/* Module params (documentation at end) */
static unsigned int num_devices = 1;

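/*
 * ZRAM_ATTR_RO(name) generates a read-only sysfs attribute for the
 * per-device statistic zram->stats.<name>: a show handler that prints
 * the 64-bit counter, plus the matching device_attribute definition.
 */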
#define ZRAM_ATTR_RO(name) \
static ssize_t zram_attr_##name##_show(struct device *d, \
                struct device_attribute *attr, char *b) \
{ \
        struct zram *zram = dev_to_zram(d); \
        return scnprintf(b, PAGE_SIZE, "%llu\n", \
                (u64)atomic64_read(&zram->stats.name)); \
} \
static struct device_attribute dev_attr_##name = \
        __ATTR(name, S_IRUGO, zram_attr_##name##_show, NULL);

static inline int init_done(struct zram *zram)
{
        return zram->meta != NULL;
}

static inline struct zram *dev_to_zram(struct device *dev)
{
        return (struct zram *)dev_to_disk(dev)->private_data;
}

static ssize_t disksize_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct zram *zram = dev_to_zram(dev);

        return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
}

static ssize_t initstate_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        u32 val;
        struct zram *zram = dev_to_zram(dev);

        down_read(&zram->init_lock);
        val = init_done(zram);
        up_read(&zram->init_lock);

        return scnprintf(buf, PAGE_SIZE, "%u\n", val);
}

static ssize_t orig_data_size_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct zram *zram = dev_to_zram(dev);

        return scnprintf(buf, PAGE_SIZE, "%llu\n",
                (u64)(atomic64_read(&zram->stats.pages_stored)) << PAGE_SHIFT);
}

static ssize_t mem_used_total_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        u64 val = 0;
        struct zram *zram = dev_to_zram(dev);

        down_read(&zram->init_lock);
        if (init_done(zram)) {
                struct zram_meta *meta = zram->meta;
                val = zs_get_total_pages(meta->mem_pool);
        }
        up_read(&zram->init_lock);

        return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}

static ssize_t max_comp_streams_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        int val;
        struct zram *zram = dev_to_zram(dev);

        down_read(&zram->init_lock);
        val = zram->max_comp_streams;
        up_read(&zram->init_lock);

        return scnprintf(buf, PAGE_SIZE, "%d\n", val);
}

static ssize_t mem_limit_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        u64 val;
        struct zram *zram = dev_to_zram(dev);

        down_read(&zram->init_lock);
        val = zram->limit_pages;
        up_read(&zram->init_lock);

        return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}

static ssize_t mem_limit_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        u64 limit;
        char *tmp;
        struct zram *zram = dev_to_zram(dev);

        limit = memparse(buf, &tmp);
        if (buf == tmp) /* no chars parsed, invalid input */
                return -EINVAL;

        down_write(&zram->init_lock);
        zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT;
        up_write(&zram->init_lock);

        return len;
}

static ssize_t mem_used_max_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        u64 val = 0;
        struct zram *zram = dev_to_zram(dev);

        down_read(&zram->init_lock);
        if (init_done(zram))
                val = atomic_long_read(&zram->stats.max_used_pages);
        up_read(&zram->init_lock);

        return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}

static ssize_t mem_used_max_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        int err;
        unsigned long val;
        struct zram *zram = dev_to_zram(dev);

        err = kstrtoul(buf, 10, &val);
        if (err || val != 0)
                return -EINVAL;

        down_read(&zram->init_lock);
        if (init_done(zram)) {
                struct zram_meta *meta = zram->meta;
                atomic_long_set(&zram->stats.max_used_pages,
                                zs_get_total_pages(meta->mem_pool));
        }
        up_read(&zram->init_lock);

        return len;
}

static ssize_t max_comp_streams_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        int num;
        struct zram *zram = dev_to_zram(dev);
        int ret;

        ret = kstrtoint(buf, 0, &num);
        if (ret < 0)
                return ret;
        if (num < 1)
                return -EINVAL;

        down_write(&zram->init_lock);
        if (init_done(zram)) {
                if (!zcomp_set_max_streams(zram->comp, num)) {
                        pr_info("Cannot change max compression streams\n");
                        ret = -EINVAL;
                        goto out;
                }
        }

        zram->max_comp_streams = num;
        ret = len;
out:
        up_write(&zram->init_lock);
        return ret;
}

static ssize_t comp_algorithm_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        size_t sz;
        struct zram *zram = dev_to_zram(dev);

        down_read(&zram->init_lock);
        sz = zcomp_available_show(zram->compressor, buf);
        up_read(&zram->init_lock);

        return sz;
}

static ssize_t comp_algorithm_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        struct zram *zram = dev_to_zram(dev);

        down_write(&zram->init_lock);
        if (init_done(zram)) {
                up_write(&zram->init_lock);
                pr_info("Can't change algorithm for initialized device\n");
                return -EBUSY;
        }
        strlcpy(zram->compressor, buf, sizeof(zram->compressor));
        up_write(&zram->init_lock);
        return len;
}

/* flag operations require holding the table entry's ZRAM_ACCESS bit_spinlock */
static int zram_test_flag(struct zram_meta *meta, u32 index,
                        enum zram_pageflags flag)
{
        return meta->table[index].value & BIT(flag);
}

static void zram_set_flag(struct zram_meta *meta, u32 index,
                        enum zram_pageflags flag)
{
        meta->table[index].value |= BIT(flag);
}

static void zram_clear_flag(struct zram_meta *meta, u32 index,
                        enum zram_pageflags flag)
{
        meta->table[index].value &= ~BIT(flag);
}

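/*
 * Each meta table entry packs the compressed object size into the low
 * ZRAM_FLAG_SHIFT bits of ->value and keeps the zram_pageflags above
 * them, so a single word carries both the size and the flags.
 */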
static size_t zram_get_obj_size(struct zram_meta *meta, u32 index)
{
        return meta->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1);
}

static void zram_set_obj_size(struct zram_meta *meta,
                        u32 index, size_t size)
{
        unsigned long flags = meta->table[index].value >> ZRAM_FLAG_SHIFT;

        meta->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size;
}

static inline int is_partial_io(struct bio_vec *bvec)
{
        return bvec->bv_len != PAGE_SIZE;
}

/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline int valid_io_request(struct zram *zram, struct bio *bio)
{
        u64 start, end, bound;

        /* unaligned request */
        if (unlikely(bio->bi_iter.bi_sector &
                        (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
                return 0;
        if (unlikely(bio->bi_iter.bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
                return 0;

        start = bio->bi_iter.bi_sector;
        end = start + (bio->bi_iter.bi_size >> SECTOR_SHIFT);
        bound = zram->disksize >> SECTOR_SHIFT;
        /* out of range */
        if (unlikely(start >= bound || end > bound || start > end))
                return 0;

        /* I/O request is valid */
        return 1;
}

static void zram_meta_free(struct zram_meta *meta)
{
        zs_destroy_pool(meta->mem_pool);
        vfree(meta->table);
        kfree(meta);
}

static struct zram_meta *zram_meta_alloc(u64 disksize)
{
        size_t num_pages;
        struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);
        if (!meta)
                goto out;

        num_pages = disksize >> PAGE_SHIFT;
        meta->table = vzalloc(num_pages * sizeof(*meta->table));
        if (!meta->table) {
                pr_err("Error allocating zram address table\n");
                goto free_meta;
        }

        meta->mem_pool = zs_create_pool(GFP_NOIO | __GFP_HIGHMEM);
        if (!meta->mem_pool) {
                pr_err("Error creating memory pool\n");
                goto free_table;
        }

        return meta;

free_table:
        vfree(meta->table);
free_meta:
        kfree(meta);
        meta = NULL;
out:
        return meta;
}

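/*
 * Advance the (page index, byte offset) cursor past the bio vector
 * that was just processed; crossing a page boundary bumps the index.
 */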
static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
        if (*offset + bvec->bv_len >= PAGE_SIZE)
                (*index)++;
        *offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}

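/* Return 1 if the page contains only zero bytes, 0 otherwise. */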
static int page_zero_filled(void *ptr)
{
        unsigned int pos;
        unsigned long *page;

        page = (unsigned long *)ptr;

        for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
                if (page[pos])
                        return 0;
        }

        return 1;
}

static void handle_zero_page(struct bio_vec *bvec)
{
        struct page *page = bvec->bv_page;
        void *user_mem;

        user_mem = kmap_atomic(page);
        if (is_partial_io(bvec))
                memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
        else
                clear_page(user_mem);
        kunmap_atomic(user_mem);

        flush_dcache_page(page);
}

/*
 * To protect concurrent access to the same index entry,
 * the caller should hold this table index entry's bit_spinlock to
 * indicate that this entry is being accessed.
 */
static void zram_free_page(struct zram *zram, size_t index)
{
        struct zram_meta *meta = zram->meta;
        unsigned long handle = meta->table[index].handle;

        if (unlikely(!handle)) {
                /*
                 * No memory is allocated for zero filled pages.
                 * Simply clear the zero page flag.
                 */
                if (zram_test_flag(meta, index, ZRAM_ZERO)) {
                        zram_clear_flag(meta, index, ZRAM_ZERO);
                        atomic64_dec(&zram->stats.zero_pages);
                }
                return;
        }

        zs_free(meta->mem_pool, handle);

        atomic64_sub(zram_get_obj_size(meta, index),
                        &zram->stats.compr_data_size);
        atomic64_dec(&zram->stats.pages_stored);

        meta->table[index].handle = 0;
        zram_set_obj_size(meta, index, 0);
}

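/*
 * Decompress the object stored at @index into the PAGE_SIZE buffer
 * @mem; zero-filled and unallocated slots simply produce a clear page.
 */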
static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
{
        int ret = 0;
        unsigned char *cmem;
        struct zram_meta *meta = zram->meta;
        unsigned long handle;
        size_t size;

        bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
        handle = meta->table[index].handle;
        size = zram_get_obj_size(meta, index);

        if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
                bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
                clear_page(mem);
                return 0;
        }

        cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
        if (size == PAGE_SIZE)
                copy_page(mem, cmem);
        else
                ret = zcomp_decompress(zram->comp, cmem, size, mem);
        zs_unmap_object(meta->mem_pool, handle);
        bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

        /* Should NEVER happen. Return bio error if it does. */
        if (unlikely(ret)) {
                pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
                return ret;
        }

        return 0;
}

static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
                          u32 index, int offset, struct bio *bio)
{
        int ret;
        struct page *page;
        unsigned char *user_mem, *uncmem = NULL;
        struct zram_meta *meta = zram->meta;

        page = bvec->bv_page;

        bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
        if (unlikely(!meta->table[index].handle) ||
                        zram_test_flag(meta, index, ZRAM_ZERO)) {
                bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
                handle_zero_page(bvec);
                return 0;
        }
        bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

        if (is_partial_io(bvec))
                /* Use a temporary buffer to decompress the page */
                uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);

        user_mem = kmap_atomic(page);
        if (!is_partial_io(bvec))
                uncmem = user_mem;

        if (!uncmem) {
                pr_info("Unable to allocate temp memory\n");
                ret = -ENOMEM;
                goto out_cleanup;
        }

        ret = zram_decompress_page(zram, uncmem, index);
        /* Should NEVER happen. Return bio error if it does. */
        if (unlikely(ret))
                goto out_cleanup;

        if (is_partial_io(bvec))
                memcpy(user_mem + bvec->bv_offset, uncmem + offset,
                                bvec->bv_len);

        flush_dcache_page(page);
        ret = 0;
out_cleanup:
        kunmap_atomic(user_mem);
        if (is_partial_io(bvec))
                kfree(uncmem);
        return ret;
}

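/*
 * Lock-free update of stats.max_used_pages: retry the cmpxchg until
 * either the stored maximum is at least @pages or our value sticks.
 */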
static inline void update_used_max(struct zram *zram,
                                const unsigned long pages)
{
        unsigned long old_max, cur_max;

        old_max = atomic_long_read(&zram->stats.max_used_pages);

        do {
                cur_max = old_max;
                if (pages > cur_max)
                        old_max = atomic_long_cmpxchg(
                                &zram->stats.max_used_pages, cur_max, pages);
        } while (old_max != cur_max);
}

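/*
 * Store one bio vector at @index: read-modify-write for partial pages,
 * detect zero-filled data, otherwise compress and stash the result in
 * the zsmalloc pool, honouring the configured memory limit.
 */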
static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
                           int offset)
{
        int ret = 0;
        size_t clen;
        unsigned long handle;
        struct page *page;
        unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
        struct zram_meta *meta = zram->meta;
        struct zcomp_strm *zstrm;
        bool locked = false;
        unsigned long alloced_pages;

        page = bvec->bv_page;
        if (is_partial_io(bvec)) {
                /*
                 * This is a partial IO. We need to read the full page
                 * before writing the changes.
                 */
                uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
                if (!uncmem) {
                        ret = -ENOMEM;
                        goto out;
                }
                ret = zram_decompress_page(zram, uncmem, index);
                if (ret)
                        goto out;
        }

        zstrm = zcomp_strm_find(zram->comp);
        locked = true;
        user_mem = kmap_atomic(page);

        if (is_partial_io(bvec)) {
                memcpy(uncmem + offset, user_mem + bvec->bv_offset,
                       bvec->bv_len);
                kunmap_atomic(user_mem);
                user_mem = NULL;
        } else {
                uncmem = user_mem;
        }

        if (page_zero_filled(uncmem)) {
                if (user_mem)
                        kunmap_atomic(user_mem);
                /* Free memory associated with this sector now. */
                bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
                zram_free_page(zram, index);
                zram_set_flag(meta, index, ZRAM_ZERO);
                bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

                atomic64_inc(&zram->stats.zero_pages);
                ret = 0;
                goto out;
        }

        ret = zcomp_compress(zram->comp, zstrm, uncmem, &clen);
        if (!is_partial_io(bvec)) {
                kunmap_atomic(user_mem);
                user_mem = NULL;
                uncmem = NULL;
        }

        if (unlikely(ret)) {
                pr_err("Compression failed! err=%d\n", ret);
                goto out;
        }
        src = zstrm->buffer;
        if (unlikely(clen > max_zpage_size)) {
                clen = PAGE_SIZE;
                if (is_partial_io(bvec))
                        src = uncmem;
        }

        handle = zs_malloc(meta->mem_pool, clen);
        if (!handle) {
                pr_info("Error allocating memory for compressed page: %u, size=%zu\n",
                        index, clen);
                ret = -ENOMEM;
                goto out;
        }

        alloced_pages = zs_get_total_pages(meta->mem_pool);
        if (zram->limit_pages && alloced_pages > zram->limit_pages) {
                zs_free(meta->mem_pool, handle);
                ret = -ENOMEM;
                goto out;
        }

        update_used_max(zram, alloced_pages);

        cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);

        if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
                src = kmap_atomic(page);
                copy_page(cmem, src);
                kunmap_atomic(src);
        } else {
                memcpy(cmem, src, clen);
        }

        zcomp_strm_release(zram->comp, zstrm);
        locked = false;
        zs_unmap_object(meta->mem_pool, handle);

        /*
         * Free memory associated with this sector
         * before overwriting unused sectors.
         */
        bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
        zram_free_page(zram, index);

        meta->table[index].handle = handle;
        zram_set_obj_size(meta, index, clen);
        bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

        /* Update stats */
        atomic64_add(clen, &zram->stats.compr_data_size);
        atomic64_inc(&zram->stats.pages_stored);
out:
        if (locked)
                zcomp_strm_release(zram->comp, zstrm);
        if (is_partial_io(bvec))
                kfree(uncmem);
        return ret;
}

static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
                        int offset, struct bio *bio)
{
        int ret;
        int rw = bio_data_dir(bio);

        if (rw == READ) {
                atomic64_inc(&zram->stats.num_reads);
                ret = zram_bvec_read(zram, bvec, index, offset, bio);
        } else {
                atomic64_inc(&zram->stats.num_writes);
                ret = zram_bvec_write(zram, bvec, index, offset);
        }

        if (unlikely(ret)) {
                if (rw == READ)
                        atomic64_inc(&zram->stats.failed_reads);
                else
                        atomic64_inc(&zram->stats.failed_writes);
        }

        return ret;
}

/*
 * zram_bio_discard - handler on discard request
 * @index: physical block index in PAGE_SIZE units
 * @offset: byte offset within physical block
 */
static void zram_bio_discard(struct zram *zram, u32 index,
                             int offset, struct bio *bio)
{
        size_t n = bio->bi_iter.bi_size;
        struct zram_meta *meta = zram->meta;

        /*
         * zram manages data in physical block size units. Because logical block
         * size isn't identical to the physical block size on some arch, we
         * could get a discard request pointing to a specific offset within a
         * certain physical block. Although we can handle this request by
         * reading that physical block and decompressing and partially zeroing
         * and re-compressing and then re-storing it, this isn't reasonable
         * because our intent with a discard request is to save memory. So
         * skipping this logical block is appropriate here.
         */
        if (offset) {
                if (n <= (PAGE_SIZE - offset))
                        return;

                n -= (PAGE_SIZE - offset);
                index++;
        }

        while (n >= PAGE_SIZE) {
                bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
                zram_free_page(zram, index);
                bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
                atomic64_inc(&zram->stats.notify_free);
                index++;
                n -= PAGE_SIZE;
        }
}

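/*
 * Tear down an initialized device: free every stored object, destroy
 * the compression backend and the meta table, and clear the stats.
 */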
static void zram_reset_device(struct zram *zram, bool reset_capacity)
{
        size_t index;
        struct zram_meta *meta;

        down_write(&zram->init_lock);

        zram->limit_pages = 0;

        if (!init_done(zram)) {
                up_write(&zram->init_lock);
                return;
        }

        meta = zram->meta;
        /* Free all pages that are still in this zram device */
        for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
                unsigned long handle = meta->table[index].handle;
                if (!handle)
                        continue;

                zs_free(meta->mem_pool, handle);
        }

        zcomp_destroy(zram->comp);
        zram->max_comp_streams = 1;

        zram_meta_free(zram->meta);
        zram->meta = NULL;
        /* Reset stats */
        memset(&zram->stats, 0, sizeof(zram->stats));

        zram->disksize = 0;
        if (reset_capacity)
                set_capacity(zram->disk, 0);

        up_write(&zram->init_lock);

        /*
         * Revalidate disk out of the init_lock to avoid lockdep splat.
         * It's okay because disk's capacity is protected by init_lock
         * so that revalidate_disk always sees up-to-date capacity.
         */
        if (reset_capacity)
                revalidate_disk(zram->disk);
}

static ssize_t disksize_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        u64 disksize;
        struct zcomp *comp;
        struct zram_meta *meta;
        struct zram *zram = dev_to_zram(dev);
        int err;

        disksize = memparse(buf, NULL);
        if (!disksize)
                return -EINVAL;

        disksize = PAGE_ALIGN(disksize);
        meta = zram_meta_alloc(disksize);
        if (!meta)
                return -ENOMEM;

        comp = zcomp_create(zram->compressor, zram->max_comp_streams);
        if (IS_ERR(comp)) {
                pr_info("Cannot initialise %s compressing backend\n",
                                zram->compressor);
                err = PTR_ERR(comp);
                goto out_free_meta;
        }

        down_write(&zram->init_lock);
        if (init_done(zram)) {
                pr_info("Cannot change disksize for initialized device\n");
                err = -EBUSY;
                goto out_destroy_comp;
        }

        zram->meta = meta;
        zram->comp = comp;
        zram->disksize = disksize;
        set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
        up_write(&zram->init_lock);

        /*
         * Revalidate disk out of the init_lock to avoid lockdep splat.
         * It's okay because disk's capacity is protected by init_lock
         * so that revalidate_disk always sees up-to-date capacity.
         */
        revalidate_disk(zram->disk);

        return len;

out_destroy_comp:
        up_write(&zram->init_lock);
        zcomp_destroy(comp);
out_free_meta:
        zram_meta_free(meta);
        return err;
}

static ssize_t reset_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        int ret;
        unsigned short do_reset;
        struct zram *zram;
        struct block_device *bdev;

        zram = dev_to_zram(dev);
        bdev = bdget_disk(zram->disk, 0);

        if (!bdev)
                return -ENOMEM;

        /* Do not reset an active device! */
        if (bdev->bd_holders) {
                ret = -EBUSY;
                goto out;
        }

        ret = kstrtou16(buf, 10, &do_reset);
        if (ret)
                goto out;

        if (!do_reset) {
                ret = -EINVAL;
                goto out;
        }

        /* Make sure all pending I/O is finished */
        fsync_bdev(bdev);
        zram_reset_device(zram, true);

        bdput(bdev);
        return len;

out:
        bdput(bdev);
        return ret;
}

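/*
 * Walk the bio segment by segment, splitting any vector that crosses a
 * PAGE_SIZE boundary so that zram_bvec_rw() only ever sees one page.
 */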
static void __zram_make_request(struct zram *zram, struct bio *bio)
{
        int offset;
        u32 index;
        struct bio_vec bvec;
        struct bvec_iter iter;

        index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
        offset = (bio->bi_iter.bi_sector &
                  (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

        if (unlikely(bio->bi_rw & REQ_DISCARD)) {
                zram_bio_discard(zram, index, offset, bio);
                bio_endio(bio, 0);
                return;
        }

        bio_for_each_segment(bvec, bio, iter) {
                int max_transfer_size = PAGE_SIZE - offset;

                if (bvec.bv_len > max_transfer_size) {
                        /*
                         * zram_bvec_rw() can only make operation on a single
                         * zram page. Split the bio vector.
                         */
                        struct bio_vec bv;

                        bv.bv_page = bvec.bv_page;
                        bv.bv_len = max_transfer_size;
                        bv.bv_offset = bvec.bv_offset;

                        if (zram_bvec_rw(zram, &bv, index, offset, bio) < 0)
                                goto out;

                        bv.bv_len = bvec.bv_len - max_transfer_size;
                        bv.bv_offset += max_transfer_size;
                        if (zram_bvec_rw(zram, &bv, index + 1, 0, bio) < 0)
                                goto out;
                } else
                        if (zram_bvec_rw(zram, &bvec, index, offset, bio) < 0)
                                goto out;

                update_position(&index, &offset, &bvec);
        }

        set_bit(BIO_UPTODATE, &bio->bi_flags);
        bio_endio(bio, 0);
        return;

out:
        bio_io_error(bio);
}

/*
 * Handler function for all zram I/O requests.
 */
static void zram_make_request(struct request_queue *queue, struct bio *bio)
{
        struct zram *zram = queue->queuedata;

        down_read(&zram->init_lock);
        if (unlikely(!init_done(zram)))
                goto error;

        if (!valid_io_request(zram, bio)) {
                atomic64_inc(&zram->stats.invalid_io);
                goto error;
        }

        __zram_make_request(zram, bio);
        up_read(&zram->init_lock);

        return;

error:
        up_read(&zram->init_lock);
        bio_io_error(bio);
}

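/*
 * Called by the swap layer when a swap slot backed by this device is
 * freed, letting us drop the compressed copy immediately.
 */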
static void zram_slot_free_notify(struct block_device *bdev,
                                unsigned long index)
{
        struct zram *zram;
        struct zram_meta *meta;

        zram = bdev->bd_disk->private_data;
        meta = zram->meta;

        bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
        zram_free_page(zram, index);
        bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
        atomic64_inc(&zram->stats.notify_free);
}

static const struct block_device_operations zram_devops = {
        .swap_slot_free_notify = zram_slot_free_notify,
        .owner = THIS_MODULE
};

static DEVICE_ATTR(disksize, S_IRUGO | S_IWUSR,
                disksize_show, disksize_store);
static DEVICE_ATTR(initstate, S_IRUGO, initstate_show, NULL);
static DEVICE_ATTR(reset, S_IWUSR, NULL, reset_store);
static DEVICE_ATTR(orig_data_size, S_IRUGO, orig_data_size_show, NULL);
static DEVICE_ATTR(mem_used_total, S_IRUGO, mem_used_total_show, NULL);
static DEVICE_ATTR(mem_limit, S_IRUGO | S_IWUSR, mem_limit_show,
                mem_limit_store);
static DEVICE_ATTR(mem_used_max, S_IRUGO | S_IWUSR, mem_used_max_show,
                mem_used_max_store);
static DEVICE_ATTR(max_comp_streams, S_IRUGO | S_IWUSR,
                max_comp_streams_show, max_comp_streams_store);
static DEVICE_ATTR(comp_algorithm, S_IRUGO | S_IWUSR,
                comp_algorithm_show, comp_algorithm_store);

ZRAM_ATTR_RO(num_reads);
ZRAM_ATTR_RO(num_writes);
ZRAM_ATTR_RO(failed_reads);
ZRAM_ATTR_RO(failed_writes);
ZRAM_ATTR_RO(invalid_io);
ZRAM_ATTR_RO(notify_free);
ZRAM_ATTR_RO(zero_pages);
ZRAM_ATTR_RO(compr_data_size);

static struct attribute *zram_disk_attrs[] = {
        &dev_attr_disksize.attr,
        &dev_attr_initstate.attr,
        &dev_attr_reset.attr,
        &dev_attr_num_reads.attr,
        &dev_attr_num_writes.attr,
        &dev_attr_failed_reads.attr,
        &dev_attr_failed_writes.attr,
        &dev_attr_invalid_io.attr,
        &dev_attr_notify_free.attr,
        &dev_attr_zero_pages.attr,
        &dev_attr_orig_data_size.attr,
        &dev_attr_compr_data_size.attr,
        &dev_attr_mem_used_total.attr,
        &dev_attr_mem_limit.attr,
        &dev_attr_mem_used_max.attr,
        &dev_attr_max_comp_streams.attr,
        &dev_attr_comp_algorithm.attr,
        NULL,
};

static struct attribute_group zram_disk_attr_group = {
        .attrs = zram_disk_attrs,
};

static int create_device(struct zram *zram, int device_id)
{
        int ret = -ENOMEM;

        init_rwsem(&zram->init_lock);

        zram->queue = blk_alloc_queue(GFP_KERNEL);
        if (!zram->queue) {
                pr_err("Error allocating disk queue for device %d\n",
                        device_id);
                goto out;
        }

        blk_queue_make_request(zram->queue, zram_make_request);
        zram->queue->queuedata = zram;

        /* gendisk structure */
        zram->disk = alloc_disk(1);
        if (!zram->disk) {
                pr_warn("Error allocating disk structure for device %d\n",
                        device_id);
                goto out_free_queue;
        }

        zram->disk->major = zram_major;
        zram->disk->first_minor = device_id;
        zram->disk->fops = &zram_devops;
        zram->disk->queue = zram->queue;
        zram->disk->private_data = zram;
        snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

        /* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
        set_capacity(zram->disk, 0);
        /* zram devices sort of resemble non-rotational disks */
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
        queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue);
        /*
         * To ensure that we always get PAGE_SIZE aligned
         * and n*PAGE_SIZE sized I/O requests.
         */
        blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
        blk_queue_logical_block_size(zram->disk->queue,
                                        ZRAM_LOGICAL_BLOCK_SIZE);
        blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
        blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
        zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
        zram->disk->queue->limits.max_discard_sectors = UINT_MAX;
        /*
         * zram_bio_discard() will clear all logical blocks if logical block
         * size is identical to the physical block size (PAGE_SIZE). But if it
         * is different, we will skip discarding some parts of logical blocks
         * in the part of the request range which isn't aligned to physical
         * block size. So we can't ensure that all discarded logical blocks
         * are zeroed.
         */
        if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
                zram->disk->queue->limits.discard_zeroes_data = 1;
        else
                zram->disk->queue->limits.discard_zeroes_data = 0;
        queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zram->disk->queue);

        add_disk(zram->disk);

        ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
                                &zram_disk_attr_group);
        if (ret < 0) {
                pr_warn("Error creating sysfs group\n");
                goto out_free_disk;
        }
        strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));
        zram->meta = NULL;
        zram->max_comp_streams = 1;
        return 0;

out_free_disk:
        del_gendisk(zram->disk);
        put_disk(zram->disk);
out_free_queue:
        blk_cleanup_queue(zram->queue);
out:
        return ret;
}

static void destroy_device(struct zram *zram)
{
        sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
                        &zram_disk_attr_group);

        del_gendisk(zram->disk);
        put_disk(zram->disk);

        blk_cleanup_queue(zram->queue);
}

static int __init zram_init(void)
{
        int ret, dev_id;

        if (num_devices > max_num_devices) {
                pr_warn("Invalid value for num_devices: %u\n",
                                num_devices);
                ret = -EINVAL;
                goto out;
        }

        zram_major = register_blkdev(0, "zram");
        if (zram_major <= 0) {
                pr_warn("Unable to get major number\n");
                ret = -EBUSY;
                goto out;
        }

        /* Allocate the device array and initialize each one */
        zram_devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
        if (!zram_devices) {
                ret = -ENOMEM;
                goto unregister;
        }

        for (dev_id = 0; dev_id < num_devices; dev_id++) {
                ret = create_device(&zram_devices[dev_id], dev_id);
                if (ret)
                        goto free_devices;
        }

        pr_info("Created %u device(s) ...\n", num_devices);

        return 0;

free_devices:
        while (dev_id)
                destroy_device(&zram_devices[--dev_id]);
        kfree(zram_devices);
unregister:
        unregister_blkdev(zram_major, "zram");
out:
        return ret;
}

static void __exit zram_exit(void)
{
        int i;
        struct zram *zram;

        for (i = 0; i < num_devices; i++) {
                zram = &zram_devices[i];

                destroy_device(zram);
                /*
                 * Shouldn't access zram->disk after destroy_device
                 * because destroy_device already released zram->disk.
                 */
                zram_reset_device(zram, false);
        }

        unregister_blkdev(zram_major, "zram");

        kfree(zram_devices);
        pr_debug("Cleanup done!\n");
}

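/*
 * Typical userspace usage, a sketch assuming the lzo default backend
 * (see Documentation/blockdev/zram.txt for the full walk-through):
 *
 *	modprobe zram num_devices=1
 *	echo 512M > /sys/block/zram0/disksize
 *	mkswap /dev/zram0 && swapon /dev/zram0
 *
 * comp_algorithm and max_comp_streams must be configured before
 * disksize is set; writing a non-zero value to the reset attribute
 * returns an unused device to its uninitialized state.
 */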
module_init(zram_init);
module_exit(zram_exit);

module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of zram devices");

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");