1 /*
2  * zcache-main.c
3  *
4  * Copyright (c) 2010,2011, Dan Magenheimer, Oracle Corp.
5  * Copyright (c) 2010,2011, Nitin Gupta
6  *
7  * Zcache provides an in-kernel "host implementation" for transcendent memory
8  * and, thus indirectly, for cleancache and frontswap.  Zcache includes two
9  * page-accessible memory [1] interfaces, both utilizing lzo1x compression:
10  * 1) "compression buddies" ("zbud") is used for ephemeral pages
11  * 2) xvmalloc is used for persistent pages.
12  * Xvmalloc (based on the TLSF allocator) has very low fragmentation
13  * and so maximizes space efficiency, while zbud allows pairs (and potentially,
14  * in the future, more than a pair of) compressed pages to be closely linked
15  * so that reclaiming can be done via the kernel's physical-page-oriented
16  * "shrinker" interface.
17  *
18  * [1] For a definition of page-accessible memory (aka PAM), see:
19  *   http://marc.info/?l=linux-mm&m=127811271605009
20  */
21
22 #include <linux/module.h>
23 #include <linux/cpu.h>
24 #include <linux/highmem.h>
25 #include <linux/list.h>
26 #include <linux/lzo.h>
27 #include <linux/slab.h>
28 #include <linux/spinlock.h>
29 #include <linux/types.h>
30 #include <linux/atomic.h>
31 #include <linux/math64.h>
32 #include "tmem.h"
33
34 #include "../zram/xvmalloc.h" /* if built in drivers/staging */
35
36 #if (!defined(CONFIG_CLEANCACHE) && !defined(CONFIG_FRONTSWAP))
37 #error "zcache is useless without CONFIG_CLEANCACHE or CONFIG_FRONTSWAP"
38 #endif
39 #ifdef CONFIG_CLEANCACHE
40 #include <linux/cleancache.h>
41 #endif
42 #ifdef CONFIG_FRONTSWAP
43 #include <linux/frontswap.h>
44 #endif
45
46 #if 0
47 /* this is more aggressive but may cause other problems? */
48 #define ZCACHE_GFP_MASK (GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN)
49 #else
50 #define ZCACHE_GFP_MASK \
51         (__GFP_FS | __GFP_NORETRY | __GFP_NOWARN | __GFP_NOMEMALLOC)
52 #endif
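/*
 * Rough meaning of the default mask: fail fast (__GFP_NORETRY), fail
 * silently (__GFP_NOWARN), and never dip into the emergency reserves
 * (__GFP_NOMEMALLOC).  The GFP_ATOMIC variant above would instead mark
 * allocations as high priority, making success under memory pressure
 * more likely at the cost of depleting reserves that other atomic
 * allocators may need.
 */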
53
54 #define MAX_POOLS_PER_CLIENT 16
55
56 #define MAX_CLIENTS 16
57 #define LOCAL_CLIENT ((uint16_t)-1)
58
59 MODULE_LICENSE("GPL");
60
61 struct zcache_client {
62         struct tmem_pool *tmem_pools[MAX_POOLS_PER_CLIENT];
63         struct xv_pool *xvpool;
64         bool allocated;
65         atomic_t refcount;
66 };
67
68 static struct zcache_client zcache_host;
69 static struct zcache_client zcache_clients[MAX_CLIENTS];
70
71 static inline uint16_t get_client_id_from_client(struct zcache_client *cli)
72 {
73         BUG_ON(cli == NULL);
74         if (cli == &zcache_host)
75                 return LOCAL_CLIENT;
76         return cli - &zcache_clients[0];
77 }
78
79 static inline bool is_local_client(struct zcache_client *cli)
80 {
81         return cli == &zcache_host;
82 }
83
84 /**********
85  * Compression buddies ("zbud") provides for packing two (or, possibly
86  * in the future, more) compressed ephemeral pages into a single "raw"
87  * (physical) page and tracking them with data structures so that
88  * the raw pages can be easily reclaimed.
89  *
90  * A zbud page ("zbpg") is an aligned page containing a list_head,
91  * a lock, and two "zbud headers".  The remainder of the physical
92  * page is divided up into aligned 64-byte "chunks" which contain
93  * the compressed data for zero, one, or two zbuds.  Each zbpg
94  * resides on: (1) an "unused list" if it has no zbuds; (2) a
95  * "buddied" list if it is fully populated  with two zbuds; or
96  * (3) one of PAGE_SIZE/64 "unbuddied" lists indexed by how many chunks
97  * the one unbuddied zbud uses.  The data inside a zbpg cannot be
98  * read or written unless the zbpg's lock is held.
99  */
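/*
 * Sketch of a zbpg, e.g. for PAGE_SIZE == 4096 (the exact header size
 * is architecture- and config-dependent):
 *
 *   +-------------------------------------------------------------+
 *   | struct zbud_page (list_head, lock, buddy[0..1], sentinel)   |
 *   +-------------------------------------------------------------+
 *   | buddy[0] data, starting at the first chunk boundary         |
 *   |                     ... free chunks ...                     |
 *   | buddy[1] data, aligned so that it ends at the page's end    |
 *   +-------------------------------------------------------------+
 *
 * See zbud_data() below for the exact offset arithmetic.
 */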
100
101 #define ZBH_SENTINEL  0x43214321
102 #define ZBPG_SENTINEL  0xdeadbeef
103
104 #define ZBUD_MAX_BUDS 2
105
106 struct zbud_hdr {
107         uint16_t client_id;
108         uint16_t pool_id;
109         struct tmem_oid oid;
110         uint32_t index;
111         uint16_t size; /* compressed size in bytes, zero means unused */
112         DECL_SENTINEL
113 };
114
115 struct zbud_page {
116         struct list_head bud_list;
117         spinlock_t lock;
118         struct zbud_hdr buddy[ZBUD_MAX_BUDS];
119         DECL_SENTINEL
120         /* followed by NCHUNKS aligned CHUNK_SIZE-byte chunks */
121 };
122
123 #define CHUNK_SHIFT     6
124 #define CHUNK_SIZE      (1 << CHUNK_SHIFT)
125 #define CHUNK_MASK      (~(CHUNK_SIZE-1))
126 #define NCHUNKS         (((PAGE_SIZE - sizeof(struct zbud_page)) & \
127                                 CHUNK_MASK) >> CHUNK_SHIFT)
128 #define MAX_CHUNK       (NCHUNKS-1)
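/*
 * Worked example, assuming PAGE_SIZE == 4096: CHUNK_SIZE is 64, so a
 * raw page holds 64 chunks gross.  NCHUNKS subtracts sizeof(struct
 * zbud_page) and rounds down to a chunk boundary, e.g. 62 chunks for a
 * header of ~100 bytes, making MAX_CHUNK 61 and the largest single
 * zbud 61 * 64 == 3904 bytes (see zbud_max_buddy_size() below).
 */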
129
130 static struct {
131         struct list_head list;
132         unsigned count;
133 } zbud_unbuddied[NCHUNKS];
134 /* list N contains pages with N chunks USED and NCHUNKS-N unused */
135 /* element 0 is never used but optimizing that isn't worth it */
136 static unsigned long zbud_cumul_chunk_counts[NCHUNKS];
137
138 static struct list_head zbud_buddied_list;
139 static unsigned long zcache_zbud_buddied_count;
140
141 /* protects the buddied list and all unbuddied lists */
142 static DEFINE_SPINLOCK(zbud_budlists_spinlock);
143
144 static LIST_HEAD(zbpg_unused_list);
145 static unsigned long zcache_zbpg_unused_list_count;
146
147 /* protects the unused page list */
148 static DEFINE_SPINLOCK(zbpg_unused_list_spinlock);
149
150 static atomic_t zcache_zbud_curr_raw_pages;
151 static atomic_t zcache_zbud_curr_zpages;
152 static unsigned long zcache_zbud_curr_zbytes;
153 static unsigned long zcache_zbud_cumul_zpages;
154 static unsigned long zcache_zbud_cumul_zbytes;
155 static unsigned long zcache_compress_poor;
156 static unsigned long zcache_mean_compress_poor;
157
158 /* forward references */
159 static void *zcache_get_free_page(void);
160 static void zcache_free_page(void *p);
161
162 /*
163  * zbud helper functions
164  */
165
166 static inline unsigned zbud_max_buddy_size(void)
167 {
168         return MAX_CHUNK << CHUNK_SHIFT;
169 }
170
171 static inline unsigned zbud_size_to_chunks(unsigned size)
172 {
173         BUG_ON(size == 0 || size > zbud_max_buddy_size());
174         return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
175 }
176
177 static inline int zbud_budnum(struct zbud_hdr *zh)
178 {
179         unsigned offset = (unsigned long)zh & (PAGE_SIZE - 1);
180         struct zbud_page *zbpg = NULL;
181         unsigned budnum = -1U;
182         int i;
183
184         for (i = 0; i < ZBUD_MAX_BUDS; i++)
185                 if (offset == offsetof(typeof(*zbpg), buddy[i])) {
186                         budnum = i;
187                         break;
188                 }
189         BUG_ON(budnum == -1U);
190         return budnum;
191 }
192
193 static char *zbud_data(struct zbud_hdr *zh, unsigned size)
194 {
195         struct zbud_page *zbpg;
196         char *p;
197         unsigned budnum;
198
199         ASSERT_SENTINEL(zh, ZBH);
200         budnum = zbud_budnum(zh);
201         BUG_ON(size == 0 || size > zbud_max_buddy_size());
202         zbpg = container_of(zh, struct zbud_page, buddy[budnum]);
203         ASSERT_SPINLOCK(&zbpg->lock);
204         p = (char *)zbpg;
205         if (budnum == 0)
206                 p += ((sizeof(struct zbud_page) + CHUNK_SIZE - 1) &
207                                                         CHUNK_MASK);
208         else if (budnum == 1)
209                 p += PAGE_SIZE - ((size + CHUNK_SIZE - 1) & CHUNK_MASK);
210         return p;
211 }
212
213 /*
214  * zbud raw page management
215  */
216
217 static struct zbud_page *zbud_alloc_raw_page(void)
218 {
219         struct zbud_page *zbpg = NULL;
220         struct zbud_hdr *zh0, *zh1;
221         bool recycled = false;
222
223         /* if any pages on the zbpg list, use one */
224         spin_lock(&zbpg_unused_list_spinlock);
225         if (!list_empty(&zbpg_unused_list)) {
226                 zbpg = list_first_entry(&zbpg_unused_list,
227                                 struct zbud_page, bud_list);
228                 list_del_init(&zbpg->bud_list);
229                 zcache_zbpg_unused_list_count--;
230                 recycled = true;
231         }
232         spin_unlock(&zbpg_unused_list_spinlock);
233         if (zbpg == NULL)
234                 /* none on zbpg list, try to get a kernel page */
235                 zbpg = zcache_get_free_page();
236         if (likely(zbpg != NULL)) {
237                 INIT_LIST_HEAD(&zbpg->bud_list);
238                 zh0 = &zbpg->buddy[0]; zh1 = &zbpg->buddy[1];
239                 spin_lock_init(&zbpg->lock);
240                 if (recycled) {
241                         ASSERT_INVERTED_SENTINEL(zbpg, ZBPG);
242                         SET_SENTINEL(zbpg, ZBPG);
243                         BUG_ON(zh0->size != 0 || tmem_oid_valid(&zh0->oid));
244                         BUG_ON(zh1->size != 0 || tmem_oid_valid(&zh1->oid));
245                 } else {
246                         atomic_inc(&zcache_zbud_curr_raw_pages);
247                         INIT_LIST_HEAD(&zbpg->bud_list);
248                         SET_SENTINEL(zbpg, ZBPG);
249                         zh0->size = 0; zh1->size = 0;
250                         tmem_oid_set_invalid(&zh0->oid);
251                         tmem_oid_set_invalid(&zh1->oid);
252                 }
253         }
254         return zbpg;
255 }
256
257 static void zbud_free_raw_page(struct zbud_page *zbpg)
258 {
259         struct zbud_hdr *zh0 = &zbpg->buddy[0], *zh1 = &zbpg->buddy[1];
260
261         ASSERT_SENTINEL(zbpg, ZBPG);
262         BUG_ON(!list_empty(&zbpg->bud_list));
263         ASSERT_SPINLOCK(&zbpg->lock);
264         BUG_ON(zh0->size != 0 || tmem_oid_valid(&zh0->oid));
265         BUG_ON(zh1->size != 0 || tmem_oid_valid(&zh1->oid));
266         INVERT_SENTINEL(zbpg, ZBPG);
267         spin_unlock(&zbpg->lock);
268         spin_lock(&zbpg_unused_list_spinlock);
269         list_add(&zbpg->bud_list, &zbpg_unused_list);
270         zcache_zbpg_unused_list_count++;
271         spin_unlock(&zbpg_unused_list_spinlock);
272 }
273
274 /*
275  * core zbud handling routines
276  */
277
278 static unsigned zbud_free(struct zbud_hdr *zh)
279 {
280         unsigned size;
281
282         ASSERT_SENTINEL(zh, ZBH);
283         BUG_ON(!tmem_oid_valid(&zh->oid));
284         size = zh->size;
285         BUG_ON(zh->size == 0 || zh->size > zbud_max_buddy_size());
286         zh->size = 0;
287         tmem_oid_set_invalid(&zh->oid);
288         INVERT_SENTINEL(zh, ZBH);
289         zcache_zbud_curr_zbytes -= size;
290         atomic_dec(&zcache_zbud_curr_zpages);
291         return size;
292 }
293
294 static void zbud_free_and_delist(struct zbud_hdr *zh)
295 {
296         unsigned chunks;
297         struct zbud_hdr *zh_other;
298         unsigned budnum = zbud_budnum(zh), size;
299         struct zbud_page *zbpg =
300                 container_of(zh, struct zbud_page, buddy[budnum]);
301
302         spin_lock(&zbpg->lock);
303         if (list_empty(&zbpg->bud_list)) {
304                 /* ignore zombie page... see zbud_evict_pages() */
305                 spin_unlock(&zbpg->lock);
306                 return;
307         }
308         size = zbud_free(zh);
309         ASSERT_SPINLOCK(&zbpg->lock);
310         zh_other = &zbpg->buddy[(budnum == 0) ? 1 : 0];
311         if (zh_other->size == 0) { /* was unbuddied: unlist and free */
312                 chunks = zbud_size_to_chunks(size);
313                 spin_lock(&zbud_budlists_spinlock);
314                 BUG_ON(list_empty(&zbud_unbuddied[chunks].list));
315                 list_del_init(&zbpg->bud_list);
316                 zbud_unbuddied[chunks].count--;
317                 spin_unlock(&zbud_budlists_spinlock);
318                 zbud_free_raw_page(zbpg);
319         } else { /* was buddied: move remaining buddy to unbuddied list */
320                 chunks = zbud_size_to_chunks(zh_other->size);
321                 spin_lock(&zbud_budlists_spinlock);
322                 list_del_init(&zbpg->bud_list);
323                 zcache_zbud_buddied_count--;
324                 list_add_tail(&zbpg->bud_list, &zbud_unbuddied[chunks].list);
325                 zbud_unbuddied[chunks].count++;
326                 spin_unlock(&zbud_budlists_spinlock);
327                 spin_unlock(&zbpg->lock);
328         }
329 }
330
331 static struct zbud_hdr *zbud_create(uint16_t client_id, uint16_t pool_id,
332                                         struct tmem_oid *oid,
333                                         uint32_t index, struct page *page,
334                                         void *cdata, unsigned size)
335 {
336         struct zbud_hdr *zh0, *zh1, *zh = NULL;
337         struct zbud_page *zbpg = NULL, *ztmp;
338         unsigned nchunks;
339         char *to;
340         int i, found_good_buddy = 0;
341
342         nchunks = zbud_size_to_chunks(size);
343         for (i = MAX_CHUNK - nchunks + 1; i > 0; i--) {
344                 spin_lock(&zbud_budlists_spinlock);
345                 if (!list_empty(&zbud_unbuddied[i].list)) {
346                         list_for_each_entry_safe(zbpg, ztmp,
347                                     &zbud_unbuddied[i].list, bud_list) {
348                                 if (spin_trylock(&zbpg->lock)) {
349                                         found_good_buddy = i;
350                                         goto found_unbuddied;
351                                 }
352                         }
353                 }
354                 spin_unlock(&zbud_budlists_spinlock);
355         }
356         /* didn't find a good buddy, try allocating a new page */
357         zbpg = zbud_alloc_raw_page();
358         if (unlikely(zbpg == NULL))
359                 goto out;
360         /* got a page; add it to the right unbuddied list under the locks */
361         spin_lock(&zbpg->lock);
362         spin_lock(&zbud_budlists_spinlock);
363         list_add_tail(&zbpg->bud_list, &zbud_unbuddied[nchunks].list);
364         zbud_unbuddied[nchunks].count++;
365         zh = &zbpg->buddy[0];
366         goto init_zh;
367
368 found_unbuddied:
369         ASSERT_SPINLOCK(&zbpg->lock);
370         zh0 = &zbpg->buddy[0]; zh1 = &zbpg->buddy[1];
371         BUG_ON(!((zh0->size == 0) ^ (zh1->size == 0)));
372         if (zh0->size != 0) { /* buddy0 in use, buddy1 is vacant */
373                 ASSERT_SENTINEL(zh0, ZBH);
374                 zh = zh1;
375         } else if (zh1->size != 0) { /* buddy1 in use, buddy0 is vacant */
376                 ASSERT_SENTINEL(zh1, ZBH);
377                 zh = zh0;
378         } else
379                 BUG();
380         list_del_init(&zbpg->bud_list);
381         zbud_unbuddied[found_good_buddy].count--;
382         list_add_tail(&zbpg->bud_list, &zbud_buddied_list);
383         zcache_zbud_buddied_count++;
384
385 init_zh:
386         SET_SENTINEL(zh, ZBH);
387         zh->size = size;
388         zh->index = index;
389         zh->oid = *oid;
390         zh->pool_id = pool_id;
391         zh->client_id = client_id;
392         /* can wait to copy the data until the list locks are dropped */
393         spin_unlock(&zbud_budlists_spinlock);
394
395         to = zbud_data(zh, size);
396         memcpy(to, cdata, size);
397         spin_unlock(&zbpg->lock);
398         zbud_cumul_chunk_counts[nchunks]++;
399         atomic_inc(&zcache_zbud_curr_zpages);
400         zcache_zbud_cumul_zpages++;
401         zcache_zbud_curr_zbytes += size;
402         zcache_zbud_cumul_zbytes += size;
403 out:
404         return zh;
405 }
406
407 static int zbud_decompress(struct page *page, struct zbud_hdr *zh)
408 {
409         struct zbud_page *zbpg;
410         unsigned budnum = zbud_budnum(zh);
411         size_t out_len = PAGE_SIZE;
412         char *to_va, *from_va;
413         unsigned size;
414         int ret = 0;
415
416         zbpg = container_of(zh, struct zbud_page, buddy[budnum]);
417         spin_lock(&zbpg->lock);
418         if (list_empty(&zbpg->bud_list)) {
419                 /* ignore zombie page... see zbud_evict_pages() */
420                 ret = -EINVAL;
421                 goto out;
422         }
423         ASSERT_SENTINEL(zh, ZBH);
424         BUG_ON(zh->size == 0 || zh->size > zbud_max_buddy_size());
425         to_va = kmap_atomic(page, KM_USER0);
426         size = zh->size;
427         from_va = zbud_data(zh, size);
428         ret = lzo1x_decompress_safe(from_va, size, to_va, &out_len);
429         BUG_ON(ret != LZO_E_OK);
430         BUG_ON(out_len != PAGE_SIZE);
431         kunmap_atomic(to_va, KM_USER0);
432 out:
433         spin_unlock(&zbpg->lock);
434         return ret;
435 }
436
437 /*
438  * The following routines handle shrinking of ephemeral pages by evicting
439  * pages "least valuable" first.
440  */
441
442 static unsigned long zcache_evicted_raw_pages;
443 static unsigned long zcache_evicted_buddied_pages;
444 static unsigned long zcache_evicted_unbuddied_pages;
445
446 static struct tmem_pool *zcache_get_pool_by_id(uint16_t cli_id,
447                                                 uint16_t poolid);
448 static void zcache_put_pool(struct tmem_pool *pool);
449
450 /*
451  * Flush and free all zbuds in a zbpg, then free the pageframe
452  */
453 static void zbud_evict_zbpg(struct zbud_page *zbpg)
454 {
455         struct zbud_hdr *zh;
456         int i, j;
457         uint32_t pool_id[ZBUD_MAX_BUDS], client_id[ZBUD_MAX_BUDS];
458         uint32_t index[ZBUD_MAX_BUDS];
459         struct tmem_oid oid[ZBUD_MAX_BUDS];
460         struct tmem_pool *pool;
461
462         ASSERT_SPINLOCK(&zbpg->lock);
463         BUG_ON(!list_empty(&zbpg->bud_list));
464         for (i = 0, j = 0; i < ZBUD_MAX_BUDS; i++) {
465                 zh = &zbpg->buddy[i];
466                 if (zh->size) {
467                         client_id[j] = zh->client_id;
468                         pool_id[j] = zh->pool_id;
469                         oid[j] = zh->oid;
470                         index[j] = zh->index;
471                         j++;
472                         zbud_free(zh);
473                 }
474         }
475         spin_unlock(&zbpg->lock);
476         for (i = 0; i < j; i++) {
477                 pool = zcache_get_pool_by_id(client_id[i], pool_id[i]);
478                 if (pool != NULL) {
479                         tmem_flush_page(pool, &oid[i], index[i]);
480                         zcache_put_pool(pool);
481                 }
482         }
483         ASSERT_SENTINEL(zbpg, ZBPG);
484         spin_lock(&zbpg->lock);
485         zbud_free_raw_page(zbpg);
486 }
487
488 /*
489  * Free nr pages.  This code is funky because we want to hold the locks
490  * protecting various lists for as short a time as possible, and in some
491  * circumstances the list may change asynchronously when the list lock is
492  * not held.  In some cases we also trylock not only to avoid waiting on a
493  * page in use by another cpu, but also to avoid potential deadlock due to
494  * lock inversion.
495  */
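/*
 * Concretely: zbud_create() takes zbud_budlists_spinlock and then a
 * zbpg->lock, while zbud_free_and_delist() takes a zbpg->lock and then
 * zbud_budlists_spinlock.  Spinning unconditionally on a zbpg->lock
 * while holding the budlists lock could therefore deadlock; the
 * trylock-and-skip pattern avoids the inversion.
 */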
496 static void zbud_evict_pages(int nr)
497 {
498         struct zbud_page *zbpg;
499         int i;
500
501         /* first try freeing any pages on unused list */
502 retry_unused_list:
503         spin_lock_bh(&zbpg_unused_list_spinlock);
504         if (!list_empty(&zbpg_unused_list)) {
505                 /* can't walk list here, since it may change when unlocked */
506                 zbpg = list_first_entry(&zbpg_unused_list,
507                                 struct zbud_page, bud_list);
508                 list_del_init(&zbpg->bud_list);
509                 zcache_zbpg_unused_list_count--;
510                 atomic_dec(&zcache_zbud_curr_raw_pages);
511                 spin_unlock_bh(&zbpg_unused_list_spinlock);
512                 zcache_free_page(zbpg);
513                 zcache_evicted_raw_pages++;
514                 if (--nr <= 0)
515                         goto out;
516                 goto retry_unused_list;
517         }
518         spin_unlock_bh(&zbpg_unused_list_spinlock);
519
520         /* now try freeing unbuddied pages, starting with least space avail */
521         for (i = 0; i < MAX_CHUNK; i++) {
522 retry_unbud_list_i:
523                 spin_lock_bh(&zbud_budlists_spinlock);
524                 if (list_empty(&zbud_unbuddied[i].list)) {
525                         spin_unlock_bh(&zbud_budlists_spinlock);
526                         continue;
527                 }
528                 list_for_each_entry(zbpg, &zbud_unbuddied[i].list, bud_list) {
529                         if (unlikely(!spin_trylock(&zbpg->lock)))
530                                 continue;
531                         list_del_init(&zbpg->bud_list);
532                         zbud_unbuddied[i].count--;
533                         spin_unlock(&zbud_budlists_spinlock);
534                         zcache_evicted_unbuddied_pages++;
535                         /* want budlists unlocked when doing zbpg eviction */
536                         zbud_evict_zbpg(zbpg);
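                        /*
                         * bottom halves have stayed disabled since the
                         * spin_lock_bh() above: the budlists lock was
                         * dropped with a plain spin_unlock(), so only
                         * the local_bh_enable() below re-enables them
                         */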
537                         local_bh_enable();
538                         if (--nr <= 0)
539                                 goto out;
540                         goto retry_unbud_list_i;
541                 }
542                 spin_unlock_bh(&zbud_budlists_spinlock);
543         }
544
545         /* as a last resort, free buddied pages */
546 retry_bud_list:
547         spin_lock_bh(&zbud_budlists_spinlock);
548         if (list_empty(&zbud_buddied_list)) {
549                 spin_unlock_bh(&zbud_budlists_spinlock);
550                 goto out;
551         }
552         list_for_each_entry(zbpg, &zbud_buddied_list, bud_list) {
553                 if (unlikely(!spin_trylock(&zbpg->lock)))
554                         continue;
555                 list_del_init(&zbpg->bud_list);
556                 zcache_zbud_buddied_count--;
557                 spin_unlock(&zbud_budlists_spinlock);
558                 zcache_evicted_buddied_pages++;
559                 /* want budlists unlocked when doing zbpg eviction */
560                 zbud_evict_zbpg(zbpg);
561                 local_bh_enable();
562                 if (--nr <= 0)
563                         goto out;
564                 goto retry_bud_list;
565         }
566         spin_unlock_bh(&zbud_budlists_spinlock);
567 out:
568         return;
569 }
570
571 static void zbud_init(void)
572 {
573         int i;
574
575         INIT_LIST_HEAD(&zbud_buddied_list);
576         zcache_zbud_buddied_count = 0;
577         for (i = 0; i < NCHUNKS; i++) {
578                 INIT_LIST_HEAD(&zbud_unbuddied[i].list);
579                 zbud_unbuddied[i].count = 0;
580         }
581 }
582
583 #ifdef CONFIG_SYSFS
584 /*
585  * These sysfs routines show a nice distribution of how many zbpg's are
586  * currently (and have ever been placed) in each unbuddied list.  It's fun
587  * to watch but can probably go away before final merge.
588  */
589 static int zbud_show_unbuddied_list_counts(char *buf)
590 {
591         int i;
592         char *p = buf;
593
594         for (i = 0; i < NCHUNKS; i++)
595                 p += sprintf(p, "%u ", zbud_unbuddied[i].count);
596         return p - buf;
597 }
598
599 static int zbud_show_cumul_chunk_counts(char *buf)
600 {
601         unsigned long i, chunks = 0, total_chunks = 0, sum_total_chunks = 0;
602         unsigned long total_chunks_lte_21 = 0, total_chunks_lte_32 = 0;
603         unsigned long total_chunks_lte_42 = 0;
604         char *p = buf;
605
606         for (i = 0; i < NCHUNKS; i++) {
607                 p += sprintf(p, "%lu ", zbud_cumul_chunk_counts[i]);
608                 chunks += zbud_cumul_chunk_counts[i];
609                 total_chunks += zbud_cumul_chunk_counts[i];
610                 sum_total_chunks += i * zbud_cumul_chunk_counts[i];
611                 if (i == 21)
612                         total_chunks_lte_21 = total_chunks;
613                 if (i == 32)
614                         total_chunks_lte_32 = total_chunks;
615                 if (i == 42)
616                         total_chunks_lte_42 = total_chunks;
617         }
618         p += sprintf(p, "<=21:%lu <=32:%lu <=42:%lu, mean:%lu\n",
619                 total_chunks_lte_21, total_chunks_lte_32, total_chunks_lte_42,
620                 chunks == 0 ? 0 : sum_total_chunks / chunks);
621         return p - buf;
622 }
623 #endif
624
625 /**********
626  * This "zv" PAM implementation combines the TLSF-based xvMalloc
627  * with lzo1x compression to maximize the amount of data that can
628  * be packed into a physical page.
629  *
630  * Zv represents a PAM page with the pool id, oid, and index immediately
631  * preceding the compressed data; its size is recovered via xv_get_object_size().
632  */
633
634 #define ZVH_SENTINEL  0x43214321
635
636 struct zv_hdr {
637         uint32_t pool_id;
638         struct tmem_oid oid;
639         uint32_t index;
640         DECL_SENTINEL
641 };
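/*
 * Layout of one zv allocation inside an xvmalloc pool:
 *
 *   +---------------------------------+-----------------------------+
 *   | struct zv_hdr                   | clen bytes of lzo1x output  |
 *   | (pool_id, oid, index, sentinel) | (written by zv_create())    |
 *   +---------------------------------+-----------------------------+
 *
 * The compressed length is not stored anywhere: xv_get_object_size()
 * returns sizeof(struct zv_hdr) + clen, from which zv_free() and
 * zv_decompress() recover it.
 */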
642
643 /* rudimentary policy limits */
644 /* total number of persistent pages may not exceed this percentage */
645 static unsigned int zv_page_count_policy_percent = 75;
646 /*
647  * byte count defining poor compression; pages with greater zsize will be
648  * rejected
649  */
650 static unsigned int zv_max_zsize = (PAGE_SIZE / 8) * 7;
651 /*
652  * byte count defining poor *mean* compression; pages with greater zsize
653  * will be rejected until sufficient better-compressed pages are accepted
654  * driving the mean below this threshold
655  */
656 static unsigned int zv_max_mean_zsize = (PAGE_SIZE / 8) * 5;
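/*
 * With 4KiB pages the defaults above work out to zv_max_zsize == 3584
 * bytes (7/8 of a page) and zv_max_mean_zsize == 2560 bytes (5/8 of a
 * page).
 */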
657
658 static unsigned long zv_curr_dist_counts[NCHUNKS];
659 static unsigned long zv_cumul_dist_counts[NCHUNKS];
660
661 static struct zv_hdr *zv_create(struct xv_pool *xvpool, uint32_t pool_id,
662                                 struct tmem_oid *oid, uint32_t index,
663                                 void *cdata, unsigned clen)
664 {
665         struct page *page;
666         struct zv_hdr *zv = NULL;
667         uint32_t offset;
668         int alloc_size = clen + sizeof(struct zv_hdr);
669         int chunks = (alloc_size + (CHUNK_SIZE - 1)) >> CHUNK_SHIFT;
670         int ret;
671
672         BUG_ON(!irqs_disabled());
673         BUG_ON(chunks >= NCHUNKS);
674         ret = xv_malloc(xvpool, alloc_size,
675                         &page, &offset, ZCACHE_GFP_MASK);
676         if (unlikely(ret))
677                 goto out;
678         zv_curr_dist_counts[chunks]++;
679         zv_cumul_dist_counts[chunks]++;
680         zv = kmap_atomic(page, KM_USER0) + offset;
681         zv->index = index;
682         zv->oid = *oid;
683         zv->pool_id = pool_id;
684         SET_SENTINEL(zv, ZVH);
685         memcpy((char *)zv + sizeof(struct zv_hdr), cdata, clen);
686         kunmap_atomic(zv, KM_USER0);
687 out:
688         return zv;
689 }
690
691 static void zv_free(struct xv_pool *xvpool, struct zv_hdr *zv)
692 {
693         unsigned long flags;
694         struct page *page;
695         uint32_t offset;
696         uint16_t size = xv_get_object_size(zv);
697         int chunks = (size + (CHUNK_SIZE - 1)) >> CHUNK_SHIFT;
698
699         ASSERT_SENTINEL(zv, ZVH);
700         BUG_ON(chunks >= NCHUNKS);
701         zv_curr_dist_counts[chunks]--;
702         size -= sizeof(*zv);
703         BUG_ON(size == 0);
704         INVERT_SENTINEL(zv, ZVH);
705         page = virt_to_page(zv);
706         offset = (unsigned long)zv & ~PAGE_MASK;
707         local_irq_save(flags);
708         xv_free(xvpool, page, offset);
709         local_irq_restore(flags);
710 }
711
712 static void zv_decompress(struct page *page, struct zv_hdr *zv)
713 {
714         size_t clen = PAGE_SIZE;
715         char *to_va;
716         unsigned size;
717         int ret;
718
719         ASSERT_SENTINEL(zv, ZVH);
720         size = xv_get_object_size(zv) - sizeof(*zv);
721         BUG_ON(size == 0);
722         to_va = kmap_atomic(page, KM_USER0);
723         ret = lzo1x_decompress_safe((char *)zv + sizeof(*zv),
724                                         size, to_va, &clen);
725         kunmap_atomic(to_va, KM_USER0);
726         BUG_ON(ret != LZO_E_OK);
727         BUG_ON(clen != PAGE_SIZE);
728 }
729
730 #ifdef CONFIG_SYSFS
731 /*
732  * show a distribution of compression stats for zv pages.
733  */
734
735 static int zv_curr_dist_counts_show(char *buf)
736 {
737         unsigned long i, n, chunks = 0, sum_total_chunks = 0;
738         char *p = buf;
739
740         for (i = 0; i < NCHUNKS; i++) {
741                 n = zv_curr_dist_counts[i];
742                 p += sprintf(p, "%lu ", n);
743                 chunks += n;
744                 sum_total_chunks += i * n;
745         }
746         p += sprintf(p, "mean:%lu\n",
747                 chunks == 0 ? 0 : sum_total_chunks / chunks);
748         return p - buf;
749 }
750
751 static int zv_cumul_dist_counts_show(char *buf)
752 {
753         unsigned long i, n, chunks = 0, sum_total_chunks = 0;
754         char *p = buf;
755
756         for (i = 0; i < NCHUNKS; i++) {
757                 n = zv_cumul_dist_counts[i];
758                 p += sprintf(p, "%lu ", n);
759                 chunks += n;
760                 sum_total_chunks += i * n;
761         }
762         p += sprintf(p, "mean:%lu\n",
763                 chunks == 0 ? 0 : sum_total_chunks / chunks);
764         return p - buf;
765 }
766
767 /*
768  * setting zv_max_zsize via sysfs causes all persistent (e.g. swap)
769  * pages that don't compress to less than this value (including metadata
770  * overhead) to be rejected.  We don't allow the value to get too close
771  * to PAGE_SIZE.
772  */
773 static ssize_t zv_max_zsize_show(struct kobject *kobj,
774                                     struct kobj_attribute *attr,
775                                     char *buf)
776 {
777         return sprintf(buf, "%u\n", zv_max_zsize);
778 }
779
780 static ssize_t zv_max_zsize_store(struct kobject *kobj,
781                                     struct kobj_attribute *attr,
782                                     const char *buf, size_t count)
783 {
784         unsigned long val;
785         int err;
786
787         if (!capable(CAP_SYS_ADMIN))
788                 return -EPERM;
789
790         err = kstrtoul(buf, 10, &val);
791         if (err || (val == 0) || (val > (PAGE_SIZE / 8) * 7))
792                 return -EINVAL;
793         zv_max_zsize = val;
794         return count;
795 }
796
797 /*
798  * setting zv_max_mean_zsize via sysfs causes all persistent (e.g. swap)
799  * pages that don't compress to less than this value (including metadata
800  * overhead) to be rejected UNLESS the mean compression is also smaller
801  * than this value.  In other words, we are load-balancing-by-zsize the
802  * accepted pages.  Again, we don't allow the value to get too close
803  * to PAGE_SIZE.
804  */
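/*
 * Example with the 4KiB-page defaults: a page compressing to 3000 bytes
 * passes the zv_max_zsize test (3000 <= 3584) but, being larger than
 * zv_max_mean_zsize (2560), is accepted only while the mean zsize of
 * already-accepted pages is still at or below 2560; see
 * zcache_pampd_create().
 */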
805 static ssize_t zv_max_mean_zsize_show(struct kobject *kobj,
806                                     struct kobj_attribute *attr,
807                                     char *buf)
808 {
809         return sprintf(buf, "%u\n", zv_max_mean_zsize);
810 }
811
812 static ssize_t zv_max_mean_zsize_store(struct kobject *kobj,
813                                     struct kobj_attribute *attr,
814                                     const char *buf, size_t count)
815 {
816         unsigned long val;
817         int err;
818
819         if (!capable(CAP_SYS_ADMIN))
820                 return -EPERM;
821
822         err = kstrtoul(buf, 10, &val);
823         if (err || (val == 0) || (val > (PAGE_SIZE / 8) * 7))
824                 return -EINVAL;
825         zv_max_mean_zsize = val;
826         return count;
827 }
828
829 /*
830  * setting zv_page_count_policy_percent via sysfs sets an upper bound of
831  * persistent (e.g. swap) pages that will be retained according to:
832  *     (zv_page_count_policy_percent * totalram_pages) / 100
833  * when that limit is reached, further puts will be rejected (until
834  * some pages have been flushed).  Note that, due to compression,
835  * this number may exceed 100; it defaults to 75 and we set an
836  * arbitrary limit of 150.  A poor choice will almost certainly result
837  * in OOMs, so this value should only be changed prudently.
838  */
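/*
 * Example with 4KiB pages on a 1GiB machine (totalram_pages == 262144):
 * the default of 75 caps retained persistent pages at 196608, and the
 * maximum of 150 at 393216, i.e. more compressed pages than pageframes,
 * which only compression makes possible.
 */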
839 static ssize_t zv_page_count_policy_percent_show(struct kobject *kobj,
840                                                  struct kobj_attribute *attr,
841                                                  char *buf)
842 {
843         return sprintf(buf, "%u\n", zv_page_count_policy_percent);
844 }
845
846 static ssize_t zv_page_count_policy_percent_store(struct kobject *kobj,
847                                                   struct kobj_attribute *attr,
848                                                   const char *buf, size_t count)
849 {
850         unsigned long val;
851         int err;
852
853         if (!capable(CAP_SYS_ADMIN))
854                 return -EPERM;
855
856         err = kstrtoul(buf, 10, &val);
857         if (err || (val == 0) || (val > 150))
858                 return -EINVAL;
859         zv_page_count_policy_percent = val;
860         return count;
861 }
862
863 static struct kobj_attribute zcache_zv_max_zsize_attr = {
864                 .attr = { .name = "zv_max_zsize", .mode = 0644 },
865                 .show = zv_max_zsize_show,
866                 .store = zv_max_zsize_store,
867 };
868
869 static struct kobj_attribute zcache_zv_max_mean_zsize_attr = {
870                 .attr = { .name = "zv_max_mean_zsize", .mode = 0644 },
871                 .show = zv_max_mean_zsize_show,
872                 .store = zv_max_mean_zsize_store,
873 };
874
875 static struct kobj_attribute zcache_zv_page_count_policy_percent_attr = {
876                 .attr = { .name = "zv_page_count_policy_percent",
877                           .mode = 0644 },
878                 .show = zv_page_count_policy_percent_show,
879                 .store = zv_page_count_policy_percent_store,
880 };
881 #endif
882
883 /*
884  * zcache core code starts here
885  */
886
887 /* useful stats not collected by cleancache or frontswap */
888 static unsigned long zcache_flush_total;
889 static unsigned long zcache_flush_found;
890 static unsigned long zcache_flobj_total;
891 static unsigned long zcache_flobj_found;
892 static unsigned long zcache_failed_eph_puts;
893 static unsigned long zcache_failed_pers_puts;
894
895 /*
896  * Tmem operations assume the poolid implies the invoking client.
897  * Zcache only has one client (the kernel itself): LOCAL_CLIENT.
898  * RAMster has each client numbered by cluster node, and a KVM version
899  * of zcache would have one client per guest; each client may use
900  * the same poolid values independently of the others.
901  */
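/*
 * For example, zcache_get_pool_by_id(LOCAL_CLIENT, 3) returns
 * zcache_host.tmem_pools[3], while cli_id == 2 with the same poolid
 * returns zcache_clients[2].tmem_pools[3]; poolids only have meaning
 * within their client.
 */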
902 static struct tmem_pool *zcache_get_pool_by_id(uint16_t cli_id, uint16_t poolid)
903 {
904         struct tmem_pool *pool = NULL;
905         struct zcache_client *cli = NULL;
906
907         if (cli_id == LOCAL_CLIENT)
908                 cli = &zcache_host;
909         else {
910                 if (cli_id >= MAX_CLIENTS)
911                         goto out;
912                 cli = &zcache_clients[cli_id];
915                 atomic_inc(&cli->refcount);
916         }
917         if (poolid < MAX_POOLS_PER_CLIENT) {
918                 pool = cli->tmem_pools[poolid];
919                 if (pool != NULL)
920                         atomic_inc(&pool->refcount);
921         }
922 out:
923         return pool;
924 }
925
926 static void zcache_put_pool(struct tmem_pool *pool)
927 {
928         struct zcache_client *cli = NULL;
929
930         if (pool == NULL)
931                 BUG();
932         cli = pool->client;
933         atomic_dec(&pool->refcount);
934         atomic_dec(&cli->refcount);
935 }
936
937 int zcache_new_client(uint16_t cli_id)
938 {
939         struct zcache_client *cli = NULL;
940         int ret = -1;
941
942         if (cli_id == LOCAL_CLIENT)
943                 cli = &zcache_host;
944         else if ((unsigned int)cli_id < MAX_CLIENTS)
945                 cli = &zcache_clients[cli_id];
946         if (cli == NULL)
947                 goto out;
948         if (cli->allocated)
949                 goto out;
950         cli->allocated = true;
951 #ifdef CONFIG_FRONTSWAP
952         cli->xvpool = xv_create_pool();
953         if (cli->xvpool == NULL)
954                 goto out;
955 #endif
956         ret = 0;
957 out:
958         return ret;
959 }
960
961 /* counters for debugging */
962 static unsigned long zcache_failed_get_free_pages;
963 static unsigned long zcache_failed_alloc;
964 static unsigned long zcache_put_to_flush;
965
966 /*
967  * for now, use named slabs so we can easily track usage; later we can
968  * either just use kmalloc, or perhaps add a slab-like allocator
969  * to more carefully manage total memory utilization
970  */
971 static struct kmem_cache *zcache_objnode_cache;
972 static struct kmem_cache *zcache_obj_cache;
973 static atomic_t zcache_curr_obj_count = ATOMIC_INIT(0);
974 static unsigned long zcache_curr_obj_count_max;
975 static atomic_t zcache_curr_objnode_count = ATOMIC_INIT(0);
976 static unsigned long zcache_curr_objnode_count_max;
977
978 /*
979  * to avoid memory allocation recursion (e.g. due to direct reclaim), we
980  * preload all necessary data structures so the hostops callbacks never
981  * actually do a malloc
982  */
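/*
 * The flow: zcache_put_page() calls zcache_do_preload() to stock the
 * per-cpu stash (objnodes, an obj, a raw page) while preemption is
 * still possible; on success preemption is left disabled, tmem_put()
 * runs, and the hostops callbacks below simply pop from the stash
 * without allocating.
 */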
983 struct zcache_preload {
984         void *page;
985         struct tmem_obj *obj;
986         int nr;
987         struct tmem_objnode *objnodes[OBJNODE_TREE_MAX_PATH];
988 };
989 static DEFINE_PER_CPU(struct zcache_preload, zcache_preloads) = { 0, };
990
991 static int zcache_do_preload(struct tmem_pool *pool)
992 {
993         struct zcache_preload *kp;
994         struct tmem_objnode *objnode;
995         struct tmem_obj *obj;
996         void *page;
997         int ret = -ENOMEM;
998
999         if (unlikely(zcache_objnode_cache == NULL))
1000                 goto out;
1001         if (unlikely(zcache_obj_cache == NULL))
1002                 goto out;
1003         preempt_disable();
1004         kp = &__get_cpu_var(zcache_preloads);
1005         while (kp->nr < ARRAY_SIZE(kp->objnodes)) {
1006                 preempt_enable_no_resched();
1007                 objnode = kmem_cache_alloc(zcache_objnode_cache,
1008                                 ZCACHE_GFP_MASK);
1009                 if (unlikely(objnode == NULL)) {
1010                         zcache_failed_alloc++;
1011                         goto out;
1012                 }
1013                 preempt_disable();
1014                 kp = &__get_cpu_var(zcache_preloads);
1015                 if (kp->nr < ARRAY_SIZE(kp->objnodes))
1016                         kp->objnodes[kp->nr++] = objnode;
1017                 else
1018                         kmem_cache_free(zcache_objnode_cache, objnode);
1019         }
1020         preempt_enable_no_resched();
1021         obj = kmem_cache_alloc(zcache_obj_cache, ZCACHE_GFP_MASK);
1022         if (unlikely(obj == NULL)) {
1023                 zcache_failed_alloc++;
1024                 goto out;
1025         }
1026         page = (void *)__get_free_page(ZCACHE_GFP_MASK);
1027         if (unlikely(page == NULL)) {
1028                 zcache_failed_get_free_pages++;
1029                 kmem_cache_free(zcache_obj_cache, obj);
1030                 goto out;
1031         }
1032         preempt_disable();
1033         kp = &__get_cpu_var(zcache_preloads);
1034         if (kp->obj == NULL)
1035                 kp->obj = obj;
1036         else
1037                 kmem_cache_free(zcache_obj_cache, obj);
1038         if (kp->page == NULL)
1039                 kp->page = page;
1040         else
1041                 free_page((unsigned long)page);
1042         ret = 0;
1043 out:
1044         return ret;
1045 }
1046
1047 static void *zcache_get_free_page(void)
1048 {
1049         struct zcache_preload *kp;
1050         void *page;
1051
1052         kp = &__get_cpu_var(zcache_preloads);
1053         page = kp->page;
1054         BUG_ON(page == NULL);
1055         kp->page = NULL;
1056         return page;
1057 }
1058
1059 static void zcache_free_page(void *p)
1060 {
1061         free_page((unsigned long)p);
1062 }
1063
1064 /*
1065  * zcache implementation for tmem host ops
1066  */
1067
1068 static struct tmem_objnode *zcache_objnode_alloc(struct tmem_pool *pool)
1069 {
1070         struct tmem_objnode *objnode = NULL;
1071         unsigned long count;
1072         struct zcache_preload *kp;
1073
1074         kp = &__get_cpu_var(zcache_preloads);
1075         if (kp->nr <= 0)
1076                 goto out;
1077         objnode = kp->objnodes[kp->nr - 1];
1078         BUG_ON(objnode == NULL);
1079         kp->objnodes[kp->nr - 1] = NULL;
1080         kp->nr--;
1081         count = atomic_inc_return(&zcache_curr_objnode_count);
1082         if (count > zcache_curr_objnode_count_max)
1083                 zcache_curr_objnode_count_max = count;
1084 out:
1085         return objnode;
1086 }
1087
1088 static void zcache_objnode_free(struct tmem_objnode *objnode,
1089                                         struct tmem_pool *pool)
1090 {
1091         atomic_dec(&zcache_curr_objnode_count);
1092         BUG_ON(atomic_read(&zcache_curr_objnode_count) < 0);
1093         kmem_cache_free(zcache_objnode_cache, objnode);
1094 }
1095
1096 static struct tmem_obj *zcache_obj_alloc(struct tmem_pool *pool)
1097 {
1098         struct tmem_obj *obj = NULL;
1099         unsigned long count;
1100         struct zcache_preload *kp;
1101
1102         kp = &__get_cpu_var(zcache_preloads);
1103         obj = kp->obj;
1104         BUG_ON(obj == NULL);
1105         kp->obj = NULL;
1106         count = atomic_inc_return(&zcache_curr_obj_count);
1107         if (count > zcache_curr_obj_count_max)
1108                 zcache_curr_obj_count_max = count;
1109         return obj;
1110 }
1111
1112 static void zcache_obj_free(struct tmem_obj *obj, struct tmem_pool *pool)
1113 {
1114         atomic_dec(&zcache_curr_obj_count);
1115         BUG_ON(atomic_read(&zcache_curr_obj_count) < 0);
1116         kmem_cache_free(zcache_obj_cache, obj);
1117 }
1118
1119 static struct tmem_hostops zcache_hostops = {
1120         .obj_alloc = zcache_obj_alloc,
1121         .obj_free = zcache_obj_free,
1122         .objnode_alloc = zcache_objnode_alloc,
1123         .objnode_free = zcache_objnode_free,
1124 };
1125
1126 /*
1127  * zcache implementations for PAM page descriptor ops
1128  */
1129
1130 static atomic_t zcache_curr_eph_pampd_count = ATOMIC_INIT(0);
1131 static unsigned long zcache_curr_eph_pampd_count_max;
1132 static atomic_t zcache_curr_pers_pampd_count = ATOMIC_INIT(0);
1133 static unsigned long zcache_curr_pers_pampd_count_max;
1134
1135 /* forward reference */
1136 static int zcache_compress(struct page *from, void **out_va, size_t *out_len);
1137
1138 static void *zcache_pampd_create(char *data, size_t size, bool raw, int eph,
1139                                 struct tmem_pool *pool, struct tmem_oid *oid,
1140                                  uint32_t index)
1141 {
1142         void *pampd = NULL, *cdata;
1143         size_t clen;
1144         int ret;
1145         unsigned long count;
1146         struct page *page = (struct page *)(data);
1147         struct zcache_client *cli = pool->client;
1148         uint16_t client_id = get_client_id_from_client(cli);
1149         unsigned long zv_mean_zsize;
1150         unsigned long curr_pers_pampd_count;
1151         u64 total_zsize;
1152
1153         if (eph) {
1154                 ret = zcache_compress(page, &cdata, &clen);
1155                 if (ret == 0)
1156                         goto out;
1157                 if (clen == 0 || clen > zbud_max_buddy_size()) {
1158                         zcache_compress_poor++;
1159                         goto out;
1160                 }
1161                 pampd = (void *)zbud_create(client_id, pool->pool_id, oid,
1162                                                 index, page, cdata, clen);
1163                 if (pampd != NULL) {
1164                         count = atomic_inc_return(&zcache_curr_eph_pampd_count);
1165                         if (count > zcache_curr_eph_pampd_count_max)
1166                                 zcache_curr_eph_pampd_count_max = count;
1167                 }
1168         } else {
1169                 curr_pers_pampd_count =
1170                         atomic_read(&zcache_curr_pers_pampd_count);
1171                 if (curr_pers_pampd_count >
1172                     (zv_page_count_policy_percent * totalram_pages) / 100)
1173                         goto out;
1174                 ret = zcache_compress(page, &cdata, &clen);
1175                 if (ret == 0)
1176                         goto out;
1177                 /* reject if compression is too poor */
1178                 if (clen > zv_max_zsize) {
1179                         zcache_compress_poor++;
1180                         goto out;
1181                 }
1182                 /* reject if mean compression is too poor */
1183                 if ((clen > zv_max_mean_zsize) && (curr_pers_pampd_count > 0)) {
1184                         total_zsize = xv_get_total_size_bytes(cli->xvpool);
1185                         zv_mean_zsize = div_u64(total_zsize,
1186                                                 curr_pers_pampd_count);
1187                         if (zv_mean_zsize > zv_max_mean_zsize) {
1188                                 zcache_mean_compress_poor++;
1189                                 goto out;
1190                         }
1191                 }
1192                 pampd = (void *)zv_create(cli->xvpool, pool->pool_id,
1193                                                 oid, index, cdata, clen);
1194                 if (pampd == NULL)
1195                         goto out;
1196                 count = atomic_inc_return(&zcache_curr_pers_pampd_count);
1197                 if (count > zcache_curr_pers_pampd_count_max)
1198                         zcache_curr_pers_pampd_count_max = count;
1199         }
1200 out:
1201         return pampd;
1202 }
1203
1204 /*
1205  * fill the pageframe corresponding to the struct page with the data
1206  * from the passed pampd
1207  */
1208 static int zcache_pampd_get_data(char *data, size_t *bufsize, bool raw,
1209                                         void *pampd, struct tmem_pool *pool,
1210                                         struct tmem_oid *oid, uint32_t index)
1211 {
1212         int ret = 0;
1213
1214         BUG_ON(is_ephemeral(pool));
1215         zv_decompress((struct page *)(data), pampd);
1216         return ret;
1217 }
1218
1219 /*
1220  * fill the pageframe corresponding to the struct page with the data
1221  * from the passed pampd
1222  */
1223 static int zcache_pampd_get_data_and_free(char *data, size_t *bufsize, bool raw,
1224                                         void *pampd, struct tmem_pool *pool,
1225                                         struct tmem_oid *oid, uint32_t index)
1226 {
1227         int ret = 0;
1228
1229         BUG_ON(!is_ephemeral(pool));
1230         zbud_decompress((struct page *)(data), pampd);
1231         zbud_free_and_delist((struct zbud_hdr *)pampd);
1232         atomic_dec(&zcache_curr_eph_pampd_count);
1233         return ret;
1234 }
1235
1236 /*
1237  * free the pampd and remove it from any zcache lists
1238  * pampd must no longer be pointed to from any tmem data structures!
1239  */
1240 static void zcache_pampd_free(void *pampd, struct tmem_pool *pool,
1241                                 struct tmem_oid *oid, uint32_t index)
1242 {
1243         struct zcache_client *cli = pool->client;
1244
1245         if (is_ephemeral(pool)) {
1246                 zbud_free_and_delist((struct zbud_hdr *)pampd);
1247                 atomic_dec(&zcache_curr_eph_pampd_count);
1248                 BUG_ON(atomic_read(&zcache_curr_eph_pampd_count) < 0);
1249         } else {
1250                 zv_free(cli->xvpool, (struct zv_hdr *)pampd);
1251                 atomic_dec(&zcache_curr_pers_pampd_count);
1252                 BUG_ON(atomic_read(&zcache_curr_pers_pampd_count) < 0);
1253         }
1254 }
1255
1256 static void zcache_pampd_free_obj(struct tmem_pool *pool, struct tmem_obj *obj)
1257 {
1258 }
1259
1260 static void zcache_pampd_new_obj(struct tmem_obj *obj)
1261 {
1262 }
1263
1264 static int zcache_pampd_replace_in_obj(void *pampd, struct tmem_obj *obj)
1265 {
1266         return -1;
1267 }
1268
1269 static bool zcache_pampd_is_remote(void *pampd)
1270 {
1271         return false;
1272 }
1273
1274 static struct tmem_pamops zcache_pamops = {
1275         .create = zcache_pampd_create,
1276         .get_data = zcache_pampd_get_data,
1277         .get_data_and_free = zcache_pampd_get_data_and_free,
1278         .free = zcache_pampd_free,
1279         .free_obj = zcache_pampd_free_obj,
1280         .new_obj = zcache_pampd_new_obj,
1281         .replace_in_obj = zcache_pampd_replace_in_obj,
1282         .is_remote = zcache_pampd_is_remote,
1283 };
1284
1285 /*
1286  * zcache compression/decompression and related per-cpu stuff
1287  */
1288
1289 #define LZO_WORKMEM_BYTES LZO1X_1_MEM_COMPRESS
1290 #define LZO_DSTMEM_PAGE_ORDER 1
1291 static DEFINE_PER_CPU(unsigned char *, zcache_workmem);
1292 static DEFINE_PER_CPU(unsigned char *, zcache_dstmem);
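/*
 * zcache_dstmem is order-1 (two pages) because lzo1x output can exceed
 * PAGE_SIZE for incompressible data (worst case is roughly
 * in_len + in_len/16 + 64 + 3 bytes); zcache_workmem supplies the
 * scratch space the compressor requires.
 */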
1293
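/*
 * returns 1 on success, 0 if the per-cpu buffers were never allocated;
 * callers treat 0 as "could not compress"
 */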
1294 static int zcache_compress(struct page *from, void **out_va, size_t *out_len)
1295 {
1296         int ret = 0;
1297         unsigned char *dmem = __get_cpu_var(zcache_dstmem);
1298         unsigned char *wmem = __get_cpu_var(zcache_workmem);
1299         char *from_va;
1300
1301         BUG_ON(!irqs_disabled());
1302         if (unlikely(dmem == NULL || wmem == NULL))
1303                 goto out;  /* no buffer, so can't compress */
1304         from_va = kmap_atomic(from, KM_USER0);
1305         mb();
1306         ret = lzo1x_1_compress(from_va, PAGE_SIZE, dmem, out_len, wmem);
1307         BUG_ON(ret != LZO_E_OK);
1308         *out_va = dmem;
1309         kunmap_atomic(from_va, KM_USER0);
1310         ret = 1;
1311 out:
1312         return ret;
1313 }
1314
1315
1316 static int zcache_cpu_notifier(struct notifier_block *nb,
1317                                 unsigned long action, void *pcpu)
1318 {
1319         int cpu = (long)pcpu;
1320         struct zcache_preload *kp;
1321
1322         switch (action) {
1323         case CPU_UP_PREPARE:
1324                 per_cpu(zcache_dstmem, cpu) = (void *)__get_free_pages(
1325                         GFP_KERNEL | __GFP_REPEAT,
1326                         LZO_DSTMEM_PAGE_ORDER);
1327                 per_cpu(zcache_workmem, cpu) =
1328                         kzalloc(LZO_WORKMEM_BYTES,
1329                                 GFP_KERNEL | __GFP_REPEAT);
1330                 break;
1331         case CPU_DEAD:
1332         case CPU_UP_CANCELED:
1333                 free_pages((unsigned long)per_cpu(zcache_dstmem, cpu),
1334                                 LZO_DSTMEM_PAGE_ORDER);
1335                 per_cpu(zcache_dstmem, cpu) = NULL;
1336                 kfree(per_cpu(zcache_workmem, cpu));
1337                 per_cpu(zcache_workmem, cpu) = NULL;
1338                 kp = &per_cpu(zcache_preloads, cpu);
1339                 while (kp->nr) {
1340                         kmem_cache_free(zcache_objnode_cache,
1341                                         kp->objnodes[kp->nr - 1]);
1342                         kp->objnodes[kp->nr - 1] = NULL;
1343                         kp->nr--;
1344                 }
1345                 if (kp->obj) {
1346                         kmem_cache_free(zcache_obj_cache, kp->obj);
1347                         kp->obj = NULL;
1348                 }
1349                 if (kp->page) {
1350                         free_page((unsigned long)kp->page);
1351                         kp->page = NULL;
1352                 }
1353                 break;
1354         default:
1355                 break;
1356         }
1357         return NOTIFY_OK;
1358 }
1359
1360 static struct notifier_block zcache_cpu_notifier_block = {
1361         .notifier_call = zcache_cpu_notifier
1362 };
1363
1364 #ifdef CONFIG_SYSFS
1365 #define ZCACHE_SYSFS_RO(_name) \
1366         static ssize_t zcache_##_name##_show(struct kobject *kobj, \
1367                                 struct kobj_attribute *attr, char *buf) \
1368         { \
1369                 return sprintf(buf, "%lu\n", zcache_##_name); \
1370         } \
1371         static struct kobj_attribute zcache_##_name##_attr = { \
1372                 .attr = { .name = __stringify(_name), .mode = 0444 }, \
1373                 .show = zcache_##_name##_show, \
1374         }
1375
1376 #define ZCACHE_SYSFS_RO_ATOMIC(_name) \
1377         static ssize_t zcache_##_name##_show(struct kobject *kobj, \
1378                                 struct kobj_attribute *attr, char *buf) \
1379         { \
1380             return sprintf(buf, "%d\n", atomic_read(&zcache_##_name)); \
1381         } \
1382         static struct kobj_attribute zcache_##_name##_attr = { \
1383                 .attr = { .name = __stringify(_name), .mode = 0444 }, \
1384                 .show = zcache_##_name##_show, \
1385         }
1386
1387 #define ZCACHE_SYSFS_RO_CUSTOM(_name, _func) \
1388         static ssize_t zcache_##_name##_show(struct kobject *kobj, \
1389                                 struct kobj_attribute *attr, char *buf) \
1390         { \
1391             return _func(buf); \
1392         } \
1393         static struct kobj_attribute zcache_##_name##_attr = { \
1394                 .attr = { .name = __stringify(_name), .mode = 0444 }, \
1395                 .show = zcache_##_name##_show, \
1396         }
1397
1398 ZCACHE_SYSFS_RO(curr_obj_count_max);
1399 ZCACHE_SYSFS_RO(curr_objnode_count_max);
1400 ZCACHE_SYSFS_RO(flush_total);
1401 ZCACHE_SYSFS_RO(flush_found);
1402 ZCACHE_SYSFS_RO(flobj_total);
1403 ZCACHE_SYSFS_RO(flobj_found);
1404 ZCACHE_SYSFS_RO(failed_eph_puts);
1405 ZCACHE_SYSFS_RO(failed_pers_puts);
1406 ZCACHE_SYSFS_RO(zbud_curr_zbytes);
1407 ZCACHE_SYSFS_RO(zbud_cumul_zpages);
1408 ZCACHE_SYSFS_RO(zbud_cumul_zbytes);
1409 ZCACHE_SYSFS_RO(zbud_buddied_count);
1410 ZCACHE_SYSFS_RO(zbpg_unused_list_count);
1411 ZCACHE_SYSFS_RO(evicted_raw_pages);
1412 ZCACHE_SYSFS_RO(evicted_unbuddied_pages);
1413 ZCACHE_SYSFS_RO(evicted_buddied_pages);
1414 ZCACHE_SYSFS_RO(failed_get_free_pages);
1415 ZCACHE_SYSFS_RO(failed_alloc);
1416 ZCACHE_SYSFS_RO(put_to_flush);
1417 ZCACHE_SYSFS_RO(compress_poor);
1418 ZCACHE_SYSFS_RO(mean_compress_poor);
1419 ZCACHE_SYSFS_RO_ATOMIC(zbud_curr_raw_pages);
1420 ZCACHE_SYSFS_RO_ATOMIC(zbud_curr_zpages);
1421 ZCACHE_SYSFS_RO_ATOMIC(curr_obj_count);
1422 ZCACHE_SYSFS_RO_ATOMIC(curr_objnode_count);
1423 ZCACHE_SYSFS_RO_CUSTOM(zbud_unbuddied_list_counts,
1424                         zbud_show_unbuddied_list_counts);
1425 ZCACHE_SYSFS_RO_CUSTOM(zbud_cumul_chunk_counts,
1426                         zbud_show_cumul_chunk_counts);
1427 ZCACHE_SYSFS_RO_CUSTOM(zv_curr_dist_counts,
1428                         zv_curr_dist_counts_show);
1429 ZCACHE_SYSFS_RO_CUSTOM(zv_cumul_dist_counts,
1430                         zv_cumul_dist_counts_show);
1431
static struct attribute *zcache_attrs[] = {
        &zcache_curr_obj_count_attr.attr,
        &zcache_curr_obj_count_max_attr.attr,
        &zcache_curr_objnode_count_attr.attr,
        &zcache_curr_objnode_count_max_attr.attr,
        &zcache_flush_total_attr.attr,
        &zcache_flobj_total_attr.attr,
        &zcache_flush_found_attr.attr,
        &zcache_flobj_found_attr.attr,
        &zcache_failed_eph_puts_attr.attr,
        &zcache_failed_pers_puts_attr.attr,
        &zcache_compress_poor_attr.attr,
        &zcache_mean_compress_poor_attr.attr,
        &zcache_zbud_curr_raw_pages_attr.attr,
        &zcache_zbud_curr_zpages_attr.attr,
        &zcache_zbud_curr_zbytes_attr.attr,
        &zcache_zbud_cumul_zpages_attr.attr,
        &zcache_zbud_cumul_zbytes_attr.attr,
        &zcache_zbud_buddied_count_attr.attr,
        &zcache_zbpg_unused_list_count_attr.attr,
        &zcache_evicted_raw_pages_attr.attr,
        &zcache_evicted_unbuddied_pages_attr.attr,
        &zcache_evicted_buddied_pages_attr.attr,
        &zcache_failed_get_free_pages_attr.attr,
        &zcache_failed_alloc_attr.attr,
        &zcache_put_to_flush_attr.attr,
        &zcache_zbud_unbuddied_list_counts_attr.attr,
        &zcache_zbud_cumul_chunk_counts_attr.attr,
        &zcache_zv_curr_dist_counts_attr.attr,
        &zcache_zv_cumul_dist_counts_attr.attr,
        &zcache_zv_max_zsize_attr.attr,
        &zcache_zv_max_mean_zsize_attr.attr,
        &zcache_zv_page_count_policy_percent_attr.attr,
        NULL,
};

static struct attribute_group zcache_attr_group = {
        .attrs = zcache_attrs,
        .name = "zcache",
};

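/*
 * All of the attributes above are read-only statistics.  Because the
 * group is created against mm_kobj in zcache_init() below, they show up
 * under /sys/kernel/mm/zcache/, e.g.:
 *      # cat /sys/kernel/mm/zcache/zbud_curr_raw_pages
 */
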
#endif /* CONFIG_SYSFS */
/*
 * When zcache is disabled ("frozen"), pools can be created and destroyed,
 * but all puts (and thus all other operations that require memory allocation)
 * must fail.  If zcache is unfrozen, accepts some puts, and is then frozen
 * again, data consistency requires that any put attempted while frozen be
 * converted into a flush of any older copy of that page.
 */
static bool zcache_freeze;

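/*
 * Worked example (illustrative): a page is put while unfrozen, zcache is
 * frozen, and the same page is later rewritten and put again.  The second
 * put must fail (no allocations while frozen), so if the stale first copy
 * were left in the pool, a later get would return old data.  Converting
 * the refused put into a flush of the old copy (see zcache_put_page()
 * below) keeps tmem coherent.
 */
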
/*
 * zcache shrinker interface (only useful for ephemeral pages, so zbud only)
 */
static int shrink_zcache_memory(struct shrinker *shrink,
                                struct shrink_control *sc)
{
        int ret = -1;
        int nr = sc->nr_to_scan;
        gfp_t gfp_mask = sc->gfp_mask;

        if (nr >= 0) {
                if (!(gfp_mask & __GFP_FS))
                        /* does this case really need to be skipped? */
                        goto out;
                zbud_evict_pages(nr);
        }
        ret = (int)atomic_read(&zcache_zbud_curr_raw_pages);
out:
        return ret;
}

static struct shrinker zcache_shrinker = {
        .shrink = shrink_zcache_memory,
        .seeks = DEFAULT_SEEKS,
};

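/*
 * Illustrative sketch (not part of the driver) of how the VM core drives
 * this shrinker once it is registered: a call with nr_to_scan == 0
 * effectively just reports the current zbud raw-page count, and a call
 * with a positive count evicts up to that many raw pages.  The batch
 * size of 128 below is hypothetical.
 */
#if 0
static void zcache_shrinker_example(void)
{
        struct shrink_control sc = {
                .gfp_mask = GFP_KERNEL,
                .nr_to_scan = 0,        /* query the pool size only */
        };
        int nr_pages = shrink_zcache_memory(&zcache_shrinker, &sc);

        sc.nr_to_scan = min(nr_pages, 128);     /* hypothetical batch */
        (void)shrink_zcache_memory(&zcache_shrinker, &sc);
}
#endif
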
/*
 * zcache shims between cleancache/frontswap ops and tmem
 */

static int zcache_put_page(int cli_id, int pool_id, struct tmem_oid *oidp,
                                uint32_t index, struct page *page)
{
        struct tmem_pool *pool;
        int ret = -1;

        BUG_ON(!irqs_disabled());
        pool = zcache_get_pool_by_id(cli_id, pool_id);
        if (unlikely(pool == NULL))
                goto out;
        if (!zcache_freeze && zcache_do_preload(pool) == 0) {
                /* preload does preempt_disable on success */
                ret = tmem_put(pool, oidp, index, (char *)(page),
                                PAGE_SIZE, 0, is_ephemeral(pool));
                if (ret < 0) {
                        if (is_ephemeral(pool))
                                zcache_failed_eph_puts++;
                        else
                                zcache_failed_pers_puts++;
                }
                zcache_put_pool(pool);
                preempt_enable_no_resched();
        } else {
                zcache_put_to_flush++;
                if (atomic_read(&pool->obj_count) > 0)
                        /* the put fails whether the flush succeeds or not */
                        (void)tmem_flush_page(pool, oidp, index);
                zcache_put_pool(pool);
        }
out:
        return ret;
}

static int zcache_get_page(int cli_id, int pool_id, struct tmem_oid *oidp,
                                uint32_t index, struct page *page)
{
        struct tmem_pool *pool;
        int ret = -1;
        unsigned long flags;
        size_t size = PAGE_SIZE;

        local_irq_save(flags);
        pool = zcache_get_pool_by_id(cli_id, pool_id);
        if (likely(pool != NULL)) {
                if (atomic_read(&pool->obj_count) > 0)
                        ret = tmem_get(pool, oidp, index, (char *)(page),
                                        &size, 0, is_ephemeral(pool));
                zcache_put_pool(pool);
        }
        local_irq_restore(flags);
        return ret;
}

static int zcache_flush_page(int cli_id, int pool_id,
                                struct tmem_oid *oidp, uint32_t index)
{
        struct tmem_pool *pool;
        int ret = -1;
        unsigned long flags;

        local_irq_save(flags);
        zcache_flush_total++;
        pool = zcache_get_pool_by_id(cli_id, pool_id);
        if (likely(pool != NULL)) {
                if (atomic_read(&pool->obj_count) > 0)
                        ret = tmem_flush_page(pool, oidp, index);
                zcache_put_pool(pool);
        }
        if (ret >= 0)
                zcache_flush_found++;
        local_irq_restore(flags);
        return ret;
}

static int zcache_flush_object(int cli_id, int pool_id,
                                struct tmem_oid *oidp)
{
        struct tmem_pool *pool;
        int ret = -1;
        unsigned long flags;

        local_irq_save(flags);
        zcache_flobj_total++;
        pool = zcache_get_pool_by_id(cli_id, pool_id);
        if (likely(pool != NULL)) {
                if (atomic_read(&pool->obj_count) > 0)
                        ret = tmem_flush_object(pool, oidp);
                zcache_put_pool(pool);
        }
        if (ret >= 0)
                zcache_flobj_found++;
        local_irq_restore(flags);
        return ret;
}

static int zcache_destroy_pool(int cli_id, int pool_id)
{
        struct tmem_pool *pool = NULL;
        struct zcache_client *cli = NULL;
        int ret = -1;

        if (pool_id < 0)
                goto out;
        if (cli_id == LOCAL_CLIENT)
                cli = &zcache_host;
        else if ((unsigned int)cli_id < MAX_CLIENTS)
                cli = &zcache_clients[cli_id];
        if (cli == NULL)
                goto out;
        atomic_inc(&cli->refcount);
        pool = cli->tmem_pools[pool_id];
        if (pool == NULL) {
                /* drop the reference taken above before bailing out */
                atomic_dec(&cli->refcount);
                goto out;
        }
        cli->tmem_pools[pool_id] = NULL;
        /* wait for pool activity on other cpus to quiesce */
        while (atomic_read(&pool->refcount) != 0)
                ;
        atomic_dec(&cli->refcount);
        local_bh_disable();
        ret = tmem_destroy_pool(pool);
        local_bh_enable();
        kfree(pool);
        pr_info("zcache: destroyed pool id=%d, cli_id=%d\n",
                        pool_id, cli_id);
out:
        return ret;
}

static int zcache_new_pool(uint16_t cli_id, uint32_t flags)
{
        int poolid = -1;
        struct tmem_pool *pool;
        struct zcache_client *cli = NULL;

        if (cli_id == LOCAL_CLIENT)
                cli = &zcache_host;
        else if ((unsigned int)cli_id < MAX_CLIENTS)
                cli = &zcache_clients[cli_id];
        if (cli == NULL)
                goto out;
        atomic_inc(&cli->refcount);
        pool = kmalloc(sizeof(struct tmem_pool), GFP_ATOMIC);
        if (pool == NULL) {
                pr_info("zcache: pool creation failed: out of memory\n");
                goto out;
        }

        for (poolid = 0; poolid < MAX_POOLS_PER_CLIENT; poolid++)
                if (cli->tmem_pools[poolid] == NULL)
                        break;
        if (poolid >= MAX_POOLS_PER_CLIENT) {
                pr_info("zcache: pool creation failed: max exceeded\n");
                kfree(pool);
                poolid = -1;
                goto out;
        }
        atomic_set(&pool->refcount, 0);
        pool->client = cli;
        pool->pool_id = poolid;
        tmem_new_pool(pool, flags);
        cli->tmem_pools[poolid] = pool;
        pr_info("zcache: created %s tmem pool, id=%d, client=%d\n",
                flags & TMEM_POOL_PERSIST ? "persistent" : "ephemeral",
                poolid, cli_id);
out:
        if (cli != NULL)
                atomic_dec(&cli->refcount);
        return poolid;
}

/**********
 * Two kernel subsystems can currently be layered on top of tmem:
 * "cleancache", which is used as a second-chance cache for clean
 * page cache pages, and "frontswap", which holds swap pages to avoid
 * writes to disk.  A generic "shim" is provided here for each
 * to translate in-kernel semantics to zcache semantics.
 */

#ifdef CONFIG_CLEANCACHE
static void zcache_cleancache_put_page(int pool_id,
                                        struct cleancache_filekey key,
                                        pgoff_t index, struct page *page)
{
        u32 ind = (u32) index;
        struct tmem_oid oid = *(struct tmem_oid *)&key;

        if (likely(ind == index))
                (void)zcache_put_page(LOCAL_CLIENT, pool_id, &oid, index, page);
}

static int zcache_cleancache_get_page(int pool_id,
                                        struct cleancache_filekey key,
                                        pgoff_t index, struct page *page)
{
        u32 ind = (u32) index;
        struct tmem_oid oid = *(struct tmem_oid *)&key;
        int ret = -1;

        if (likely(ind == index))
                ret = zcache_get_page(LOCAL_CLIENT, pool_id, &oid, index, page);
        return ret;
}

static void zcache_cleancache_flush_page(int pool_id,
                                        struct cleancache_filekey key,
                                        pgoff_t index)
{
        u32 ind = (u32) index;
        struct tmem_oid oid = *(struct tmem_oid *)&key;

        if (likely(ind == index))
                (void)zcache_flush_page(LOCAL_CLIENT, pool_id, &oid, ind);
}

static void zcache_cleancache_flush_inode(int pool_id,
                                        struct cleancache_filekey key)
{
        struct tmem_oid oid = *(struct tmem_oid *)&key;

        (void)zcache_flush_object(LOCAL_CLIENT, pool_id, &oid);
}

static void zcache_cleancache_flush_fs(int pool_id)
{
        if (pool_id >= 0)
                (void)zcache_destroy_pool(LOCAL_CLIENT, pool_id);
}

static int zcache_cleancache_init_fs(size_t pagesize)
{
        BUG_ON(sizeof(struct cleancache_filekey) !=
                                sizeof(struct tmem_oid));
        BUG_ON(pagesize != PAGE_SIZE);
        return zcache_new_pool(LOCAL_CLIENT, 0);
}

static int zcache_cleancache_init_shared_fs(char *uuid, size_t pagesize)
{
        /* shared pools are unsupported and map to private */
        BUG_ON(sizeof(struct cleancache_filekey) !=
                                sizeof(struct tmem_oid));
        BUG_ON(pagesize != PAGE_SIZE);
        return zcache_new_pool(LOCAL_CLIENT, 0);
}

static struct cleancache_ops zcache_cleancache_ops = {
        .put_page = zcache_cleancache_put_page,
        .get_page = zcache_cleancache_get_page,
        .flush_page = zcache_cleancache_flush_page,
        .flush_inode = zcache_cleancache_flush_inode,
        .flush_fs = zcache_cleancache_flush_fs,
        .init_shared_fs = zcache_cleancache_init_shared_fs,
        .init_fs = zcache_cleancache_init_fs
};

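/*
 * How these hooks are driven (roughly): the VFS/page-cache calls put_page
 * when a clean page is evicted from the page cache, get_page on a page
 * cache miss, flush_page/flush_inode when a page or inode is invalidated,
 * and flush_fs at umount; init_fs runs at mount and returns the pool id
 * the filesystem passes back on later calls.
 */
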
struct cleancache_ops zcache_cleancache_register_ops(void)
{
        struct cleancache_ops old_ops =
                cleancache_register_ops(&zcache_cleancache_ops);

        return old_ops;
}
#endif

#ifdef CONFIG_FRONTSWAP
/* a single tmem poolid is used for all frontswap "types" (swapfiles) */
static int zcache_frontswap_poolid = -1;

/*
 * Swizzling increases objects per swaptype, increasing tmem concurrency
 * for heavy swaploads.  Later, larger nr_cpus -> larger SWIZ_BITS.
 * Setting SWIZ_BITS to 27 would basically reconstruct the swap entry
 * passed to frontswap_get_page().
 */
#define SWIZ_BITS               4
#define SWIZ_MASK               ((1 << SWIZ_BITS) - 1)
#define _oswiz(_type, _ind)     ((_type << SWIZ_BITS) | (_ind & SWIZ_MASK))
#define iswiz(_ind)             (_ind >> SWIZ_BITS)

static inline struct tmem_oid oswiz(unsigned type, u32 ind)
{
        struct tmem_oid oid = { .oid = { 0 } };
        oid.oid[0] = _oswiz(type, ind);
        return oid;
}

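/*
 * Worked example (illustrative, SWIZ_BITS == 4): for swap type 1 and
 * page offset 0x123, the low four offset bits select the object,
 * oid.oid[0] = (1 << 4) | (0x123 & 0xf) = 0x13, and the high bits become
 * the index within that object, iswiz(0x123) = 0x12.  Each swap type is
 * thus spread across sixteen tmem objects, reducing lock contention.
 */
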
static int zcache_frontswap_put_page(unsigned type, pgoff_t offset,
                                   struct page *page)
{
        u64 ind64 = (u64)offset;
        u32 ind = (u32)offset;
        struct tmem_oid oid = oswiz(type, ind);
        int ret = -1;
        unsigned long flags;

        BUG_ON(!PageLocked(page));
        if (likely(ind64 == ind)) {
                local_irq_save(flags);
                ret = zcache_put_page(LOCAL_CLIENT, zcache_frontswap_poolid,
                                        &oid, iswiz(ind), page);
                local_irq_restore(flags);
        }
        return ret;
}

/* returns 0 if the page was successfully gotten from frontswap, -1 if
 * it was not present (should never happen!) */
static int zcache_frontswap_get_page(unsigned type, pgoff_t offset,
                                   struct page *page)
{
        u64 ind64 = (u64)offset;
        u32 ind = (u32)offset;
        struct tmem_oid oid = oswiz(type, ind);
        int ret = -1;

        BUG_ON(!PageLocked(page));
        if (likely(ind64 == ind))
                ret = zcache_get_page(LOCAL_CLIENT, zcache_frontswap_poolid,
                                        &oid, iswiz(ind), page);
        return ret;
}

/* flush a single page from frontswap */
static void zcache_frontswap_flush_page(unsigned type, pgoff_t offset)
{
        u64 ind64 = (u64)offset;
        u32 ind = (u32)offset;
        struct tmem_oid oid = oswiz(type, ind);

        if (likely(ind64 == ind))
                (void)zcache_flush_page(LOCAL_CLIENT, zcache_frontswap_poolid,
                                        &oid, iswiz(ind));
}

/* flush all pages from the passed swaptype */
static void zcache_frontswap_flush_area(unsigned type)
{
        struct tmem_oid oid;
        int ind;

        for (ind = SWIZ_MASK; ind >= 0; ind--) {
                oid = oswiz(type, ind);
                (void)zcache_flush_object(LOCAL_CLIENT,
                                                zcache_frontswap_poolid, &oid);
        }
}

static void zcache_frontswap_init(unsigned ignored)
{
        /* a single tmem poolid is used for all frontswap "types" (swapfiles) */
        if (zcache_frontswap_poolid < 0)
                zcache_frontswap_poolid =
                        zcache_new_pool(LOCAL_CLIENT, TMEM_POOL_PERSIST);
}

static struct frontswap_ops zcache_frontswap_ops = {
        .put_page = zcache_frontswap_put_page,
        .get_page = zcache_frontswap_get_page,
        .flush_page = zcache_frontswap_flush_page,
        .flush_area = zcache_frontswap_flush_area,
        .init = zcache_frontswap_init
};

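/*
 * How these hooks are driven (roughly): the swap code calls put_page
 * from the swap_writepage() path before falling back to the swap device,
 * get_page from the swap_readpage() path on swapin, flush_page when a
 * swap slot is freed, and flush_area at swapoff; init runs at swapon.
 */
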
struct frontswap_ops zcache_frontswap_register_ops(void)
{
        struct frontswap_ops old_ops =
                frontswap_register_ops(&zcache_frontswap_ops);

        return old_ops;
}
#endif

/*
 * zcache initialization
 * NOTE FOR NOW zcache MUST BE PROVIDED AS A KERNEL BOOT PARAMETER OR
 * NOTHING HAPPENS!
 */

static int zcache_enabled;

static int __init enable_zcache(char *s)
{
        zcache_enabled = 1;
        return 1;
}
__setup("zcache", enable_zcache);

/* allow independent dynamic disabling of cleancache and frontswap */

static int use_cleancache = 1;

static int __init no_cleancache(char *s)
{
        use_cleancache = 0;
        return 1;
}

__setup("nocleancache", no_cleancache);

static int use_frontswap = 1;

static int __init no_frontswap(char *s)
{
        use_frontswap = 0;
        return 1;
}

__setup("nofrontswap", no_frontswap);

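/*
 * Example (illustrative): booting with "zcache nofrontswap" on the kernel
 * command line enables zcache with the cleancache shim only; omitting
 * "zcache" entirely leaves the driver initialized but inert, per the
 * NOTE above.
 */
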
static int __init zcache_init(void)
{
        int ret = 0;

#ifdef CONFIG_SYSFS
        ret = sysfs_create_group(mm_kobj, &zcache_attr_group);
        if (ret) {
                pr_err("zcache: can't create sysfs\n");
                goto out;
        }
#endif /* CONFIG_SYSFS */
#if defined(CONFIG_CLEANCACHE) || defined(CONFIG_FRONTSWAP)
        if (zcache_enabled) {
                unsigned int cpu;

                tmem_register_hostops(&zcache_hostops);
                tmem_register_pamops(&zcache_pamops);
                ret = register_cpu_notifier(&zcache_cpu_notifier_block);
                if (ret) {
                        pr_err("zcache: can't register cpu notifier\n");
                        goto out;
                }
                for_each_online_cpu(cpu) {
                        void *pcpu = (void *)(long)cpu;
                        zcache_cpu_notifier(&zcache_cpu_notifier_block,
                                CPU_UP_PREPARE, pcpu);
                }
        }
        zcache_objnode_cache = kmem_cache_create("zcache_objnode",
                                sizeof(struct tmem_objnode), 0, 0, NULL);
        zcache_obj_cache = kmem_cache_create("zcache_obj",
                                sizeof(struct tmem_obj), 0, 0, NULL);
        if (!zcache_objnode_cache || !zcache_obj_cache) {
                pr_err("zcache: can't create kmem caches\n");
                ret = -ENOMEM;
                goto out;
        }
        ret = zcache_new_client(LOCAL_CLIENT);
        if (ret) {
                pr_err("zcache: can't create client\n");
                goto out;
        }
#endif
#ifdef CONFIG_CLEANCACHE
        if (zcache_enabled && use_cleancache) {
                struct cleancache_ops old_ops;

                zbud_init();
                register_shrinker(&zcache_shrinker);
                old_ops = zcache_cleancache_register_ops();
                pr_info("zcache: cleancache enabled using kernel "
                        "transcendent memory and compression buddies\n");
                if (old_ops.init_fs != NULL)
                        pr_warning("zcache: cleancache_ops overridden");
        }
#endif
#ifdef CONFIG_FRONTSWAP
        if (zcache_enabled && use_frontswap) {
                struct frontswap_ops old_ops;

                old_ops = zcache_frontswap_register_ops();
                pr_info("zcache: frontswap enabled using kernel "
                        "transcendent memory and xvmalloc\n");
                if (old_ops.init != NULL)
                        pr_warning("zcache: frontswap_ops overridden");
        }
#endif
out:
        return ret;
}


module_init(zcache_init)