/*
 * net/sunrpc/cache.c
 *
 * Generic code for various authentication-related caches
 * used by sunrpc clients and servers.
 *
 * Copyright (C) 2002 Neil Brown <neilb@cse.unsw.edu.au>
 *
 * Released under terms in GPL version 2.  See COPYING.
 *
 */

#include <linux/types.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <asm/uaccess.h>
#include <linux/poll.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <asm/ioctls.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/cache.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/rpc_pipe_fs.h>

#define RPCDBG_FACILITY RPCDBG_CACHE

static int cache_defer_req(struct cache_req *req, struct cache_head *item);
static void cache_revisit_request(struct cache_head *item);

static void cache_init(struct cache_head *h)
{
        time_t now = seconds_since_boot();
        h->next = NULL;
        h->flags = 0;
        kref_init(&h->ref);
        h->expiry_time = now + CACHE_NEW_EXPIRY;
        h->last_refresh = now;
}

static inline int cache_is_expired(struct cache_detail *detail, struct cache_head *h)
{
        return  (h->expiry_time < seconds_since_boot()) ||
                (detail->flush_time > h->last_refresh);
}

struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
                                       struct cache_head *key, int hash)
{
        struct cache_head **head, **hp;
        struct cache_head *new = NULL, *freeme = NULL;

        head = &detail->hash_table[hash];

        read_lock(&detail->hash_lock);

        for (hp=head; *hp != NULL ; hp = &(*hp)->next) {
                struct cache_head *tmp = *hp;
                if (detail->match(tmp, key)) {
                        if (cache_is_expired(detail, tmp))
                                /* This entry is expired, we will discard it. */
                                break;
                        cache_get(tmp);
                        read_unlock(&detail->hash_lock);
                        return tmp;
                }
        }
        read_unlock(&detail->hash_lock);
        /* Didn't find anything, insert an empty entry */

        new = detail->alloc();
        if (!new)
                return NULL;
        /* must fully initialise 'new', else
         * we might get into trouble if we need to
         * cache_put it soon.
         */
        cache_init(new);
        detail->init(new, key);

        write_lock(&detail->hash_lock);

        /* check if entry appeared while we slept */
        for (hp=head; *hp != NULL ; hp = &(*hp)->next) {
                struct cache_head *tmp = *hp;
                if (detail->match(tmp, key)) {
                        if (cache_is_expired(detail, tmp)) {
                                *hp = tmp->next;
                                tmp->next = NULL;
                                detail->entries--;
                                freeme = tmp;
                                break;
                        }
                        cache_get(tmp);
                        write_unlock(&detail->hash_lock);
                        cache_put(new, detail);
                        return tmp;
                }
        }
        new->next = *head;
        *head = new;
        detail->entries++;
        cache_get(new);
        write_unlock(&detail->hash_lock);

        if (freeme)
                cache_put(freeme, detail);
        return new;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_lookup);
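
/*
 * Illustrative sketch (not part of the original file): the usual way a
 * cache user drives sunrpc_cache_lookup().  "struct demo_ent", demo_lookup
 * and the hash expression are hypothetical; real users (e.g. the svcauth
 * caches) follow the same pattern of embedding a cache_head in their own
 * entry type and filling in just the key fields of a stack template.
 */
struct demo_ent {
        struct cache_head       h;      /* must be embedded for container_of */
        int                     key;
        int                     value;
};

static struct demo_ent *demo_lookup(struct cache_detail *demo_cache, int key)
{
        struct demo_ent tmpl;
        struct cache_head *ch;

        tmpl.key = key;         /* only the key fields need to be valid */
        ch = sunrpc_cache_lookup(demo_cache, &tmpl.h, key & 0xf /* demo hash */);
        if (ch == NULL)
                return NULL;
        /* We now hold a reference; drop it later with cache_put(). */
        return container_of(ch, struct demo_ent, h);
}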

static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch);

static void cache_fresh_locked(struct cache_head *head, time_t expiry)
{
        head->expiry_time = expiry;
        head->last_refresh = seconds_since_boot();
        set_bit(CACHE_VALID, &head->flags);
}

static void cache_fresh_unlocked(struct cache_head *head,
                                 struct cache_detail *detail)
{
        if (test_and_clear_bit(CACHE_PENDING, &head->flags)) {
                cache_revisit_request(head);
                cache_dequeue(detail, head);
        }
}

struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
                                       struct cache_head *new, struct cache_head *old, int hash)
{
        /* The 'old' entry is to be replaced by 'new'.
         * If 'old' is not VALID, we update it directly,
         * otherwise we need to replace it
         */
        struct cache_head **head;
        struct cache_head *tmp;

        if (!test_bit(CACHE_VALID, &old->flags)) {
                write_lock(&detail->hash_lock);
                if (!test_bit(CACHE_VALID, &old->flags)) {
                        if (test_bit(CACHE_NEGATIVE, &new->flags))
                                set_bit(CACHE_NEGATIVE, &old->flags);
                        else
                                detail->update(old, new);
                        cache_fresh_locked(old, new->expiry_time);
                        write_unlock(&detail->hash_lock);
                        cache_fresh_unlocked(old, detail);
                        return old;
                }
                write_unlock(&detail->hash_lock);
        }
        /* We need to insert a new entry */
        tmp = detail->alloc();
        if (!tmp) {
                cache_put(old, detail);
                return NULL;
        }
        cache_init(tmp);
        detail->init(tmp, old);
        head = &detail->hash_table[hash];

        write_lock(&detail->hash_lock);
        if (test_bit(CACHE_NEGATIVE, &new->flags))
                set_bit(CACHE_NEGATIVE, &tmp->flags);
        else
                detail->update(tmp, new);
        tmp->next = *head;
        *head = tmp;
        detail->entries++;
        cache_get(tmp);
        cache_fresh_locked(tmp, new->expiry_time);
        cache_fresh_locked(old, 0);
        write_unlock(&detail->hash_lock);
        cache_fresh_unlocked(tmp, detail);
        cache_fresh_unlocked(old, detail);
        cache_put(old, detail);
        return tmp;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_update);
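
/*
 * Illustrative sketch (hypothetical, continuing the demo_ent example
 * above): refreshing an entry with new content.  'new' is only a
 * template; sunrpc_cache_update() copies its content via detail->update,
 * consumes our reference on 'old', and returns a referenced result.
 */
static struct demo_ent *demo_refresh(struct cache_detail *demo_cache,
                                     struct demo_ent *old, int value, time_t expiry)
{
        struct demo_ent tmpl;
        struct cache_head *ch;

        tmpl.value = value;             /* content to install */
        tmpl.h.flags = 0;               /* not CACHE_NEGATIVE */
        tmpl.h.expiry_time = expiry;    /* used by cache_fresh_locked */
        ch = sunrpc_cache_update(demo_cache, &tmpl.h, &old->h, old->key & 0xf);
        if (ch == NULL)
                return NULL;            /* our reference on 'old' was dropped */
        return container_of(ch, struct demo_ent, h);
}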

static int cache_make_upcall(struct cache_detail *cd, struct cache_head *h)
{
        if (!cd->cache_upcall)
                return -EINVAL;
        return cd->cache_upcall(cd, h);
}

static inline int cache_is_valid(struct cache_detail *detail, struct cache_head *h)
{
        if (!test_bit(CACHE_VALID, &h->flags))
                return -EAGAIN;
        else {
                /* entry is valid */
                if (test_bit(CACHE_NEGATIVE, &h->flags))
                        return -ENOENT;
                else
                        return 0;
        }
}

/*
 * This is the generic cache management routine for all
 * the authentication caches.
 * It checks the currency of a cache item and will (later)
 * initiate an upcall to fill it if needed.
 *
 * Returns 0 if the cache_head can be used, or cache_puts it and returns
 * -EAGAIN if upcall is pending and request has been queued
 * -ETIMEDOUT if upcall failed or request could not be queued, or
 *           upcall completed but item is still invalid (implying that
 *           the cache item has been replaced with a newer one).
 * -ENOENT if cache entry was negative
 */
int cache_check(struct cache_detail *detail,
                    struct cache_head *h, struct cache_req *rqstp)
{
        int rv;
        long refresh_age, age;

        /* First decide return status as best we can */
        rv = cache_is_valid(detail, h);

        /* now see if we want to start an upcall */
        refresh_age = (h->expiry_time - h->last_refresh);
        age = seconds_since_boot() - h->last_refresh;

        if (rqstp == NULL) {
                if (rv == -EAGAIN)
                        rv = -ENOENT;
        } else if (rv == -EAGAIN || age > refresh_age/2) {
                dprintk("RPC:       Want update, refage=%ld, age=%ld\n",
                                refresh_age, age);
                if (!test_and_set_bit(CACHE_PENDING, &h->flags)) {
                        switch (cache_make_upcall(detail, h)) {
                        case -EINVAL:
                                clear_bit(CACHE_PENDING, &h->flags);
                                cache_revisit_request(h);
                                if (rv == -EAGAIN) {
                                        set_bit(CACHE_NEGATIVE, &h->flags);
                                        cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY);
                                        cache_fresh_unlocked(h, detail);
                                        rv = -ENOENT;
                                }
                                break;

                        case -EAGAIN:
                                clear_bit(CACHE_PENDING, &h->flags);
                                cache_revisit_request(h);
                                break;
                        }
                }
        }

        if (rv == -EAGAIN) {
                if (cache_defer_req(rqstp, h) < 0) {
                        /* Request is not deferred */
                        rv = cache_is_valid(detail, h);
                        if (rv == -EAGAIN)
                                rv = -ETIMEDOUT;
                }
        }
        if (rv)
                cache_put(h, detail);
        return rv;
}
EXPORT_SYMBOL_GPL(cache_check);
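
/*
 * Illustrative sketch (hypothetical): a server thread validating an
 * entry it just looked up.  Note the reference-counting contract from
 * the comment above: on any non-zero return, cache_check() has already
 * dropped the caller's reference with cache_put().
 */
static int demo_use(struct cache_detail *demo_cache, struct demo_ent *ent,
                    struct cache_req *rqstp)
{
        int err;

        err = cache_check(demo_cache, &ent->h, rqstp);
        switch (err) {
        case 0:
                /* valid: use ent->value, then cache_put(&ent->h, demo_cache) */
                return 0;
        case -EAGAIN:
                /* request was deferred; it will be revisited on upcall reply */
        default:
                /* -ENOENT (negative entry) or -ETIMEDOUT (upcall failed) */
                return err;
        }
}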

/*
 * caches need to be periodically cleaned.
 * For this we maintain a list of cache_detail and
 * a current pointer into that list and into the table
 * for that entry.
 *
 * Each time clean_cache is called it finds the next non-empty entry
 * in the current table and walks the list in that entry
 * looking for entries that can be removed.
 *
 * An entry gets removed if:
 * - The expiry is before current time
 * - The last_refresh time is before the flush_time for that cache
 *
 * later we might drop old entries with non-NEVER expiry if that table
 * is getting 'full' for some definition of 'full'
 *
 * The question of "how often to scan a table" is an interesting one
 * and is answered in part by the use of the "nextcheck" field in the
 * cache_detail.
 * When a scan of a table begins, the nextcheck field is set to a time
 * that is well into the future.
 * While scanning, if an expiry time is found that is earlier than the
 * current nextcheck time, nextcheck is set to that expiry time.
 * If the flush_time is ever set to a time earlier than the nextcheck
 * time, the nextcheck time is then set to that flush_time.
 *
 * A table is then only scanned if the current time is at least
 * the nextcheck time.
 *
 */

static LIST_HEAD(cache_list);
static DEFINE_SPINLOCK(cache_list_lock);
static struct cache_detail *current_detail;
static int current_index;

static void do_cache_clean(struct work_struct *work);
static struct delayed_work cache_cleaner;

static void sunrpc_init_cache_detail(struct cache_detail *cd)
{
        rwlock_init(&cd->hash_lock);
        INIT_LIST_HEAD(&cd->queue);
        spin_lock(&cache_list_lock);
        cd->nextcheck = 0;
        cd->entries = 0;
        atomic_set(&cd->readers, 0);
        cd->last_close = 0;
        cd->last_warn = -1;
        list_add(&cd->others, &cache_list);
        spin_unlock(&cache_list_lock);

        /* start the cleaning process */
        schedule_delayed_work(&cache_cleaner, 0);
}

static void sunrpc_destroy_cache_detail(struct cache_detail *cd)
{
        cache_purge(cd);
        spin_lock(&cache_list_lock);
        write_lock(&cd->hash_lock);
        if (cd->entries || atomic_read(&cd->inuse)) {
                write_unlock(&cd->hash_lock);
                spin_unlock(&cache_list_lock);
                goto out;
        }
        if (current_detail == cd)
                current_detail = NULL;
        list_del_init(&cd->others);
        write_unlock(&cd->hash_lock);
        spin_unlock(&cache_list_lock);
        if (list_empty(&cache_list)) {
                /* module must be being unloaded so it's safe to kill the worker */
                cancel_delayed_work_sync(&cache_cleaner);
        }
        return;
out:
        printk(KERN_ERR "nfsd: failed to unregister %s cache\n", cd->name);
}

/* clean cache tries to find something to clean
 * and cleans it.
 * It returns 1 if it cleaned something,
 *            0 if it didn't find anything this time
 *           -1 if it fell off the end of the list.
 */
static int cache_clean(void)
{
        int rv = 0;
        struct list_head *next;

        spin_lock(&cache_list_lock);

        /* find a suitable table if we don't already have one */
        while (current_detail == NULL ||
            current_index >= current_detail->hash_size) {
                if (current_detail)
                        next = current_detail->others.next;
                else
                        next = cache_list.next;
                if (next == &cache_list) {
                        current_detail = NULL;
                        spin_unlock(&cache_list_lock);
                        return -1;
                }
                current_detail = list_entry(next, struct cache_detail, others);
                if (current_detail->nextcheck > seconds_since_boot())
                        current_index = current_detail->hash_size;
                else {
                        current_index = 0;
                        current_detail->nextcheck = seconds_since_boot()+30*60;
                }
        }

        /* find a non-empty bucket in the table */
        while (current_detail &&
               current_index < current_detail->hash_size &&
               current_detail->hash_table[current_index] == NULL)
                current_index++;

        /* find a cleanable entry in the bucket and clean it, or set to next bucket */

        if (current_detail && current_index < current_detail->hash_size) {
                struct cache_head *ch, **cp;
                struct cache_detail *d;

                write_lock(&current_detail->hash_lock);

                /* Ok, now to clean this strand */

                cp = &current_detail->hash_table[current_index];
                for (ch = *cp ; ch ; cp = &ch->next, ch = *cp) {
                        if (current_detail->nextcheck > ch->expiry_time)
                                current_detail->nextcheck = ch->expiry_time+1;
                        if (!cache_is_expired(current_detail, ch))
                                continue;

                        *cp = ch->next;
                        ch->next = NULL;
                        current_detail->entries--;
                        rv = 1;
                        break;
                }

                write_unlock(&current_detail->hash_lock);
                d = current_detail;
                if (!ch)
                        current_index++;
                spin_unlock(&cache_list_lock);
                if (ch) {
                        if (test_and_clear_bit(CACHE_PENDING, &ch->flags))
                                cache_dequeue(current_detail, ch);
                        cache_revisit_request(ch);
                        cache_put(ch, d);
                }
        } else
                spin_unlock(&cache_list_lock);

        return rv;
}

/*
 * We want to regularly clean the cache, so we need to schedule some work ...
 */
static void do_cache_clean(struct work_struct *work)
{
        int delay = 5;
        if (cache_clean() == -1)
                delay = round_jiffies_relative(30*HZ);

        if (list_empty(&cache_list))
                delay = 0;

        if (delay)
                schedule_delayed_work(&cache_cleaner, delay);
}


/*
 * Clean all caches promptly.  This just calls cache_clean
 * repeatedly until we are sure that every cache has had a chance to
 * be fully cleaned
 */
void cache_flush(void)
{
        while (cache_clean() != -1)
                cond_resched();
        while (cache_clean() != -1)
                cond_resched();
}
EXPORT_SYMBOL_GPL(cache_flush);

void cache_purge(struct cache_detail *detail)
{
        detail->flush_time = LONG_MAX;
        detail->nextcheck = seconds_since_boot();
        cache_flush();
        detail->flush_time = 1;
}
EXPORT_SYMBOL_GPL(cache_purge);


/*
 * Deferral and Revisiting of Requests.
 *
 * If a cache lookup finds a pending entry, we
 * need to defer the request and revisit it later.
 * All deferred requests are stored in a hash table,
 * indexed by "struct cache_head *".
 * As it may be wasteful to store a whole request
 * structure, we allow the request to provide a
 * deferred form, which must contain a
 * 'struct cache_deferred_req'
 * This cache_deferred_req contains a method to allow
 * it to be revisited when cache info is available
 */

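/*
 * Illustrative sketch (hypothetical): the embedding pattern described
 * above.  A transport wraps its own deferred-request state around a
 * cache_deferred_req and points ->revisit at a callback; compare
 * thread_deferred_req/cache_restart_thread below for a real in-file use.
 */
struct demo_deferred_req {
        struct cache_deferred_req       handle;
        /* ... transport-specific state needed to replay the request ... */
};

static void demo_revisit(struct cache_deferred_req *dreq, int too_many)
{
        struct demo_deferred_req *dr =
                container_of(dreq, struct demo_deferred_req, handle);

        /* too_many means we were discarded to make room in the table;
         * otherwise cache info has arrived and the request could be
         * replayed.  This minimal sketch just frees the wrapper,
         * assuming the transport kmalloc'ed it.
         */
        kfree(dr);
}
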
#define DFR_HASHSIZE    (PAGE_SIZE/sizeof(struct list_head))
#define DFR_HASH(item)  ((((long)item)>>4 ^ (((long)item)>>13)) % DFR_HASHSIZE)

#define DFR_MAX 300     /* ??? */

static DEFINE_SPINLOCK(cache_defer_lock);
static LIST_HEAD(cache_defer_list);
static struct hlist_head cache_defer_hash[DFR_HASHSIZE];
static int cache_defer_cnt;

static void __unhash_deferred_req(struct cache_deferred_req *dreq)
{
        list_del_init(&dreq->recent);
        hlist_del_init(&dreq->hash);
        cache_defer_cnt--;
}

static void __hash_deferred_req(struct cache_deferred_req *dreq, struct cache_head *item)
{
        int hash = DFR_HASH(item);

        list_add(&dreq->recent, &cache_defer_list);
        hlist_add_head(&dreq->hash, &cache_defer_hash[hash]);
}

static int setup_deferral(struct cache_deferred_req *dreq, struct cache_head *item)
{
        struct cache_deferred_req *discard;

        dreq->item = item;

        spin_lock(&cache_defer_lock);

        __hash_deferred_req(dreq, item);

        /* it is in, now maybe clean up */
        discard = NULL;
        if (++cache_defer_cnt > DFR_MAX) {
                discard = list_entry(cache_defer_list.prev,
                                     struct cache_deferred_req, recent);
                __unhash_deferred_req(discard);
        }
        spin_unlock(&cache_defer_lock);

        if (discard)
                /* there was one too many */
                discard->revisit(discard, 1);

        if (!test_bit(CACHE_PENDING, &item->flags)) {
                /* must have just been validated... */
                cache_revisit_request(item);
                return -EAGAIN;
        }
        return 0;
}

struct thread_deferred_req {
        struct cache_deferred_req handle;
        struct completion completion;
};

static void cache_restart_thread(struct cache_deferred_req *dreq, int too_many)
{
        struct thread_deferred_req *dr =
                container_of(dreq, struct thread_deferred_req, handle);
        complete(&dr->completion);
}

static int cache_wait_req(struct cache_req *req, struct cache_head *item)
{
        struct thread_deferred_req sleeper;
        struct cache_deferred_req *dreq = &sleeper.handle;
        int ret;

        sleeper.completion = COMPLETION_INITIALIZER_ONSTACK(sleeper.completion);
        dreq->revisit = cache_restart_thread;

        ret = setup_deferral(dreq, item);
        if (ret)
                return ret;

        if (wait_for_completion_interruptible_timeout(
                    &sleeper.completion, req->thread_wait) <= 0) {
                /* The completion wasn't completed, so we need
                 * to clean up
                 */
                spin_lock(&cache_defer_lock);
                if (!hlist_unhashed(&sleeper.handle.hash)) {
                        __unhash_deferred_req(&sleeper.handle);
                        spin_unlock(&cache_defer_lock);
                } else {
                        /* cache_revisit_request already removed
                         * this from the hash table, but hasn't
                         * called ->revisit yet.  It will very soon
                         * and we need to wait for it.
                         */
                        spin_unlock(&cache_defer_lock);
                        wait_for_completion(&sleeper.completion);
                }
        }
        if (test_bit(CACHE_PENDING, &item->flags)) {
                /* item is still pending, try request
                 * deferral
                 */
                return -ETIMEDOUT;
        }
        /* only return success if we actually deferred the
         * request.  In this case we waited until it was
         * answered so no deferral has happened - rather
         * an answer already exists.
         */
        return -EEXIST;
}

static int cache_defer_req(struct cache_req *req, struct cache_head *item)
{
        struct cache_deferred_req *dreq;
        int ret;

        if (cache_defer_cnt >= DFR_MAX) {
                /* too much in the cache, randomly drop this one,
                 * or continue and drop the oldest
                 */
                if (net_random()&1)
                        return -ENOMEM;
        }
        if (req->thread_wait) {
                ret = cache_wait_req(req, item);
                if (ret != -ETIMEDOUT)
                        return ret;
        }
        dreq = req->defer(req);
        if (dreq == NULL)
                return -ENOMEM;
        return setup_deferral(dreq, item);
}

static void cache_revisit_request(struct cache_head *item)
{
        struct cache_deferred_req *dreq;
        struct list_head pending;
        struct hlist_node *lp, *tmp;
        int hash = DFR_HASH(item);

        INIT_LIST_HEAD(&pending);
        spin_lock(&cache_defer_lock);

        hlist_for_each_entry_safe(dreq, lp, tmp, &cache_defer_hash[hash], hash)
                if (dreq->item == item) {
                        __unhash_deferred_req(dreq);
                        list_add(&dreq->recent, &pending);
                }

        spin_unlock(&cache_defer_lock);

        while (!list_empty(&pending)) {
                dreq = list_entry(pending.next, struct cache_deferred_req, recent);
                list_del_init(&dreq->recent);
                dreq->revisit(dreq, 0);
        }
}

void cache_clean_deferred(void *owner)
{
        struct cache_deferred_req *dreq, *tmp;
        struct list_head pending;

        INIT_LIST_HEAD(&pending);
        spin_lock(&cache_defer_lock);

        list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
                if (dreq->owner == owner) {
                        __unhash_deferred_req(dreq);
                        /* collect for revisiting once the lock is dropped */
                        list_add(&dreq->recent, &pending);
                }
        }
        spin_unlock(&cache_defer_lock);

        while (!list_empty(&pending)) {
                dreq = list_entry(pending.next, struct cache_deferred_req, recent);
                list_del_init(&dreq->recent);
                dreq->revisit(dreq, 1);
        }
}

/*
 * communicate with user-space
 *
 * We have a magic /proc file - /proc/sunrpc/<cachename>/channel.
 * On read, you get a full request, or block.
 * On write, an update request is processed.
 * Poll works if anything to read, and always allows write.
 *
 * Implemented by linked list of requests.  Each open file has
 * a ->private that also exists in this list.  New requests are added
 * to the end and may wake up any preceding readers.
 * New readers are added to the head.  If, on read, an item is found with
 * CACHE_UPCALLING clear, we free it from the list.
 *
 */

static DEFINE_SPINLOCK(queue_lock);
static DEFINE_MUTEX(queue_io_mutex);

struct cache_queue {
        struct list_head        list;
        int                     reader; /* if 0, then request */
};
struct cache_request {
        struct cache_queue      q;
        struct cache_head       *item;
        char                    *buf;
        int                     len;
        int                     readers;
};
struct cache_reader {
        struct cache_queue      q;
        int                     offset; /* if non-0, we have a refcnt on next request */
};

static ssize_t cache_read(struct file *filp, char __user *buf, size_t count,
                          loff_t *ppos, struct cache_detail *cd)
{
        struct cache_reader *rp = filp->private_data;
        struct cache_request *rq;
        struct inode *inode = filp->f_path.dentry->d_inode;
        int err;

        if (count == 0)
                return 0;

        mutex_lock(&inode->i_mutex); /* protect against multiple concurrent
                              * readers on this file */
 again:
        spin_lock(&queue_lock);
        /* need to find next request */
        while (rp->q.list.next != &cd->queue &&
               list_entry(rp->q.list.next, struct cache_queue, list)
               ->reader) {
                struct list_head *next = rp->q.list.next;
                list_move(&rp->q.list, next);
        }
        if (rp->q.list.next == &cd->queue) {
                spin_unlock(&queue_lock);
                mutex_unlock(&inode->i_mutex);
                BUG_ON(rp->offset);
                return 0;
        }
        rq = container_of(rp->q.list.next, struct cache_request, q.list);
        BUG_ON(rq->q.reader);
        if (rp->offset == 0)
                rq->readers++;
        spin_unlock(&queue_lock);

        if (rp->offset == 0 && !test_bit(CACHE_PENDING, &rq->item->flags)) {
                err = -EAGAIN;
                spin_lock(&queue_lock);
                list_move(&rp->q.list, &rq->q.list);
                spin_unlock(&queue_lock);
        } else {
                if (rp->offset + count > rq->len)
                        count = rq->len - rp->offset;
                err = -EFAULT;
                if (copy_to_user(buf, rq->buf + rp->offset, count))
                        goto out;
                rp->offset += count;
                if (rp->offset >= rq->len) {
                        rp->offset = 0;
                        spin_lock(&queue_lock);
                        list_move(&rp->q.list, &rq->q.list);
                        spin_unlock(&queue_lock);
                }
                err = 0;
        }
 out:
        if (rp->offset == 0) {
                /* need to release rq */
                spin_lock(&queue_lock);
                rq->readers--;
                if (rq->readers == 0 &&
                    !test_bit(CACHE_PENDING, &rq->item->flags)) {
                        list_del(&rq->q.list);
                        spin_unlock(&queue_lock);
                        cache_put(rq->item, cd);
                        kfree(rq->buf);
                        kfree(rq);
                } else
                        spin_unlock(&queue_lock);
        }
        if (err == -EAGAIN)
                goto again;
        mutex_unlock(&inode->i_mutex);
        return err ? err : count;
}

static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
                                 size_t count, struct cache_detail *cd)
{
        ssize_t ret;

        if (copy_from_user(kaddr, buf, count))
                return -EFAULT;
        kaddr[count] = '\0';
        ret = cd->cache_parse(cd, kaddr, count);
        if (!ret)
                ret = count;
        return ret;
}

static ssize_t cache_slow_downcall(const char __user *buf,
                                   size_t count, struct cache_detail *cd)
{
        static char write_buf[8192]; /* protected by queue_io_mutex */
        ssize_t ret = -EINVAL;

        if (count >= sizeof(write_buf))
                goto out;
        mutex_lock(&queue_io_mutex);
        ret = cache_do_downcall(write_buf, buf, count, cd);
        mutex_unlock(&queue_io_mutex);
out:
        return ret;
}

static ssize_t cache_downcall(struct address_space *mapping,
                              const char __user *buf,
                              size_t count, struct cache_detail *cd)
{
        struct page *page;
        char *kaddr;
        ssize_t ret = -ENOMEM;

        if (count >= PAGE_CACHE_SIZE)
                goto out_slow;

        page = find_or_create_page(mapping, 0, GFP_KERNEL);
        if (!page)
                goto out_slow;

        kaddr = kmap(page);
        ret = cache_do_downcall(kaddr, buf, count, cd);
        kunmap(page);
        unlock_page(page);
        page_cache_release(page);
        return ret;
out_slow:
        return cache_slow_downcall(buf, count, cd);
}

static ssize_t cache_write(struct file *filp, const char __user *buf,
                           size_t count, loff_t *ppos,
                           struct cache_detail *cd)
{
        struct address_space *mapping = filp->f_mapping;
        struct inode *inode = filp->f_path.dentry->d_inode;
        ssize_t ret = -EINVAL;

        if (!cd->cache_parse)
                goto out;

        mutex_lock(&inode->i_mutex);
        ret = cache_downcall(mapping, buf, count, cd);
        mutex_unlock(&inode->i_mutex);
out:
        return ret;
}

static DECLARE_WAIT_QUEUE_HEAD(queue_wait);

static unsigned int cache_poll(struct file *filp, poll_table *wait,
                               struct cache_detail *cd)
{
        unsigned int mask;
        struct cache_reader *rp = filp->private_data;
        struct cache_queue *cq;

        poll_wait(filp, &queue_wait, wait);

        /* always allow write */
        mask = POLLOUT | POLLWRNORM;

        if (!rp)
                return mask;

        spin_lock(&queue_lock);

        for (cq = &rp->q; &cq->list != &cd->queue;
             cq = list_entry(cq->list.next, struct cache_queue, list))
                if (!cq->reader) {
                        mask |= POLLIN | POLLRDNORM;
                        break;
                }
        spin_unlock(&queue_lock);
        return mask;
}

static int cache_ioctl(struct inode *ino, struct file *filp,
                       unsigned int cmd, unsigned long arg,
                       struct cache_detail *cd)
{
        int len = 0;
        struct cache_reader *rp = filp->private_data;
        struct cache_queue *cq;

        if (cmd != FIONREAD || !rp)
                return -EINVAL;

        spin_lock(&queue_lock);

        /* only find the length remaining in current request,
         * or the length of the next request
         */
        for (cq = &rp->q; &cq->list != &cd->queue;
             cq = list_entry(cq->list.next, struct cache_queue, list))
                if (!cq->reader) {
                        struct cache_request *cr =
                                container_of(cq, struct cache_request, q);
                        len = cr->len - rp->offset;
                        break;
                }
        spin_unlock(&queue_lock);

        return put_user(len, (int __user *)arg);
}

static int cache_open(struct inode *inode, struct file *filp,
                      struct cache_detail *cd)
{
        struct cache_reader *rp = NULL;

        if (!cd || !try_module_get(cd->owner))
                return -EACCES;
        nonseekable_open(inode, filp);
        if (filp->f_mode & FMODE_READ) {
                rp = kmalloc(sizeof(*rp), GFP_KERNEL);
                if (!rp)
                        return -ENOMEM;
                rp->offset = 0;
                rp->q.reader = 1;
                atomic_inc(&cd->readers);
                spin_lock(&queue_lock);
                list_add(&rp->q.list, &cd->queue);
                spin_unlock(&queue_lock);
        }
        filp->private_data = rp;
        return 0;
}

static int cache_release(struct inode *inode, struct file *filp,
                         struct cache_detail *cd)
{
        struct cache_reader *rp = filp->private_data;

        if (rp) {
                spin_lock(&queue_lock);
                if (rp->offset) {
                        struct cache_queue *cq;
                        for (cq = &rp->q; &cq->list != &cd->queue;
                             cq = list_entry(cq->list.next, struct cache_queue, list))
                                if (!cq->reader) {
                                        container_of(cq, struct cache_request, q)
                                                ->readers--;
                                        break;
                                }
                        rp->offset = 0;
                }
                list_del(&rp->q.list);
                spin_unlock(&queue_lock);

                filp->private_data = NULL;
                kfree(rp);

                cd->last_close = seconds_since_boot();
                atomic_dec(&cd->readers);
        }
        module_put(cd->owner);
        return 0;
}


static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch)
{
        struct cache_queue *cq;
        spin_lock(&queue_lock);
        list_for_each_entry(cq, &detail->queue, list)
                if (!cq->reader) {
                        struct cache_request *cr = container_of(cq, struct cache_request, q);
                        if (cr->item != ch)
                                continue;
                        if (cr->readers != 0)
                                continue;
                        list_del(&cr->q.list);
                        spin_unlock(&queue_lock);
                        cache_put(cr->item, detail);
                        kfree(cr->buf);
                        kfree(cr);
                        return;
                }
        spin_unlock(&queue_lock);
}

/*
 * Support routines for text-based upcalls.
 * Fields are separated by spaces.
 * Fields are either mangled to quote space tab newline slosh with slosh
 * or hexified with a leading \x
 * Record is terminated with newline.
 *
 */

void qword_add(char **bpp, int *lp, char *str)
{
        char *bp = *bpp;
        int len = *lp;
        char c;

        if (len < 0) return;

        while ((c=*str++) && len)
                switch (c) {
                case ' ':
                case '\t':
                case '\n':
                case '\\':
                        if (len >= 4) {
                                *bp++ = '\\';
                                *bp++ = '0' + ((c & 0300)>>6);
                                *bp++ = '0' + ((c & 0070)>>3);
                                *bp++ = '0' + ((c & 0007)>>0);
                        }
                        len -= 4;
                        break;
                default:
                        *bp++ = c;
                        len--;
                }
        if (c || len < 1) len = -1;
        else {
                *bp++ = ' ';
                len--;
        }
        *bpp = bp;
        *lp = len;
}
EXPORT_SYMBOL_GPL(qword_add);

void qword_addhex(char **bpp, int *lp, char *buf, int blen)
{
        char *bp = *bpp;
        int len = *lp;

        if (len < 0) return;

        if (len > 2) {
                *bp++ = '\\';
                *bp++ = 'x';
                len -= 2;
                while (blen && len >= 2) {
                        unsigned char c = *buf++;
                        *bp++ = '0' + ((c&0xf0)>>4) + (c>=0xa0)*('a'-'9'-1);
                        *bp++ = '0' + (c&0x0f) + ((c&0x0f)>=0x0a)*('a'-'9'-1);
                        len -= 2;
                        blen--;
                }
        }
        if (blen || len < 1) len = -1;
        else {
                *bp++ = ' ';
                len--;
        }
        *bpp = bp;
        *lp = len;
}
EXPORT_SYMBOL_GPL(qword_addhex);
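
/*
 * Illustrative sketch (hypothetical): a cache_request formatter of the
 * kind passed to sunrpc_cache_pipe_upcall() below, reusing the demo_ent
 * example.  It emits one record in the format described above: a
 * hexified binary key, a quoted text field, and a terminating newline.
 * Both qword helpers drive *blen negative on overflow, which the
 * upcall code checks for.
 */
static void demo_request(struct cache_detail *cd, struct cache_head *h,
                         char **bpp, int *blen)
{
        struct demo_ent *ent = container_of(h, struct demo_ent, h);
        char key[4];

        memcpy(key, &ent->key, 4);              /* assumes 4-byte int key */
        qword_addhex(bpp, blen, key, 4);        /* binary field: \x... */
        qword_add(bpp, blen, "demo");           /* quoted text field */
        (*bpp)[-1] = '\n';      /* replace trailing space with newline */
}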

static void warn_no_listener(struct cache_detail *detail)
{
        if (detail->last_warn != detail->last_close) {
                detail->last_warn = detail->last_close;
                if (detail->warn_no_listener)
                        detail->warn_no_listener(detail, detail->last_close != 0);
        }
}

static bool cache_listeners_exist(struct cache_detail *detail)
{
        if (atomic_read(&detail->readers))
                return true;
        if (detail->last_close == 0)
                /* This cache was never opened */
                return false;
        if (detail->last_close < seconds_since_boot() - 30)
                /*
                 * We allow for the possibility that someone might
                 * restart a userspace daemon without restarting the
                 * server; but after 30 seconds, we give up.
                 */
                return false;
        return true;
}

/*
 * register an upcall request to user-space and queue it up for read() by the
 * upcall daemon.
 *
 * Each request is at most one page long.
 */
int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h,
                void (*cache_request)(struct cache_detail *,
                                      struct cache_head *,
                                      char **,
                                      int *))
{
        char *buf;
        struct cache_request *crq;
        char *bp;
        int len;

        if (!cache_listeners_exist(detail)) {
                warn_no_listener(detail);
                return -EINVAL;
        }

        buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (!buf)
                return -EAGAIN;

        crq = kmalloc(sizeof(*crq), GFP_KERNEL);
        if (!crq) {
                kfree(buf);
                return -EAGAIN;
        }

        bp = buf; len = PAGE_SIZE;

        cache_request(detail, h, &bp, &len);

        if (len < 0) {
                kfree(buf);
                kfree(crq);
                return -EAGAIN;
        }
        crq->q.reader = 0;
        crq->item = cache_get(h);
        crq->buf = buf;
        crq->len = PAGE_SIZE - len;
        crq->readers = 0;
        spin_lock(&queue_lock);
        list_add_tail(&crq->q.list, &detail->queue);
        spin_unlock(&queue_lock);
        wake_up(&queue_wait);
        return 0;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall);
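
/*
 * Illustrative sketch (hypothetical): a typical ->cache_upcall method
 * simply forwards to sunrpc_cache_pipe_upcall() with the cache's own
 * request formatter (demo_request above).
 */
static int demo_upcall(struct cache_detail *cd, struct cache_head *h)
{
        return sunrpc_cache_pipe_upcall(cd, h, demo_request);
}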

/*
 * parse a message from user-space and pass it
 * to an appropriate cache
 * Messages are, like requests, separated into fields by
 * spaces, and dequoted from \xHEXSTRING or embedded \nnn octal
 *
 * Message is
 *   reply cachename expiry key ... content....
 *
 * key and content are both parsed by cache
 */

#define isodigit(c) (isdigit(c) && c <= '7')
int qword_get(char **bpp, char *dest, int bufsize)
{
        /* return bytes copied, or -1 on error */
        char *bp = *bpp;
        int len = 0;

        while (*bp == ' ') bp++;

        if (bp[0] == '\\' && bp[1] == 'x') {
                /* HEX STRING */
                bp += 2;
                while (isxdigit(bp[0]) && isxdigit(bp[1]) && len < bufsize) {
                        int byte = isdigit(*bp) ? *bp-'0' : toupper(*bp)-'A'+10;
                        bp++;
                        byte <<= 4;
                        byte |= isdigit(*bp) ? *bp-'0' : toupper(*bp)-'A'+10;
                        *dest++ = byte;
                        bp++;
                        len++;
                }
        } else {
                /* text with \nnn octal quoting */
                while (*bp != ' ' && *bp != '\n' && *bp && len < bufsize-1) {
                        if (*bp == '\\' &&
                            isodigit(bp[1]) && (bp[1] <= '3') &&
                            isodigit(bp[2]) &&
                            isodigit(bp[3])) {
                                int byte = (*++bp -'0');
                                bp++;
                                byte = (byte << 3) | (*bp++ - '0');
                                byte = (byte << 3) | (*bp++ - '0');
                                *dest++ = byte;
                                len++;
                        } else {
                                *dest++ = *bp++;
                                len++;
                        }
                }
        }

        if (*bp != ' ' && *bp != '\n' && *bp != '\0')
                return -1;
        while (*bp == ' ') bp++;
        *bpp = bp;
        *dest = '\0';
        return len;
}
EXPORT_SYMBOL_GPL(qword_get);
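
/*
 * Illustrative sketch (hypothetical): a ->cache_parse method pulling
 * fields out of a downcall line with qword_get().  A real parser would
 * also read an expiry time and install the result with
 * sunrpc_cache_update(); see the svcauth caches for full examples.
 */
static int demo_parse(struct cache_detail *cd, char *mesg, int mlen)
{
        char field[64];

        if (mesg[mlen - 1] != '\n')
                return -EINVAL;         /* records must end in a newline */

        if (qword_get(&mesg, field, sizeof(field)) < 0)
                return -EINVAL;
        /* 'field' now holds the first dequoted field; the remaining
         * fields (expiry, key, content) would be parsed the same way. */
        return 0;
}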


/*
 * support /proc/sunrpc/cache/$CACHENAME/content
 * as a seqfile.
 * We call ->cache_show passing NULL for the item to
 * get a header, then pass each real item in the cache
 */

struct handle {
        struct cache_detail *cd;
};

static void *c_start(struct seq_file *m, loff_t *pos)
        __acquires(cd->hash_lock)
{
        loff_t n = *pos;
        unsigned hash, entry;
        struct cache_head *ch;
        struct cache_detail *cd = ((struct handle*)m->private)->cd;

        read_lock(&cd->hash_lock);
        if (!n--)
                return SEQ_START_TOKEN;
        hash = n >> 32;
        entry = n & ((1LL<<32) - 1);

        for (ch=cd->hash_table[hash]; ch; ch=ch->next)
                if (!entry--)
                        return ch;
        n &= ~((1LL<<32) - 1);
        do {
                hash++;
                n += 1LL<<32;
        } while (hash < cd->hash_size &&
                 cd->hash_table[hash] == NULL);
        if (hash >= cd->hash_size)
                return NULL;
        *pos = n+1;
        return cd->hash_table[hash];
}

static void *c_next(struct seq_file *m, void *p, loff_t *pos)
{
        struct cache_head *ch = p;
        int hash = (*pos >> 32);
        struct cache_detail *cd = ((struct handle*)m->private)->cd;

        if (p == SEQ_START_TOKEN)
                hash = 0;
        else if (ch->next == NULL) {
                hash++;
                *pos += 1LL<<32;
        } else {
                ++*pos;
                return ch->next;
        }
        *pos &= ~((1LL<<32) - 1);
        while (hash < cd->hash_size &&
               cd->hash_table[hash] == NULL) {
                hash++;
                *pos += 1LL<<32;
        }
        if (hash >= cd->hash_size)
                return NULL;
        ++*pos;
        return cd->hash_table[hash];
}

static void c_stop(struct seq_file *m, void *p)
        __releases(cd->hash_lock)
{
        struct cache_detail *cd = ((struct handle*)m->private)->cd;
        read_unlock(&cd->hash_lock);
}

static int c_show(struct seq_file *m, void *p)
{
        struct cache_head *cp = p;
        struct cache_detail *cd = ((struct handle*)m->private)->cd;

        if (p == SEQ_START_TOKEN)
                return cd->cache_show(m, cd, NULL);

        ifdebug(CACHE)
                seq_printf(m, "# expiry=%ld refcnt=%d flags=%lx\n",
                           convert_to_wallclock(cp->expiry_time),
                           atomic_read(&cp->ref.refcount), cp->flags);
        cache_get(cp);
        if (cache_check(cd, cp, NULL))
                /* cache_check does a cache_put on failure */
                seq_printf(m, "# ");
        else
                cache_put(cp, cd);

        return cd->cache_show(m, cd, cp);
}

static const struct seq_operations cache_content_op = {
        .start  = c_start,
        .next   = c_next,
        .stop   = c_stop,
        .show   = c_show,
};

static int content_open(struct inode *inode, struct file *file,
                        struct cache_detail *cd)
{
        struct handle *han;

        if (!cd || !try_module_get(cd->owner))
                return -EACCES;
        han = __seq_open_private(file, &cache_content_op, sizeof(*han));
        if (han == NULL) {
                module_put(cd->owner);
                return -ENOMEM;
        }

        han->cd = cd;
        return 0;
}

static int content_release(struct inode *inode, struct file *file,
                struct cache_detail *cd)
{
        int ret = seq_release_private(inode, file);
        module_put(cd->owner);
        return ret;
}

static int open_flush(struct inode *inode, struct file *file,
                        struct cache_detail *cd)
{
        if (!cd || !try_module_get(cd->owner))
                return -EACCES;
        return nonseekable_open(inode, file);
}

static int release_flush(struct inode *inode, struct file *file,
                        struct cache_detail *cd)
{
        module_put(cd->owner);
        return 0;
}

static ssize_t read_flush(struct file *file, char __user *buf,
                          size_t count, loff_t *ppos,
                          struct cache_detail *cd)
{
        char tbuf[20];
        unsigned long p = *ppos;
        size_t len;

        sprintf(tbuf, "%lu\n", convert_to_wallclock(cd->flush_time));
        len = strlen(tbuf);
        if (p >= len)
                return 0;
        len -= p;
        if (len > count)
                len = count;
        if (copy_to_user(buf, (void*)(tbuf+p), len))
                return -EFAULT;
        *ppos += len;
        return len;
}

static ssize_t write_flush(struct file *file, const char __user *buf,
                           size_t count, loff_t *ppos,
                           struct cache_detail *cd)
{
        char tbuf[20];
        char *bp, *ep;

        if (*ppos || count > sizeof(tbuf)-1)
                return -EINVAL;
        if (copy_from_user(tbuf, buf, count))
                return -EFAULT;
        tbuf[count] = 0;
        simple_strtoul(tbuf, &ep, 0);
        if (*ep && *ep != '\n')
                return -EINVAL;

        bp = tbuf;
        cd->flush_time = get_expiry(&bp);
        cd->nextcheck = seconds_since_boot();
        cache_flush();

        *ppos += count;
        return count;
}

static ssize_t cache_read_procfs(struct file *filp, char __user *buf,
                                 size_t count, loff_t *ppos)
{
        struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;

        return cache_read(filp, buf, count, ppos, cd);
}

static ssize_t cache_write_procfs(struct file *filp, const char __user *buf,
                                  size_t count, loff_t *ppos)
{
        struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;

        return cache_write(filp, buf, count, ppos, cd);
}

static unsigned int cache_poll_procfs(struct file *filp, poll_table *wait)
{
        struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;

        return cache_poll(filp, wait, cd);
}

static long cache_ioctl_procfs(struct file *filp,
                               unsigned int cmd, unsigned long arg)
{
        long ret;
        struct inode *inode = filp->f_path.dentry->d_inode;
        struct cache_detail *cd = PDE(inode)->data;

        lock_kernel();
        ret = cache_ioctl(inode, filp, cmd, arg, cd);
        unlock_kernel();

        return ret;
}

static int cache_open_procfs(struct inode *inode, struct file *filp)
{
        struct cache_detail *cd = PDE(inode)->data;

        return cache_open(inode, filp, cd);
}

static int cache_release_procfs(struct inode *inode, struct file *filp)
{
        struct cache_detail *cd = PDE(inode)->data;

        return cache_release(inode, filp, cd);
}

static const struct file_operations cache_file_operations_procfs = {
        .owner          = THIS_MODULE,
        .llseek         = no_llseek,
        .read           = cache_read_procfs,
        .write          = cache_write_procfs,
        .poll           = cache_poll_procfs,
        .unlocked_ioctl = cache_ioctl_procfs, /* for FIONREAD */
        .open           = cache_open_procfs,
        .release        = cache_release_procfs,
};

static int content_open_procfs(struct inode *inode, struct file *filp)
{
        struct cache_detail *cd = PDE(inode)->data;

        return content_open(inode, filp, cd);
}

static int content_release_procfs(struct inode *inode, struct file *filp)
{
        struct cache_detail *cd = PDE(inode)->data;

        return content_release(inode, filp, cd);
}

static const struct file_operations content_file_operations_procfs = {
        .open           = content_open_procfs,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = content_release_procfs,
};

static int open_flush_procfs(struct inode *inode, struct file *filp)
{
        struct cache_detail *cd = PDE(inode)->data;

        return open_flush(inode, filp, cd);
}

static int release_flush_procfs(struct inode *inode, struct file *filp)
{
        struct cache_detail *cd = PDE(inode)->data;

        return release_flush(inode, filp, cd);
}

static ssize_t read_flush_procfs(struct file *filp, char __user *buf,
                            size_t count, loff_t *ppos)
{
        struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;

        return read_flush(filp, buf, count, ppos, cd);
}

static ssize_t write_flush_procfs(struct file *filp,
                                  const char __user *buf,
                                  size_t count, loff_t *ppos)
{
        struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;

        return write_flush(filp, buf, count, ppos, cd);
}

static const struct file_operations cache_flush_operations_procfs = {
        .open           = open_flush_procfs,
        .read           = read_flush_procfs,
        .write          = write_flush_procfs,
        .release        = release_flush_procfs,
};

static void remove_cache_proc_entries(struct cache_detail *cd)
{
        if (cd->u.procfs.proc_ent == NULL)
                return;
        if (cd->u.procfs.flush_ent)
                remove_proc_entry("flush", cd->u.procfs.proc_ent);
        if (cd->u.procfs.channel_ent)
                remove_proc_entry("channel", cd->u.procfs.proc_ent);
        if (cd->u.procfs.content_ent)
                remove_proc_entry("content", cd->u.procfs.proc_ent);
        cd->u.procfs.proc_ent = NULL;
        remove_proc_entry(cd->name, proc_net_rpc);
}

#ifdef CONFIG_PROC_FS
static int create_cache_proc_entries(struct cache_detail *cd)
{
        struct proc_dir_entry *p;

        cd->u.procfs.proc_ent = proc_mkdir(cd->name, proc_net_rpc);
        if (cd->u.procfs.proc_ent == NULL)
                goto out_nomem;
        cd->u.procfs.channel_ent = NULL;
        cd->u.procfs.content_ent = NULL;

        p = proc_create_data("flush", S_IFREG|S_IRUSR|S_IWUSR,
                             cd->u.procfs.proc_ent,
                             &cache_flush_operations_procfs, cd);
        cd->u.procfs.flush_ent = p;
        if (p == NULL)
                goto out_nomem;

        if (cd->cache_upcall || cd->cache_parse) {
                p = proc_create_data("channel", S_IFREG|S_IRUSR|S_IWUSR,
                                     cd->u.procfs.proc_ent,
                                     &cache_file_operations_procfs, cd);
                cd->u.procfs.channel_ent = p;
                if (p == NULL)
                        goto out_nomem;
        }
        if (cd->cache_show) {
                p = proc_create_data("content", S_IFREG|S_IRUSR|S_IWUSR,
                                cd->u.procfs.proc_ent,
                                &content_file_operations_procfs, cd);
                cd->u.procfs.content_ent = p;
                if (p == NULL)
                        goto out_nomem;
        }
        return 0;
out_nomem:
        remove_cache_proc_entries(cd);
        return -ENOMEM;
}
#else /* CONFIG_PROC_FS */
static int create_cache_proc_entries(struct cache_detail *cd)
{
        return 0;
}
#endif

void __init cache_initialize(void)
{
        INIT_DELAYED_WORK_DEFERRABLE(&cache_cleaner, do_cache_clean);
}

int cache_register(struct cache_detail *cd)
{
        int ret;

        sunrpc_init_cache_detail(cd);
        ret = create_cache_proc_entries(cd);
        if (ret)
                sunrpc_destroy_cache_detail(cd);
        return ret;
}
EXPORT_SYMBOL_GPL(cache_register);

void cache_unregister(struct cache_detail *cd)
{
        remove_cache_proc_entries(cd);
        sunrpc_destroy_cache_detail(cd);
}
EXPORT_SYMBOL_GPL(cache_unregister);

static ssize_t cache_read_pipefs(struct file *filp, char __user *buf,
                                 size_t count, loff_t *ppos)
{
        struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private;

        return cache_read(filp, buf, count, ppos, cd);
}

static ssize_t cache_write_pipefs(struct file *filp, const char __user *buf,
                                  size_t count, loff_t *ppos)
{
        struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private;

        return cache_write(filp, buf, count, ppos, cd);
}

static unsigned int cache_poll_pipefs(struct file *filp, poll_table *wait)
{
        struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private;

        return cache_poll(filp, wait, cd);
}

static long cache_ioctl_pipefs(struct file *filp,
                              unsigned int cmd, unsigned long arg)
{
        struct inode *inode = filp->f_dentry->d_inode;
        struct cache_detail *cd = RPC_I(inode)->private;
        long ret;

        lock_kernel();
        ret = cache_ioctl(inode, filp, cmd, arg, cd);
        unlock_kernel();

        return ret;
}

static int cache_open_pipefs(struct inode *inode, struct file *filp)
{
        struct cache_detail *cd = RPC_I(inode)->private;

        return cache_open(inode, filp, cd);
}

static int cache_release_pipefs(struct inode *inode, struct file *filp)
{
        struct cache_detail *cd = RPC_I(inode)->private;

        return cache_release(inode, filp, cd);
}

const struct file_operations cache_file_operations_pipefs = {
        .owner          = THIS_MODULE,
        .llseek         = no_llseek,
        .read           = cache_read_pipefs,
        .write          = cache_write_pipefs,
        .poll           = cache_poll_pipefs,
        .unlocked_ioctl = cache_ioctl_pipefs, /* for FIONREAD */
        .open           = cache_open_pipefs,
        .release        = cache_release_pipefs,
};

static int content_open_pipefs(struct inode *inode, struct file *filp)
{
        struct cache_detail *cd = RPC_I(inode)->private;

        return content_open(inode, filp, cd);
}

static int content_release_pipefs(struct inode *inode, struct file *filp)
{
        struct cache_detail *cd = RPC_I(inode)->private;

        return content_release(inode, filp, cd);
}

const struct file_operations content_file_operations_pipefs = {
        .open           = content_open_pipefs,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = content_release_pipefs,
};

static int open_flush_pipefs(struct inode *inode, struct file *filp)
{
        struct cache_detail *cd = RPC_I(inode)->private;

        return open_flush(inode, filp, cd);
}

static int release_flush_pipefs(struct inode *inode, struct file *filp)
{
        struct cache_detail *cd = RPC_I(inode)->private;

        return release_flush(inode, filp, cd);
}

static ssize_t read_flush_pipefs(struct file *filp, char __user *buf,
                            size_t count, loff_t *ppos)
{
        struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private;

        return read_flush(filp, buf, count, ppos, cd);
}

static ssize_t write_flush_pipefs(struct file *filp,
                                  const char __user *buf,
                                  size_t count, loff_t *ppos)
{
        struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private;

        return write_flush(filp, buf, count, ppos, cd);
}

const struct file_operations cache_flush_operations_pipefs = {
        .open           = open_flush_pipefs,
        .read           = read_flush_pipefs,
        .write          = write_flush_pipefs,
        .release        = release_flush_pipefs,
};

int sunrpc_cache_register_pipefs(struct dentry *parent,
                                 const char *name, mode_t umode,
                                 struct cache_detail *cd)
{
        struct qstr q;
        struct dentry *dir;
        int ret = 0;

        sunrpc_init_cache_detail(cd);
        q.name = name;
        q.len = strlen(name);
        q.hash = full_name_hash(q.name, q.len);
        dir = rpc_create_cache_dir(parent, &q, umode, cd);
        if (!IS_ERR(dir))
                cd->u.pipefs.dir = dir;
        else {
                sunrpc_destroy_cache_detail(cd);
                ret = PTR_ERR(dir);
        }
        return ret;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_register_pipefs);

void sunrpc_cache_unregister_pipefs(struct cache_detail *cd)
{
        rpc_remove_cache_dir(cd->u.pipefs.dir);
        cd->u.pipefs.dir = NULL;
        sunrpc_destroy_cache_detail(cd);
}
EXPORT_SYMBOL_GPL(sunrpc_cache_unregister_pipefs);