[PATCH] mempool: use mempool_create_slab_pool()
/*
 * linux/net/sunrpc/sched.c
 *
 * Scheduling for synchronous and asynchronous RPC requests.
 *
 * Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de>
 *
 * TCP NFS related read + write fixes
 * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
 */

#include <linux/module.h>

#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/xprt.h>

#ifdef RPC_DEBUG
#define RPCDBG_FACILITY         RPCDBG_SCHED
#define RPC_TASK_MAGIC_ID       0xf00baa
static int                      rpc_task_id;
#endif

/*
 * RPC slabs and memory pools
 */
#define RPC_BUFFER_MAXSIZE      (2048)
#define RPC_BUFFER_POOLSIZE     (8)
#define RPC_TASK_POOLSIZE       (8)
static kmem_cache_t     *rpc_task_slabp __read_mostly;
static kmem_cache_t     *rpc_buffer_slabp __read_mostly;
static mempool_t        *rpc_task_mempool __read_mostly;
static mempool_t        *rpc_buffer_mempool __read_mostly;
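
/*
 * The slab caches give fast, cache-aligned allocation, while the
 * mempools layered on top reserve a minimum number of objects so RPC
 * (and hence NFS writeback) can still make progress under memory
 * pressure.  A minimal sketch of the pattern, using hypothetical names
 * that are not part of this file:
 *
 *      kmem_cache_t *slab;
 *      mempool_t *pool;
 *      struct example *p;
 *
 *      slab = kmem_cache_create("example", sizeof(struct example),
 *                               0, SLAB_HWCACHE_ALIGN, NULL, NULL);
 *      pool = mempool_create_slab_pool(8, slab);
 *      p = mempool_alloc(pool, GFP_NOFS);   // may dip into the reserve
 *      ...
 *      mempool_free(p, pool);               // refills the reserve first
 */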

static void                     __rpc_default_timer(struct rpc_task *task);
static void                     rpciod_killall(void);
static void                     rpc_async_schedule(void *);

/*
 * RPC tasks that create another task (e.g. for contacting the portmapper)
 * will wait on this queue for their child's completion
 */
static RPC_WAITQ(childq, "childq");

/*
 * RPC tasks sit here while waiting for conditions to improve.
 */
static RPC_WAITQ(delay_queue, "delayq");

/*
 * All RPC tasks are linked into this list
 */
static LIST_HEAD(all_tasks);

/*
 * rpciod-related stuff
 */
static DEFINE_MUTEX(rpciod_mutex);
static unsigned int             rpciod_users;
struct workqueue_struct *rpciod_workqueue;

/*
 * Spinlock for other critical sections of code.
 */
static DEFINE_SPINLOCK(rpc_sched_lock);

/*
 * Disable the timer for a given RPC task. Should be called with
 * queue->lock and bh_disabled in order to avoid races within
 * rpc_run_timer().
 */
static inline void
__rpc_disable_timer(struct rpc_task *task)
{
        dprintk("RPC: %4d disabling timer\n", task->tk_pid);
        task->tk_timeout_fn = NULL;
        task->tk_timeout = 0;
}

/*
 * Run a timeout function.
 * We use the callback in order to allow __rpc_wake_up_task()
 * and friends to disable the timer synchronously on SMP systems
 * without calling del_timer_sync(). The latter could cause a
 * deadlock if called while we're holding spinlocks...
 */
static void rpc_run_timer(struct rpc_task *task)
{
        void (*callback)(struct rpc_task *);

        callback = task->tk_timeout_fn;
        task->tk_timeout_fn = NULL;
        if (callback && RPC_IS_QUEUED(task)) {
                dprintk("RPC: %4d running timer\n", task->tk_pid);
                callback(task);
        }
        smp_mb__before_clear_bit();
        clear_bit(RPC_TASK_HAS_TIMER, &task->tk_runstate);
        smp_mb__after_clear_bit();
}

/*
 * Set up a timer for the current task.
 */
static inline void
__rpc_add_timer(struct rpc_task *task, rpc_action timer)
{
        if (!task->tk_timeout)
                return;

        dprintk("RPC: %4d setting alarm for %lu ms\n",
                        task->tk_pid, task->tk_timeout * 1000 / HZ);

        if (timer)
                task->tk_timeout_fn = timer;
        else
                task->tk_timeout_fn = __rpc_default_timer;
        set_bit(RPC_TASK_HAS_TIMER, &task->tk_runstate);
        mod_timer(&task->tk_timer, jiffies + task->tk_timeout);
}

/*
 * Delete any timer for the current task. Because we use del_timer_sync(),
 * this function should never be called while holding queue->lock.
 */
static void
rpc_delete_timer(struct rpc_task *task)
{
        if (RPC_IS_QUEUED(task))
                return;
        if (test_and_clear_bit(RPC_TASK_HAS_TIMER, &task->tk_runstate)) {
                del_singleshot_timer_sync(&task->tk_timer);
                dprintk("RPC: %4d deleting timer\n", task->tk_pid);
        }
}

/*
 * Add new request to a priority queue.
 */
static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue, struct rpc_task *task)
{
        struct list_head *q;
        struct rpc_task *t;

        INIT_LIST_HEAD(&task->u.tk_wait.links);
        q = &queue->tasks[task->tk_priority];
        if (unlikely(task->tk_priority > queue->maxpriority))
                q = &queue->tasks[queue->maxpriority];
        list_for_each_entry(t, q, u.tk_wait.list) {
                if (t->tk_cookie == task->tk_cookie) {
                        list_add_tail(&task->u.tk_wait.list, &t->u.tk_wait.links);
                        return;
                }
        }
        list_add_tail(&task->u.tk_wait.list, q);
}
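
/*
 * Note on the loop above: tasks that share a cookie are chained off the
 * first queued task with that cookie (via u.tk_wait.links) instead of
 * taking their own slot in the priority list.  For example (illustration
 * only), if tasks with cookies A, B arrive as A1 B1 A2, the list holds
 * A1 -> B1 with A2 hanging off A1, letting the wake-up side service a
 * batch of A's before moving on to B.
 */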

/*
 * Add new request to wait queue.
 *
 * Swapper tasks always get inserted at the head of the queue.
 * This should avoid many nasty memory deadlocks and hopefully
 * improve overall performance.
 * Everyone else gets appended to the queue to ensure proper FIFO behavior.
 */
static void __rpc_add_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
{
        BUG_ON(RPC_IS_QUEUED(task));

        if (RPC_IS_PRIORITY(queue))
                __rpc_add_wait_queue_priority(queue, task);
        else if (RPC_IS_SWAPPER(task))
                list_add(&task->u.tk_wait.list, &queue->tasks[0]);
        else
                list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
        task->u.tk_wait.rpc_waitq = queue;
        queue->qlen++;
        rpc_set_queued(task);

        dprintk("RPC: %4d added to queue %p \"%s\"\n",
                                task->tk_pid, queue, rpc_qname(queue));
}

/*
 * Remove request from a priority queue.
 */
static void __rpc_remove_wait_queue_priority(struct rpc_task *task)
{
        struct rpc_task *t;

        if (!list_empty(&task->u.tk_wait.links)) {
                t = list_entry(task->u.tk_wait.links.next, struct rpc_task, u.tk_wait.list);
                list_move(&t->u.tk_wait.list, &task->u.tk_wait.list);
                list_splice_init(&task->u.tk_wait.links, &t->u.tk_wait.links);
        }
        list_del(&task->u.tk_wait.list);
}

/*
 * Remove request from queue.
 * Note: must be called with spin lock held.
 */
static void __rpc_remove_wait_queue(struct rpc_task *task)
{
        struct rpc_wait_queue *queue;
        queue = task->u.tk_wait.rpc_waitq;

        if (RPC_IS_PRIORITY(queue))
                __rpc_remove_wait_queue_priority(task);
        else
                list_del(&task->u.tk_wait.list);
        queue->qlen--;
        dprintk("RPC: %4d removed from queue %p \"%s\"\n",
                                task->tk_pid, queue, rpc_qname(queue));
}

static inline void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
{
        queue->priority = priority;
        queue->count = 1 << (priority * 2);
}

static inline void rpc_set_waitqueue_cookie(struct rpc_wait_queue *queue, unsigned long cookie)
{
        queue->cookie = cookie;
        queue->nr = RPC_BATCH_COUNT;
}

static inline void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
{
        rpc_set_waitqueue_priority(queue, queue->maxpriority);
        rpc_set_waitqueue_cookie(queue, 0);
}

static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, int maxprio)
{
        int i;

        spin_lock_init(&queue->lock);
        for (i = 0; i < ARRAY_SIZE(queue->tasks); i++)
                INIT_LIST_HEAD(&queue->tasks[i]);
        queue->maxpriority = maxprio;
        rpc_reset_waitqueue_priority(queue);
#ifdef RPC_DEBUG
        queue->name = qname;
#endif
}

void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
        __rpc_init_priority_wait_queue(queue, qname, RPC_PRIORITY_HIGH);
}

void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
        __rpc_init_priority_wait_queue(queue, qname, 0);
}
EXPORT_SYMBOL(rpc_init_wait_queue);

static int rpc_wait_bit_interruptible(void *word)
{
        if (signal_pending(current))
                return -ERESTARTSYS;
        schedule();
        return 0;
}

/*
 * Mark an RPC call as having completed by clearing the 'active' bit
 */
static inline void rpc_mark_complete_task(struct rpc_task *task)
{
        rpc_clear_active(task);
        wake_up_bit(&task->tk_runstate, RPC_TASK_ACTIVE);
}

/*
 * Allow callers to wait for completion of an RPC call
 */
int __rpc_wait_for_completion_task(struct rpc_task *task, int (*action)(void *))
{
        if (action == NULL)
                action = rpc_wait_bit_interruptible;
        return wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
                        action, TASK_INTERRUPTIBLE);
}
EXPORT_SYMBOL(__rpc_wait_for_completion_task);
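
/*
 * Hypothetical caller sketch (not part of this file): start a task and
 * block until it has run to completion:
 *
 *      atomic_inc(&task->tk_count);    // hold the task across the wait
 *      rpc_execute(task);
 *      if (__rpc_wait_for_completion_task(task, NULL) == -ERESTARTSYS)
 *              ...                     // interrupted by a signal
 *      rpc_release_task(task);         // drop the extra reference
 */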

/*
 * Make an RPC task runnable.
 *
 * Note: If the task is ASYNC, this must be called with
 * the spinlock held to protect the wait queue operation.
 */
static void rpc_make_runnable(struct rpc_task *task)
{
        int do_ret;

        BUG_ON(task->tk_timeout_fn);
        do_ret = rpc_test_and_set_running(task);
        rpc_clear_queued(task);
        if (do_ret)
                return;
        if (RPC_IS_ASYNC(task)) {
                int status;

                INIT_WORK(&task->u.tk_work, rpc_async_schedule, (void *)task);
                status = queue_work(task->tk_workqueue, &task->u.tk_work);
                if (status < 0) {
                        printk(KERN_WARNING "RPC: failed to add task to queue: error: %d!\n", status);
                        task->tk_status = status;
                        return;
                }
        } else
                wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
}

/*
 * Place a newly initialized task on the workqueue.
 */
static inline void
rpc_schedule_run(struct rpc_task *task)
{
        rpc_set_active(task);
        rpc_make_runnable(task);
}

/*
 * Prepare for sleeping on a wait queue.
 * By always appending tasks to the list we ensure FIFO behavior.
 * NB: An RPC task will only receive interrupt-driven events as long
 * as it's on a wait queue.
 */
static void __rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
                        rpc_action action, rpc_action timer)
{
        dprintk("RPC: %4d sleep_on(queue \"%s\" time %ld)\n", task->tk_pid,
                                rpc_qname(q), jiffies);

        if (!RPC_IS_ASYNC(task) && !RPC_IS_ACTIVATED(task)) {
                printk(KERN_ERR "RPC: Inactive synchronous task put to sleep!\n");
                return;
        }

        /* Mark the task as being activated if so needed */
        rpc_set_active(task);

        __rpc_add_wait_queue(q, task);

        BUG_ON(task->tk_callback != NULL);
        task->tk_callback = action;
        __rpc_add_timer(task, timer);
}

void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
                                rpc_action action, rpc_action timer)
{
        /*
         * Protect the queue operations.
         */
        spin_lock_bh(&q->lock);
        __rpc_sleep_on(q, task, action, timer);
        spin_unlock_bh(&q->lock);
}
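
/*
 * Typical usage sketch (hypothetical, not part of this file): a state
 * function parks its task until a resource frees up, and the owner of
 * the resource later wakes the queue:
 *
 *      static void call_reserve(struct rpc_task *task)
 *      {
 *              task->tk_action = call_reserveresult;   // next FSM step
 *              if (!slot_available(xprt))              // assumed predicate
 *                      rpc_sleep_on(&xprt->backlog, task, NULL, NULL);
 *      }
 *
 *      // ...elsewhere, when a slot is released:
 *      rpc_wake_up_next(&xprt->backlog);
 */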

/**
 * __rpc_do_wake_up_task - wake up a single rpc_task
 * @task: task to be woken up
 *
 * Caller must hold queue->lock, and have cleared the task queued flag.
 */
static void __rpc_do_wake_up_task(struct rpc_task *task)
{
        dprintk("RPC: %4d __rpc_wake_up_task (now %ld)\n", task->tk_pid, jiffies);

#ifdef RPC_DEBUG
        BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID);
#endif
        /* Has the task been executed yet? If not, we cannot wake it up! */
        if (!RPC_IS_ACTIVATED(task)) {
                printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task);
                return;
        }

        __rpc_disable_timer(task);
        __rpc_remove_wait_queue(task);

        rpc_make_runnable(task);

        dprintk("RPC:      __rpc_wake_up_task done\n");
}

/*
 * Wake up the specified task
 */
static void __rpc_wake_up_task(struct rpc_task *task)
{
        if (rpc_start_wakeup(task)) {
                if (RPC_IS_QUEUED(task))
                        __rpc_do_wake_up_task(task);
                rpc_finish_wakeup(task);
        }
}

/*
 * Default timeout handler if none specified by user
 */
static void
__rpc_default_timer(struct rpc_task *task)
{
        dprintk("RPC: %d timeout (default timer)\n", task->tk_pid);
        task->tk_status = -ETIMEDOUT;
        rpc_wake_up_task(task);
}

/*
 * Wake up the specified task
 */
void rpc_wake_up_task(struct rpc_task *task)
{
        if (rpc_start_wakeup(task)) {
                if (RPC_IS_QUEUED(task)) {
                        struct rpc_wait_queue *queue = task->u.tk_wait.rpc_waitq;

                        spin_lock_bh(&queue->lock);
                        __rpc_do_wake_up_task(task);
                        spin_unlock_bh(&queue->lock);
                }
                rpc_finish_wakeup(task);
        }
}

/*
 * Wake up the next task on a priority queue.
 */
static struct rpc_task * __rpc_wake_up_next_priority(struct rpc_wait_queue *queue)
{
        struct list_head *q;
        struct rpc_task *task;

        /*
         * Service a batch of tasks from a single cookie.
         */
        q = &queue->tasks[queue->priority];
        if (!list_empty(q)) {
                task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
                if (queue->cookie == task->tk_cookie) {
                        if (--queue->nr)
                                goto out;
                        list_move_tail(&task->u.tk_wait.list, q);
                }
                /*
                 * Check if we need to switch queues.
                 */
                if (--queue->count)
                        goto new_cookie;
        }

        /*
         * Service the next queue.
         */
        do {
                if (q == &queue->tasks[0])
                        q = &queue->tasks[queue->maxpriority];
                else
                        q = q - 1;
                if (!list_empty(q)) {
                        task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
                        goto new_queue;
                }
        } while (q != &queue->tasks[queue->priority]);

        rpc_reset_waitqueue_priority(queue);
        return NULL;

new_queue:
        rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));
new_cookie:
        rpc_set_waitqueue_cookie(queue, task->tk_cookie);
out:
        __rpc_wake_up_task(task);
        return task;
}
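
/*
 * Worked example (illustration only): after a reset, a queue with
 * maxpriority == 2 starts scanning at priority 2 with
 * count = 1 << (2 * 2) = 16.  Each new cookie may be served for up to
 * RPC_BATCH_COUNT wake-ups (queue->nr) before its head task is rotated
 * to the tail; once count is exhausted the scan drops to the next lower
 * priority, wrapping from tasks[0] back to tasks[maxpriority], so lower
 * priorities still make progress while higher ones get geometrically
 * larger shares.
 */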

/*
 * Wake up the next task on the wait queue.
 */
struct rpc_task * rpc_wake_up_next(struct rpc_wait_queue *queue)
{
        struct rpc_task *task = NULL;

        dprintk("RPC:      wake_up_next(%p \"%s\")\n", queue, rpc_qname(queue));
        spin_lock_bh(&queue->lock);
        if (RPC_IS_PRIORITY(queue))
                task = __rpc_wake_up_next_priority(queue);
        else {
                task_for_first(task, &queue->tasks[0])
                        __rpc_wake_up_task(task);
        }
        spin_unlock_bh(&queue->lock);

        return task;
}

/**
 * rpc_wake_up - wake up all rpc_tasks
 * @queue: rpc_wait_queue on which the tasks are sleeping
 *
 * Grabs queue->lock
 */
void rpc_wake_up(struct rpc_wait_queue *queue)
{
        struct rpc_task *task, *next;
        struct list_head *head;

        spin_lock_bh(&queue->lock);
        head = &queue->tasks[queue->maxpriority];
        for (;;) {
                list_for_each_entry_safe(task, next, head, u.tk_wait.list)
                        __rpc_wake_up_task(task);
                if (head == &queue->tasks[0])
                        break;
                head--;
        }
        spin_unlock_bh(&queue->lock);
}

/**
 * rpc_wake_up_status - wake up all rpc_tasks and set their status value.
 * @queue: rpc_wait_queue on which the tasks are sleeping
 * @status: status value to set
 *
 * Grabs queue->lock
 */
void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
{
        struct rpc_task *task, *next;
        struct list_head *head;

        spin_lock_bh(&queue->lock);
        head = &queue->tasks[queue->maxpriority];
        for (;;) {
                list_for_each_entry_safe(task, next, head, u.tk_wait.list) {
                        task->tk_status = status;
                        __rpc_wake_up_task(task);
                }
                if (head == &queue->tasks[0])
                        break;
                head--;
        }
        spin_unlock_bh(&queue->lock);
}

/*
 * Run a task at a later time
 */
static void     __rpc_atrun(struct rpc_task *);
void
rpc_delay(struct rpc_task *task, unsigned long delay)
{
        task->tk_timeout = delay;
        rpc_sleep_on(&delay_queue, task, NULL, __rpc_atrun);
}

static void
__rpc_atrun(struct rpc_task *task)
{
        task->tk_status = 0;
        rpc_wake_up_task(task);
}
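
/*
 * Hypothetical retry sketch (not part of this file): a state function
 * that backs off for a few seconds before re-running the same step:
 *
 *      static void call_retry(struct rpc_task *task)
 *      {
 *              task->tk_action = call_transmit;        // step to re-run
 *              rpc_delay(task, 3 * HZ);                // delay is in jiffies
 *      }
 */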

/*
 * Helper to call task->tk_ops->rpc_call_prepare
 */
static void rpc_prepare_task(struct rpc_task *task)
{
        task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
}

/*
 * Helper that calls task->tk_ops->rpc_call_done if it exists
 */
void rpc_exit_task(struct rpc_task *task)
{
        task->tk_action = NULL;
        if (task->tk_ops->rpc_call_done != NULL) {
                task->tk_ops->rpc_call_done(task, task->tk_calldata);
                if (task->tk_action != NULL) {
                        WARN_ON(RPC_ASSASSINATED(task));
                        /* Always release the RPC slot and buffer memory */
                        xprt_release(task);
                }
        }
}
EXPORT_SYMBOL(rpc_exit_task);

/*
 * This is the RPC `scheduler' (or rather, the finite state machine).
 */
static int __rpc_execute(struct rpc_task *task)
{
        int             status = 0;

        dprintk("RPC: %4d rpc_execute flgs %x\n",
                                task->tk_pid, task->tk_flags);

        BUG_ON(RPC_IS_QUEUED(task));

        for (;;) {
                /*
                 * Garbage collection of pending timers...
                 */
                rpc_delete_timer(task);

                /*
                 * Execute any pending callback.
                 */
                if (RPC_DO_CALLBACK(task)) {
                        /* Define a callback save pointer */
                        void (*save_callback)(struct rpc_task *);

                        /*
                         * If a callback exists, save it, reset it,
                         * call it.
                         * The save is needed to stop from resetting
                         * another callback set within the callback handler
                         * - Dave
                         */
                        save_callback = task->tk_callback;
                        task->tk_callback = NULL;
                        lock_kernel();
                        save_callback(task);
                        unlock_kernel();
                }

                /*
                 * Perform the next FSM step.
                 * tk_action may be NULL when the task has been killed
                 * by someone else.
                 */
                if (!RPC_IS_QUEUED(task)) {
                        if (task->tk_action == NULL)
                                break;
                        lock_kernel();
                        task->tk_action(task);
                        unlock_kernel();
                }

                /*
                 * Lockless check for whether task is sleeping or not.
                 */
                if (!RPC_IS_QUEUED(task))
                        continue;
                rpc_clear_running(task);
                if (RPC_IS_ASYNC(task)) {
                        /* Careful! we may have raced... */
                        if (RPC_IS_QUEUED(task))
                                return 0;
                        if (rpc_test_and_set_running(task))
                                return 0;
                        continue;
                }

                /* sync task: sleep here */
                dprintk("RPC: %4d sync task going to sleep\n", task->tk_pid);
                /* Note: Caller should be using rpc_clnt_sigmask() */
                status = out_of_line_wait_on_bit(&task->tk_runstate,
                                RPC_TASK_QUEUED, rpc_wait_bit_interruptible,
                                TASK_INTERRUPTIBLE);
                if (status == -ERESTARTSYS) {
                        /*
                         * When a sync task receives a signal, it exits with
                         * -ERESTARTSYS. In order to catch any callbacks that
                         * clean up after sleeping on some queue, we don't
                         * break the loop here, but go around once more.
                         */
                        dprintk("RPC: %4d got signal\n", task->tk_pid);
                        task->tk_flags |= RPC_TASK_KILLED;
                        rpc_exit(task, -ERESTARTSYS);
                        rpc_wake_up_task(task);
                }
                rpc_set_running(task);
                dprintk("RPC: %4d sync task resuming\n", task->tk_pid);
        }

        dprintk("RPC: %4d, return %d, status %d\n", task->tk_pid, status, task->tk_status);
        /* Wake up anyone who is waiting for task completion */
        rpc_mark_complete_task(task);
        /* Release all resources associated with the task */
        rpc_release_task(task);
        return status;
}
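
/*
 * FSM sketch (illustration only): each tk_action either puts the task
 * to sleep on a wait queue (rpc_sleep_on) or sets task->tk_action to
 * the next step; setting it to NULL, as rpc_exit_task does, terminates
 * the loop above.  A typical chain for a call looks like
 *
 *      call_start -> call_reserve -> ... -> call_transmit -> call_status
 *              -> call_decode -> rpc_exit_task
 *
 * with the intermediate state functions living in net/sunrpc/clnt.c.
 */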

/*
 * User-visible entry point to the scheduler.
 *
 * This may be called recursively if e.g. an async NFS task updates
 * the attributes and finds that dirty pages must be flushed.
 * NOTE: Upon exit of this function the task is guaranteed to be
 *       released. In particular note that tk_release() will have
 *       been called, so your task memory may have been freed.
 */
int
rpc_execute(struct rpc_task *task)
{
        rpc_set_active(task);
        rpc_set_running(task);
        return __rpc_execute(task);
}

static void rpc_async_schedule(void *arg)
{
        __rpc_execute((struct rpc_task *)arg);
}

/**
 * rpc_malloc - allocate an RPC buffer
 * @task: RPC task that will use this buffer
 * @size: requested byte size
 *
 * We try to ensure that some NFS reads and writes can always proceed
 * by using a mempool when allocating 'small' buffers.
 * In order to avoid memory starvation triggering more writebacks of
 * NFS requests, we use GFP_NOFS rather than GFP_KERNEL.
 */
void * rpc_malloc(struct rpc_task *task, size_t size)
{
        struct rpc_rqst *req = task->tk_rqstp;
        gfp_t   gfp;

        if (task->tk_flags & RPC_TASK_SWAPPER)
                gfp = GFP_ATOMIC;
        else
                gfp = GFP_NOFS;

        if (size > RPC_BUFFER_MAXSIZE) {
                req->rq_buffer = kmalloc(size, gfp);
                if (req->rq_buffer)
                        req->rq_bufsize = size;
        } else {
                req->rq_buffer = mempool_alloc(rpc_buffer_mempool, gfp);
                if (req->rq_buffer)
                        req->rq_bufsize = RPC_BUFFER_MAXSIZE;
        }
        return req->rq_buffer;
}
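
/*
 * Hypothetical caller sketch (not part of this file): a transport's
 * buffer-allocation step can back off and retry if both the kmalloc
 * and the mempool reserve come up empty:
 *
 *      if (rpc_malloc(task, bufsiz) == NULL) {
 *              rpc_delay(task, HZ >> 4);       // retry a little later
 *              return;
 *      }
 *      ...
 *      rpc_free(task);         // returns the buffer to its pool
 */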

/**
 * rpc_free - free buffer allocated via rpc_malloc
 * @task: RPC task with a buffer to be freed
 *
 */
void rpc_free(struct rpc_task *task)
{
        struct rpc_rqst *req = task->tk_rqstp;

        if (req->rq_buffer) {
                if (req->rq_bufsize == RPC_BUFFER_MAXSIZE)
                        mempool_free(req->rq_buffer, rpc_buffer_mempool);
                else
                        kfree(req->rq_buffer);
                req->rq_buffer = NULL;
                req->rq_bufsize = 0;
        }
}

/*
 * Creation and deletion of RPC task structures
 */
void rpc_init_task(struct rpc_task *task, struct rpc_clnt *clnt, int flags, const struct rpc_call_ops *tk_ops, void *calldata)
{
        memset(task, 0, sizeof(*task));
        init_timer(&task->tk_timer);
        task->tk_timer.data     = (unsigned long) task;
        task->tk_timer.function = (void (*)(unsigned long)) rpc_run_timer;
        atomic_set(&task->tk_count, 1);
        task->tk_client = clnt;
        task->tk_flags  = flags;
        task->tk_ops = tk_ops;
        if (tk_ops->rpc_call_prepare != NULL)
                task->tk_action = rpc_prepare_task;
        task->tk_calldata = calldata;

        /* Initialize retry counters */
        task->tk_garb_retry = 2;
        task->tk_cred_retry = 2;

        task->tk_priority = RPC_PRIORITY_NORMAL;
        task->tk_cookie = (unsigned long)current;

        /* Initialize workqueue for async tasks */
        task->tk_workqueue = rpciod_workqueue;

        if (clnt) {
                atomic_inc(&clnt->cl_users);
                if (clnt->cl_softrtry)
                        task->tk_flags |= RPC_TASK_SOFT;
                if (!clnt->cl_intr)
                        task->tk_flags |= RPC_TASK_NOINTR;
        }

#ifdef RPC_DEBUG
        task->tk_magic = RPC_TASK_MAGIC_ID;
        task->tk_pid = rpc_task_id++;
#endif
        /* Add to global list of all tasks */
        spin_lock(&rpc_sched_lock);
        list_add_tail(&task->tk_task, &all_tasks);
        spin_unlock(&rpc_sched_lock);

        BUG_ON(task->tk_ops == NULL);

        /* starting timestamp */
        task->tk_start = jiffies;

        dprintk("RPC: %4d new task procpid %d\n", task->tk_pid,
                                current->pid);
}

static struct rpc_task *
rpc_alloc_task(void)
{
        return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS);
}

static void rpc_free_task(struct rpc_task *task)
{
        dprintk("RPC: %4d freeing task\n", task->tk_pid);
        mempool_free(task, rpc_task_mempool);
}

/*
 * Create a new task for the specified client.  We have to
 * clean up after an allocation failure, as the client may
 * have specified "oneshot".
 */
struct rpc_task *rpc_new_task(struct rpc_clnt *clnt, int flags, const struct rpc_call_ops *tk_ops, void *calldata)
{
        struct rpc_task *task;

        task = rpc_alloc_task();
        if (!task)
                goto cleanup;

        rpc_init_task(task, clnt, flags, tk_ops, calldata);

        dprintk("RPC: %4d allocated task\n", task->tk_pid);
        task->tk_flags |= RPC_TASK_DYNAMIC;
out:
        return task;

cleanup:
        /* Check whether to release the client */
        if (clnt) {
                printk("rpc_new_task: failed, users=%d, oneshot=%d\n",
                        atomic_read(&clnt->cl_users), clnt->cl_oneshot);
                atomic_inc(&clnt->cl_users); /* pretend we were used ... */
                rpc_release_client(clnt);
        }
        goto out;
}

void rpc_release_task(struct rpc_task *task)
{
        const struct rpc_call_ops *tk_ops = task->tk_ops;
        void *calldata = task->tk_calldata;

#ifdef RPC_DEBUG
        BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID);
#endif
        if (!atomic_dec_and_test(&task->tk_count))
                return;
        dprintk("RPC: %4d release task\n", task->tk_pid);

        /* Remove from global task list */
        spin_lock(&rpc_sched_lock);
        list_del(&task->tk_task);
        spin_unlock(&rpc_sched_lock);

        BUG_ON(RPC_IS_QUEUED(task));

        /* Synchronously delete any running timer */
        rpc_delete_timer(task);

        /* Release resources */
        if (task->tk_rqstp)
                xprt_release(task);
        if (task->tk_msg.rpc_cred)
                rpcauth_unbindcred(task);
        if (task->tk_client) {
                rpc_release_client(task->tk_client);
                task->tk_client = NULL;
        }

#ifdef RPC_DEBUG
        task->tk_magic = 0;
#endif
        if (task->tk_flags & RPC_TASK_DYNAMIC)
                rpc_free_task(task);
        if (tk_ops->rpc_release)
                tk_ops->rpc_release(calldata);
}

/**
 * rpc_run_task - Allocate a new RPC task, then run rpc_execute against it
 * @clnt: pointer to RPC client
 * @flags: RPC flags
 * @ops: RPC call ops
 * @data: user call data
 */
struct rpc_task *rpc_run_task(struct rpc_clnt *clnt, int flags,
                                        const struct rpc_call_ops *ops,
                                        void *data)
{
        struct rpc_task *task;
        task = rpc_new_task(clnt, flags, ops, data);
        if (task == NULL) {
                if (ops->rpc_release != NULL)
                        ops->rpc_release(data);
                return ERR_PTR(-ENOMEM);
        }
        atomic_inc(&task->tk_count);
        rpc_execute(task);
        return task;
}
EXPORT_SYMBOL(rpc_run_task);
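
/*
 * Hypothetical usage sketch (not part of this file): fire off an async
 * call with completion callbacks, then drop the caller's reference:
 *
 *      static const struct rpc_call_ops my_ops = {     // assumed callbacks
 *              .rpc_call_done = my_done,
 *              .rpc_release = my_release,
 *      };
 *
 *      task = rpc_run_task(clnt, RPC_TASK_ASYNC, &my_ops, my_data);
 *      if (IS_ERR(task))
 *              return PTR_ERR(task);
 *      rpc_release_task(task); // rpc_run_task took an extra reference
 */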

/**
 * rpc_find_parent - find the parent of a child task.
 * @child: child task
 * @parent: parent task
 *
 * Checks that the parent task is still sleeping on the
 * queue 'childq'. If so returns a pointer to the parent.
 * Upon failure returns NULL.
 *
 * Caller must hold childq.lock
 */
static inline struct rpc_task *rpc_find_parent(struct rpc_task *child, struct rpc_task *parent)
{
        struct rpc_task *task;
        struct list_head *le;

        task_for_each(task, le, &childq.tasks[0])
                if (task == parent)
                        return parent;

        return NULL;
}

static void rpc_child_exit(struct rpc_task *child, void *calldata)
{
        struct rpc_task *parent;

        spin_lock_bh(&childq.lock);
        if ((parent = rpc_find_parent(child, calldata)) != NULL) {
                parent->tk_status = child->tk_status;
                __rpc_wake_up_task(parent);
        }
        spin_unlock_bh(&childq.lock);
}

static const struct rpc_call_ops rpc_child_ops = {
        .rpc_call_done = rpc_child_exit,
};

/*
 * Note: rpc_new_task releases the client after a failure.
 */
struct rpc_task *
rpc_new_child(struct rpc_clnt *clnt, struct rpc_task *parent)
{
        struct rpc_task *task;

        task = rpc_new_task(clnt, RPC_TASK_ASYNC | RPC_TASK_CHILD, &rpc_child_ops, parent);
        if (!task)
                goto fail;
        return task;

fail:
        parent->tk_status = -ENOMEM;
        return NULL;
}

void rpc_run_child(struct rpc_task *task, struct rpc_task *child, rpc_action func)
{
        spin_lock_bh(&childq.lock);
        /* N.B. Is it possible for the child to have already finished? */
        __rpc_sleep_on(&childq, task, func, NULL);
        rpc_schedule_run(child);
        spin_unlock_bh(&childq.lock);
}
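
/*
 * Hypothetical parent/child sketch (not part of this file), in the
 * style of the portmapper lookup done on behalf of a parent task:
 *
 *      child = rpc_new_child(pmap_clnt, task);         // async child
 *      if (child == NULL)
 *              return;         // parent->tk_status is already -ENOMEM
 *      rpc_run_child(task, child, pmap_getport_done);  // parent sleeps
 *                                                      // on childq until
 *                                                      // the child exits
 */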

/*
 * Kill all tasks for the given client.
 * XXX: kill their descendants as well?
 */
void rpc_killall_tasks(struct rpc_clnt *clnt)
{
        struct rpc_task *rovr;
        struct list_head *le;

        dprintk("RPC:      killing all tasks for client %p\n", clnt);

        /*
         * Spin lock all_tasks to prevent changes...
         */
        spin_lock(&rpc_sched_lock);
        alltask_for_each(rovr, le, &all_tasks) {
                if (!RPC_IS_ACTIVATED(rovr))
                        continue;
                if (!clnt || rovr->tk_client == clnt) {
                        rovr->tk_flags |= RPC_TASK_KILLED;
                        rpc_exit(rovr, -EIO);
                        rpc_wake_up_task(rovr);
                }
        }
        spin_unlock(&rpc_sched_lock);
}

static DECLARE_MUTEX_LOCKED(rpciod_running);

static void rpciod_killall(void)
{
        unsigned long flags;

        while (!list_empty(&all_tasks)) {
                clear_thread_flag(TIF_SIGPENDING);
                rpc_killall_tasks(NULL);
                flush_workqueue(rpciod_workqueue);
                if (!list_empty(&all_tasks)) {
                        dprintk("rpciod_killall: waiting for tasks to exit\n");
                        yield();
                }
        }

        spin_lock_irqsave(&current->sighand->siglock, flags);
        recalc_sigpending();
        spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

/*
 * Start up the rpciod process if it's not already running.
 */
int
rpciod_up(void)
{
        struct workqueue_struct *wq;
        int error = 0;

        mutex_lock(&rpciod_mutex);
        dprintk("rpciod_up: users %d\n", rpciod_users);
        rpciod_users++;
        if (rpciod_workqueue)
                goto out;
        /*
         * If there's no workqueue yet, we ought to be the first user.
         */
        if (rpciod_users > 1)
                printk(KERN_WARNING "rpciod_up: no workqueue, %d users??\n", rpciod_users);
        /*
         * Create the rpciod workqueue.
         */
        error = -ENOMEM;
        wq = create_workqueue("rpciod");
        if (wq == NULL) {
                printk(KERN_WARNING "rpciod_up: create workqueue failed, error=%d\n", error);
                rpciod_users--;
                goto out;
        }
        rpciod_workqueue = wq;
        error = 0;
out:
        mutex_unlock(&rpciod_mutex);
        return error;
}

void
rpciod_down(void)
{
        mutex_lock(&rpciod_mutex);
        dprintk("rpciod_down: users %d\n", rpciod_users);
        if (rpciod_users) {
                if (--rpciod_users)
                        goto out;
        } else
                printk(KERN_WARNING "rpciod_down: no users??\n");

        if (!rpciod_workqueue) {
                dprintk("rpciod_down: Nothing to do!\n");
                goto out;
        }
        rpciod_killall();

        destroy_workqueue(rpciod_workqueue);
        rpciod_workqueue = NULL;
out:
        mutex_unlock(&rpciod_mutex);
}

#ifdef RPC_DEBUG
void rpc_show_tasks(void)
{
        struct list_head *le;
        struct rpc_task *t;

        spin_lock(&rpc_sched_lock);
        if (list_empty(&all_tasks)) {
                spin_unlock(&rpc_sched_lock);
                return;
        }
        printk("-pid- proc flgs status -client- -prog- --rqstp- -timeout "
                "-rpcwait -action- ---ops--\n");
        alltask_for_each(t, le, &all_tasks) {
                const char *rpc_waitq = "none";

                if (RPC_IS_QUEUED(t))
                        rpc_waitq = rpc_qname(t->u.tk_wait.rpc_waitq);

                printk("%05d %04d %04x %06d %8p %6d %8p %08ld %8s %8p %8p\n",
                        t->tk_pid,
                        (t->tk_msg.rpc_proc ? t->tk_msg.rpc_proc->p_proc : -1),
                        t->tk_flags, t->tk_status,
                        t->tk_client,
                        (t->tk_client ? t->tk_client->cl_prog : 0),
                        t->tk_rqstp, t->tk_timeout,
                        rpc_waitq,
                        t->tk_action, t->tk_ops);
        }
        spin_unlock(&rpc_sched_lock);
}
#endif

void
rpc_destroy_mempool(void)
{
        if (rpc_buffer_mempool)
                mempool_destroy(rpc_buffer_mempool);
        if (rpc_task_mempool)
                mempool_destroy(rpc_task_mempool);
        if (rpc_task_slabp && kmem_cache_destroy(rpc_task_slabp))
                printk(KERN_INFO "rpc_task: not all structures were freed\n");
        if (rpc_buffer_slabp && kmem_cache_destroy(rpc_buffer_slabp))
                printk(KERN_INFO "rpc_buffers: not all structures were freed\n");
}

int
rpc_init_mempool(void)
{
        rpc_task_slabp = kmem_cache_create("rpc_tasks",
                                             sizeof(struct rpc_task),
                                             0, SLAB_HWCACHE_ALIGN,
                                             NULL, NULL);
        if (!rpc_task_slabp)
                goto err_nomem;
        rpc_buffer_slabp = kmem_cache_create("rpc_buffers",
                                             RPC_BUFFER_MAXSIZE,
                                             0, SLAB_HWCACHE_ALIGN,
                                             NULL, NULL);
        if (!rpc_buffer_slabp)
                goto err_nomem;
        rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE,
                                                    rpc_task_slabp);
        if (!rpc_task_mempool)
                goto err_nomem;
        rpc_buffer_mempool = mempool_create_slab_pool(RPC_BUFFER_POOLSIZE,
                                                      rpc_buffer_slabp);
        if (!rpc_buffer_mempool)
                goto err_nomem;
        return 0;
err_nomem:
        rpc_destroy_mempool();
        return -ENOMEM;
}
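
/*
 * Note on the patch title: mempool_create_slab_pool(min_nr, slab) is
 * shorthand for mempool_create(min_nr, mempool_alloc_slab,
 * mempool_free_slab, slab), so the open-coded calls it replaces would
 * have looked roughly like
 *
 *      rpc_task_mempool = mempool_create(RPC_TASK_POOLSIZE,
 *                                        mempool_alloc_slab,
 *                                        mempool_free_slab,
 *                                        rpc_task_slabp);
 *
 * (sketch for comparison only, reconstructed from the helper's
 * definition rather than from this file's history).
 */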