/*
 * linux/net/sunrpc/sched.c
 *
 * Scheduling for synchronous and asynchronous RPC requests.
 *
 * Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de>
 *
 * TCP NFS related read + write fixes
 * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
 */

#include <linux/module.h>

#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/spinlock.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/xprt.h>

#ifdef RPC_DEBUG
#define RPCDBG_FACILITY         RPCDBG_SCHED
#define RPC_TASK_MAGIC_ID       0xf00baa
static int                      rpc_task_id;
#endif

/*
 * RPC slabs and memory pools
 */
#define RPC_BUFFER_MAXSIZE      (2048)
#define RPC_BUFFER_POOLSIZE     (8)
#define RPC_TASK_POOLSIZE       (8)
static kmem_cache_t     *rpc_task_slabp __read_mostly;
static kmem_cache_t     *rpc_buffer_slabp __read_mostly;
static mempool_t        *rpc_task_mempool __read_mostly;
static mempool_t        *rpc_buffer_mempool __read_mostly;

static void                     __rpc_default_timer(struct rpc_task *task);
static void                     rpciod_killall(void);
static void                     rpc_async_schedule(void *);
/*
 * RPC tasks that create another task (e.g. for contacting the portmapper)
 * will wait on this queue for their child's completion.
 */
static RPC_WAITQ(childq, "childq");
/*
 * RPC tasks sit here while waiting for conditions to improve.
 */
static RPC_WAITQ(delay_queue, "delayq");

/*
 * All RPC tasks are linked into this list
 */
static LIST_HEAD(all_tasks);

/*
 * rpciod-related stuff
 */
static DECLARE_MUTEX(rpciod_sema);
static unsigned int             rpciod_users;
struct workqueue_struct *rpciod_workqueue;

/*
 * Spinlock for other critical sections of code.
 */
static DEFINE_SPINLOCK(rpc_sched_lock);
/*
 * Disable the timer for a given RPC task. Must be called with
 * queue->lock held and bottom halves disabled, in order to avoid
 * races within rpc_run_timer().
 */
static inline void
__rpc_disable_timer(struct rpc_task *task)
{
        dprintk("RPC: %4d disabling timer\n", task->tk_pid);
        task->tk_timeout_fn = NULL;
        task->tk_timeout = 0;
}

/*
 * Run a timeout function.
 * We use the callback in order to allow __rpc_wake_up_task()
 * and friends to disable the timer synchronously on SMP systems
 * without calling del_timer_sync(). The latter could cause a
 * deadlock if called while we're holding spinlocks...
 */
static void rpc_run_timer(struct rpc_task *task)
{
        void (*callback)(struct rpc_task *);

        callback = task->tk_timeout_fn;
        task->tk_timeout_fn = NULL;
        if (callback && RPC_IS_QUEUED(task)) {
                dprintk("RPC: %4d running timer\n", task->tk_pid);
                callback(task);
        }
        smp_mb__before_clear_bit();
        clear_bit(RPC_TASK_HAS_TIMER, &task->tk_runstate);
        smp_mb__after_clear_bit();
}

/*
 * Set up a timer for the current task.
 */
static inline void
__rpc_add_timer(struct rpc_task *task, rpc_action timer)
{
        if (!task->tk_timeout)
                return;

        dprintk("RPC: %4d setting alarm for %lu ms\n",
                        task->tk_pid, task->tk_timeout * 1000 / HZ);

        if (timer)
                task->tk_timeout_fn = timer;
        else
                task->tk_timeout_fn = __rpc_default_timer;
        set_bit(RPC_TASK_HAS_TIMER, &task->tk_runstate);
        mod_timer(&task->tk_timer, jiffies + task->tk_timeout);
}

/*
 * Delete any timer for the current task. Because we use del_timer_sync(),
 * this function should never be called while holding queue->lock.
 */
static void
rpc_delete_timer(struct rpc_task *task)
{
        if (RPC_IS_QUEUED(task))
                return;
        if (test_and_clear_bit(RPC_TASK_HAS_TIMER, &task->tk_runstate)) {
                del_singleshot_timer_sync(&task->tk_timer);
                dprintk("RPC: %4d deleting timer\n", task->tk_pid);
        }
}
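
/*
 * A minimal sketch of how the timer helpers above pair up
 * (illustrative only; "queue" and "my_timer_fn" are hypothetical):
 *
 *      task->tk_timeout = 5 * HZ;
 *      spin_lock_bh(&queue->lock);
 *      __rpc_add_timer(task, my_timer_fn);     (arms tk_timer)
 *      spin_unlock_bh(&queue->lock);
 *      ...
 *      rpc_delete_timer(task);                 (safe only once the task
 *                                               is no longer queued, and
 *                                               never under queue->lock)
 */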
143
144 /*
145  * Add new request to a priority queue.
146  */
147 static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue, struct rpc_task *task)
148 {
149         struct list_head *q;
150         struct rpc_task *t;
151
152         INIT_LIST_HEAD(&task->u.tk_wait.links);
153         q = &queue->tasks[task->tk_priority];
154         if (unlikely(task->tk_priority > queue->maxpriority))
155                 q = &queue->tasks[queue->maxpriority];
156         list_for_each_entry(t, q, u.tk_wait.list) {
157                 if (t->tk_cookie == task->tk_cookie) {
158                         list_add_tail(&task->u.tk_wait.list, &t->u.tk_wait.links);
159                         return;
160                 }
161         }
162         list_add_tail(&task->u.tk_wait.list, q);
163 }
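
/*
 * Resulting layout, as a hypothetical example with three tasks at one
 * priority level, where t1 and t3 share a cookie:
 *
 *      queue->tasks[prio]:  t1 <-> t2
 *      t1->u.tk_wait.links: t3
 *
 * Tasks sharing a cookie are chained off the first such task rather
 * than occupying their own slot in the priority list.
 */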

/*
 * Add new request to wait queue.
 *
 * Swapper tasks always get inserted at the head of the queue.
 * This should avoid many nasty memory deadlocks and hopefully
 * improve overall performance.
 * Everyone else gets appended to the queue to ensure proper FIFO behavior.
 */
static void __rpc_add_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
{
        BUG_ON(RPC_IS_QUEUED(task));

        if (RPC_IS_PRIORITY(queue))
                __rpc_add_wait_queue_priority(queue, task);
        else if (RPC_IS_SWAPPER(task))
                list_add(&task->u.tk_wait.list, &queue->tasks[0]);
        else
                list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
        task->u.tk_wait.rpc_waitq = queue;
        queue->qlen++;
        rpc_set_queued(task);

        dprintk("RPC: %4d added to queue %p \"%s\"\n",
                                task->tk_pid, queue, rpc_qname(queue));
}

/*
 * Remove request from a priority queue.
 */
static void __rpc_remove_wait_queue_priority(struct rpc_task *task)
{
        struct rpc_task *t;

        if (!list_empty(&task->u.tk_wait.links)) {
                t = list_entry(task->u.tk_wait.links.next, struct rpc_task, u.tk_wait.list);
                list_move(&t->u.tk_wait.list, &task->u.tk_wait.list);
                list_splice_init(&task->u.tk_wait.links, &t->u.tk_wait.links);
        }
        list_del(&task->u.tk_wait.list);
}

/*
 * Remove request from queue.
 * Note: must be called with the queue spinlock held.
 */
static void __rpc_remove_wait_queue(struct rpc_task *task)
{
        struct rpc_wait_queue *queue;
        queue = task->u.tk_wait.rpc_waitq;

        if (RPC_IS_PRIORITY(queue))
                __rpc_remove_wait_queue_priority(task);
        else
                list_del(&task->u.tk_wait.list);
        queue->qlen--;
        dprintk("RPC: %4d removed from queue %p \"%s\"\n",
                                task->tk_pid, queue, rpc_qname(queue));
}

static inline void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
{
        queue->priority = priority;
        queue->count = 1 << (priority * 2);
}

static inline void rpc_set_waitqueue_cookie(struct rpc_wait_queue *queue, unsigned long cookie)
{
        queue->cookie = cookie;
        queue->nr = RPC_BATCH_COUNT;
}

static inline void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
{
        rpc_set_waitqueue_priority(queue, queue->maxpriority);
        rpc_set_waitqueue_cookie(queue, 0);
}
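
/*
 * Worked example of the weighting above: count = 1 << (priority * 2)
 * grants a queue 16 wakeups while at priority 2, 4 at priority 1 and
 * a single one at priority 0 before __rpc_wake_up_next_priority()
 * considers switching lists, so each level is serviced four times as
 * often as the level below it.
 */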

static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, int maxprio)
{
        int i;

        spin_lock_init(&queue->lock);
        for (i = 0; i < ARRAY_SIZE(queue->tasks); i++)
                INIT_LIST_HEAD(&queue->tasks[i]);
        queue->maxpriority = maxprio;
        rpc_reset_waitqueue_priority(queue);
#ifdef RPC_DEBUG
        queue->name = qname;
#endif
}

void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
        __rpc_init_priority_wait_queue(queue, qname, RPC_PRIORITY_HIGH);
}

void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
        __rpc_init_priority_wait_queue(queue, qname, 0);
}
EXPORT_SYMBOL(rpc_init_wait_queue);

static int rpc_wait_bit_interruptible(void *word)
{
        if (signal_pending(current))
                return -ERESTARTSYS;
        schedule();
        return 0;
}

/*
 * Mark an RPC call as having completed by clearing the 'active' bit
 */
static inline void rpc_mark_complete_task(struct rpc_task *task)
{
        rpc_clear_active(task);
        wake_up_bit(&task->tk_runstate, RPC_TASK_ACTIVE);
}

/*
 * Allow callers to wait for completion of an RPC call
 */
int __rpc_wait_for_completion_task(struct rpc_task *task, int (*action)(void *))
{
        if (action == NULL)
                action = rpc_wait_bit_interruptible;
        return wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
                        action, TASK_INTERRUPTIBLE);
}
EXPORT_SYMBOL(__rpc_wait_for_completion_task);
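
/*
 * Typical caller, as a minimal sketch. Passing a NULL action selects
 * the default interruptible wait above (the rpc_wait_for_completion_task()
 * wrapper in sunrpc/sched.h passes NULL for exactly this reason):
 *
 *      if (rpc_wait_for_completion_task(task) < 0)
 *              ... a signal arrived; returned -ERESTARTSYS ...
 *      ... task->tk_status now holds the RPC result ...
 */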

/*
 * Make an RPC task runnable.
 *
 * Note: If the task is ASYNC, this must be called with
 * the spinlock held to protect the wait queue operation.
 */
static void rpc_make_runnable(struct rpc_task *task)
{
        int do_ret;

        BUG_ON(task->tk_timeout_fn);
        /* Set RPC_TASK_RUNNING before clearing RPC_TASK_QUEUED, so
         * that a concurrent waker cannot schedule the task twice. */
        do_ret = rpc_test_and_set_running(task);
        rpc_clear_queued(task);
        if (do_ret)
                return;
        if (RPC_IS_ASYNC(task)) {
                int status;

                INIT_WORK(&task->u.tk_work, rpc_async_schedule, (void *)task);
                status = queue_work(task->tk_workqueue, &task->u.tk_work);
                if (status < 0) {
                        printk(KERN_WARNING "RPC: failed to add task to queue: error: %d!\n", status);
                        task->tk_status = status;
                        return;
                }
        } else
                wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
}

/*
 * Place a newly initialized task on the workqueue.
 */
static inline void
rpc_schedule_run(struct rpc_task *task)
{
        rpc_set_active(task);
        rpc_make_runnable(task);
}

/*
 * Prepare for sleeping on a wait queue.
 * By always appending tasks to the list we ensure FIFO behavior.
 * NB: An RPC task will only receive interrupt-driven events as long
 * as it's on a wait queue.
 */
static void __rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
                        rpc_action action, rpc_action timer)
{
        dprintk("RPC: %4d sleep_on(queue \"%s\" time %ld)\n", task->tk_pid,
                                rpc_qname(q), jiffies);

        if (!RPC_IS_ASYNC(task) && !RPC_IS_ACTIVATED(task)) {
                printk(KERN_ERR "RPC: Inactive synchronous task put to sleep!\n");
                return;
        }

        /* Mark the task as being activated if so needed */
        rpc_set_active(task);

        __rpc_add_wait_queue(q, task);

        BUG_ON(task->tk_callback != NULL);
        task->tk_callback = action;
        __rpc_add_timer(task, timer);
}

void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
                                rpc_action action, rpc_action timer)
{
        /*
         * Protect the queue operations.
         */
        spin_lock_bh(&q->lock);
        __rpc_sleep_on(q, task, action, timer);
        spin_unlock_bh(&q->lock);
}
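
/*
 * A minimal usage sketch: an FSM step parks the task on a wait queue
 * and names the callback to run on wakeup. All names below except
 * rpc_sleep_on() itself are hypothetical:
 *
 *      static void example_wait_for_slot(struct rpc_task *task)
 *      {
 *              task->tk_timeout = 10 * HZ;
 *              rpc_sleep_on(&example_queue, task, example_resume, NULL);
 *      }
 *
 * The NULL timer argument falls back to __rpc_default_timer(), which
 * fails the task with -ETIMEDOUT once tk_timeout expires.
 */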

/**
 * __rpc_do_wake_up_task - wake up a single rpc_task
 * @task: task to be woken up
 *
 * Caller must hold queue->lock, and have cleared the task queued flag.
 */
static void __rpc_do_wake_up_task(struct rpc_task *task)
{
        dprintk("RPC: %4d __rpc_wake_up_task (now %ld)\n", task->tk_pid, jiffies);

#ifdef RPC_DEBUG
        BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID);
#endif
        /* Has the task been executed yet? If not, we cannot wake it up! */
        if (!RPC_IS_ACTIVATED(task)) {
                printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task);
                return;
        }

        __rpc_disable_timer(task);
        __rpc_remove_wait_queue(task);

        rpc_make_runnable(task);

        dprintk("RPC:      __rpc_wake_up_task done\n");
}

/*
 * Wake up the specified task
 */
static void __rpc_wake_up_task(struct rpc_task *task)
{
        if (rpc_start_wakeup(task)) {
                if (RPC_IS_QUEUED(task))
                        __rpc_do_wake_up_task(task);
                rpc_finish_wakeup(task);
        }
}

/*
 * Default timeout handler if none specified by user
 */
static void
__rpc_default_timer(struct rpc_task *task)
{
        dprintk("RPC: %d timeout (default timer)\n", task->tk_pid);
        task->tk_status = -ETIMEDOUT;
        rpc_wake_up_task(task);
}

/*
 * Wake up the specified task
 */
void rpc_wake_up_task(struct rpc_task *task)
{
        if (rpc_start_wakeup(task)) {
                if (RPC_IS_QUEUED(task)) {
                        struct rpc_wait_queue *queue = task->u.tk_wait.rpc_waitq;

                        spin_lock_bh(&queue->lock);
                        __rpc_do_wake_up_task(task);
                        spin_unlock_bh(&queue->lock);
                }
                rpc_finish_wakeup(task);
        }
}

/*
 * Wake up the next task on a priority queue.
 */
static struct rpc_task * __rpc_wake_up_next_priority(struct rpc_wait_queue *queue)
{
        struct list_head *q;
        struct rpc_task *task;

        /*
         * Service a batch of tasks from a single cookie.
         */
        q = &queue->tasks[queue->priority];
        if (!list_empty(q)) {
                task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
                if (queue->cookie == task->tk_cookie) {
                        if (--queue->nr)
                                goto out;
                        list_move_tail(&task->u.tk_wait.list, q);
                }
                /*
                 * Check if we need to switch queues.
                 */
                if (--queue->count)
                        goto new_cookie;
        }

        /*
         * Service the next queue.
         */
        do {
                if (q == &queue->tasks[0])
                        q = &queue->tasks[queue->maxpriority];
                else
                        q = q - 1;
                if (!list_empty(q)) {
                        task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
                        goto new_queue;
                }
        } while (q != &queue->tasks[queue->priority]);

        rpc_reset_waitqueue_priority(queue);
        return NULL;

new_queue:
        rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));
new_cookie:
        rpc_set_waitqueue_cookie(queue, task->tk_cookie);
out:
        __rpc_wake_up_task(task);
        return task;
}
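
/*
 * Example of the batching above, assuming RPC_BATCH_COUNT is 16: if
 * the head task's cookie matches the queue's current cookie, up to 16
 * consecutive wakeups go to tasks sharing that cookie (typically one
 * owner, e.g. one process); after that the batch is rotated to the
 * list tail and, once queue->count is exhausted, a round-robin scan
 * picks the next non-empty priority list.
 */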

/*
 * Wake up the next task on the wait queue.
 */
struct rpc_task * rpc_wake_up_next(struct rpc_wait_queue *queue)
{
        struct rpc_task *task = NULL;

        dprintk("RPC:      wake_up_next(%p \"%s\")\n", queue, rpc_qname(queue));
        spin_lock_bh(&queue->lock);
        if (RPC_IS_PRIORITY(queue))
                task = __rpc_wake_up_next_priority(queue);
        else {
                task_for_first(task, &queue->tasks[0])
                        __rpc_wake_up_task(task);
        }
        spin_unlock_bh(&queue->lock);

        return task;
}

/**
 * rpc_wake_up - wake up all rpc_tasks
 * @queue: rpc_wait_queue on which the tasks are sleeping
 *
 * Grabs queue->lock
 */
void rpc_wake_up(struct rpc_wait_queue *queue)
{
        struct rpc_task *task, *next;
        struct list_head *head;

        spin_lock_bh(&queue->lock);
        head = &queue->tasks[queue->maxpriority];
        for (;;) {
                list_for_each_entry_safe(task, next, head, u.tk_wait.list)
                        __rpc_wake_up_task(task);
                if (head == &queue->tasks[0])
                        break;
                head--;
        }
        spin_unlock_bh(&queue->lock);
}

/**
 * rpc_wake_up_status - wake up all rpc_tasks and set their status value.
 * @queue: rpc_wait_queue on which the tasks are sleeping
 * @status: status value to set
 *
 * Grabs queue->lock
 */
void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
{
        struct rpc_task *task, *next;
        struct list_head *head;

        spin_lock_bh(&queue->lock);
        head = &queue->tasks[queue->maxpriority];
        for (;;) {
                list_for_each_entry_safe(task, next, head, u.tk_wait.list) {
                        task->tk_status = status;
                        __rpc_wake_up_task(task);
                }
                if (head == &queue->tasks[0])
                        break;
                head--;
        }
        spin_unlock_bh(&queue->lock);
}

/*
 * Run a task at a later time
 */
static void     __rpc_atrun(struct rpc_task *);
void
rpc_delay(struct rpc_task *task, unsigned long delay)
{
        task->tk_timeout = delay;
        rpc_sleep_on(&delay_queue, task, NULL, __rpc_atrun);
}
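
/*
 * A minimal usage sketch: back off before retrying a call, as the
 * NFSv3 client does on EJUKEBOX (the 5 * HZ value is illustrative):
 *
 *      rpc_delay(task, 5 * HZ);
 *      task->tk_status = 0;
 *      rpc_restart_call(task);
 *
 * rpc_restart_call() lives in clnt.c; the delay itself simply parks
 * the task on delay_queue with __rpc_atrun() as its timer callback.
 */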

static void
__rpc_atrun(struct rpc_task *task)
{
        task->tk_status = 0;
        rpc_wake_up_task(task);
}

/*
 * Helper to call task->tk_ops->rpc_call_prepare
 */
static void rpc_prepare_task(struct rpc_task *task)
{
        task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
}

/*
 * Helper that calls task->tk_ops->rpc_call_done if it exists
 */
void rpc_exit_task(struct rpc_task *task)
{
        task->tk_action = NULL;
        if (task->tk_ops->rpc_call_done != NULL) {
                task->tk_ops->rpc_call_done(task, task->tk_calldata);
                if (task->tk_action != NULL) {
                        WARN_ON(RPC_ASSASSINATED(task));
                        /* Always release the RPC slot and buffer memory */
                        xprt_release(task);
                }
        }
}
EXPORT_SYMBOL(rpc_exit_task);
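
/*
 * Note that rpc_call_done may restart the call by setting tk_action
 * again. A hedged sketch of such a callback ("example_" names are
 * hypothetical):
 *
 *      static void example_call_done(struct rpc_task *task, void *calldata)
 *      {
 *              if (task->tk_status == -EJUKEBOX) {
 *                      rpc_delay(task, 5 * HZ);
 *                      rpc_restart_call(task);         (sets tk_action)
 *              }
 *      }
 *
 * In that case the code above releases the transport slot and buffer
 * so the retried call starts from a clean state.
 */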

/*
 * This is the RPC `scheduler' (or rather, the finite state machine).
 */
static int __rpc_execute(struct rpc_task *task)
{
        int             status = 0;

        dprintk("RPC: %4d rpc_execute flgs %x\n",
                                task->tk_pid, task->tk_flags);

        BUG_ON(RPC_IS_QUEUED(task));

        for (;;) {
                /*
                 * Garbage collection of pending timers...
                 */
                rpc_delete_timer(task);

                /*
                 * Execute any pending callback.
                 */
                if (RPC_DO_CALLBACK(task)) {
                        /* Define a callback save pointer */
                        void (*save_callback)(struct rpc_task *);

                        /*
                         * If a callback exists, save it, reset it and
                         * call it. Saving it first prevents us from
                         * clobbering a new callback that the handler
                         * itself might set.
                         * - Dave
                         */
                        save_callback = task->tk_callback;
                        task->tk_callback = NULL;
                        lock_kernel();
                        save_callback(task);
                        unlock_kernel();
                }

                /*
                 * Perform the next FSM step.
                 * tk_action may be NULL when the task has been killed
                 * by someone else.
                 */
                if (!RPC_IS_QUEUED(task)) {
                        if (task->tk_action == NULL)
                                break;
                        lock_kernel();
                        task->tk_action(task);
                        unlock_kernel();
                }

                /*
                 * Lockless check for whether task is sleeping or not.
                 */
                if (!RPC_IS_QUEUED(task))
                        continue;
                rpc_clear_running(task);
                if (RPC_IS_ASYNC(task)) {
                        /* Careful! we may have raced... */
                        if (RPC_IS_QUEUED(task))
                                return 0;
                        if (rpc_test_and_set_running(task))
                                return 0;
                        continue;
                }

                /* sync task: sleep here */
                dprintk("RPC: %4d sync task going to sleep\n", task->tk_pid);
                /* Note: Caller should be using rpc_clnt_sigmask() */
                status = out_of_line_wait_on_bit(&task->tk_runstate,
                                RPC_TASK_QUEUED, rpc_wait_bit_interruptible,
                                TASK_INTERRUPTIBLE);
                if (status == -ERESTARTSYS) {
                        /*
                         * When a sync task receives a signal, it exits with
                         * -ERESTARTSYS. In order to catch any callbacks that
                         * clean up after sleeping on some queue, we don't
                         * break the loop here, but go around once more.
                         */
                        dprintk("RPC: %4d got signal\n", task->tk_pid);
                        task->tk_flags |= RPC_TASK_KILLED;
                        rpc_exit(task, -ERESTARTSYS);
                        rpc_wake_up_task(task);
                }
                rpc_set_running(task);
                dprintk("RPC: %4d sync task resuming\n", task->tk_pid);
        }

        dprintk("RPC: %4d, return %d, status %d\n", task->tk_pid, status, task->tk_status);
        /* Wake up anyone who is waiting for task completion */
        rpc_mark_complete_task(task);
        /* Release all resources associated with the task */
        rpc_release_task(task);
        return status;
}

/*
 * User-visible entry point to the scheduler.
 *
 * This may be called recursively if e.g. an async NFS task updates
 * the attributes and finds that dirty pages must be flushed.
 * NOTE: Upon exit of this function the task is guaranteed to be
 *       released. In particular, note that rpc_release_task() will
 *       have been called, so your task memory may have been freed.
 */
int
rpc_execute(struct rpc_task *task)
{
        rpc_set_active(task);
        rpc_set_running(task);
        return __rpc_execute(task);
}

static void rpc_async_schedule(void *arg)
{
        __rpc_execute((struct rpc_task *)arg);
}

/**
 * rpc_malloc - allocate an RPC buffer
 * @task: RPC task that will use this buffer
 * @size: requested byte size
 *
 * We try to ensure that some NFS reads and writes can always proceed
 * by using a mempool when allocating 'small' buffers.
 * In order to avoid memory starvation triggering more writebacks of
 * NFS requests, we use GFP_NOFS rather than GFP_KERNEL.
 */
void * rpc_malloc(struct rpc_task *task, size_t size)
{
        struct rpc_rqst *req = task->tk_rqstp;
        gfp_t   gfp;

        if (task->tk_flags & RPC_TASK_SWAPPER)
                gfp = GFP_ATOMIC;
        else
                gfp = GFP_NOFS;

        if (size > RPC_BUFFER_MAXSIZE) {
                req->rq_buffer = kmalloc(size, gfp);
                if (req->rq_buffer)
                        req->rq_bufsize = size;
        } else {
                req->rq_buffer = mempool_alloc(rpc_buffer_mempool, gfp);
                if (req->rq_buffer)
                        req->rq_bufsize = RPC_BUFFER_MAXSIZE;
        }
        return req->rq_buffer;
}
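
/*
 * For example, a request whose call and reply buffers fit within
 * RPC_BUFFER_MAXSIZE (2048 bytes) is served from rpc_buffer_mempool,
 * which guarantees RPC_BUFFER_POOLSIZE buffers and so can always make
 * progress; a larger allocation, say an 8k READDIR reply, falls back
 * to kmalloc() and may fail under memory pressure.
 */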

/**
 * rpc_free - free buffer allocated via rpc_malloc
 * @task: RPC task with a buffer to be freed
 *
 */
void rpc_free(struct rpc_task *task)
{
        struct rpc_rqst *req = task->tk_rqstp;

        if (req->rq_buffer) {
                if (req->rq_bufsize == RPC_BUFFER_MAXSIZE)
                        mempool_free(req->rq_buffer, rpc_buffer_mempool);
                else
                        kfree(req->rq_buffer);
                req->rq_buffer = NULL;
                req->rq_bufsize = 0;
        }
}

/*
 * Creation and deletion of RPC task structures
 */
void rpc_init_task(struct rpc_task *task, struct rpc_clnt *clnt, int flags, const struct rpc_call_ops *tk_ops, void *calldata)
{
        memset(task, 0, sizeof(*task));
        init_timer(&task->tk_timer);
        task->tk_timer.data     = (unsigned long) task;
        task->tk_timer.function = (void (*)(unsigned long)) rpc_run_timer;
        atomic_set(&task->tk_count, 1);
        task->tk_client = clnt;
        task->tk_flags  = flags;
        task->tk_ops = tk_ops;
        if (tk_ops->rpc_call_prepare != NULL)
                task->tk_action = rpc_prepare_task;
        task->tk_calldata = calldata;

        /* Initialize retry counters */
        task->tk_garb_retry = 2;
        task->tk_cred_retry = 2;

        task->tk_priority = RPC_PRIORITY_NORMAL;
        task->tk_cookie = (unsigned long)current;

        /* Initialize workqueue for async tasks */
        task->tk_workqueue = rpciod_workqueue;

        if (clnt) {
                atomic_inc(&clnt->cl_users);
                if (clnt->cl_softrtry)
                        task->tk_flags |= RPC_TASK_SOFT;
                if (!clnt->cl_intr)
                        task->tk_flags |= RPC_TASK_NOINTR;
        }

#ifdef RPC_DEBUG
        task->tk_magic = RPC_TASK_MAGIC_ID;
        task->tk_pid = rpc_task_id++;
#endif
        /* Add to global list of all tasks */
        spin_lock(&rpc_sched_lock);
        list_add_tail(&task->tk_task, &all_tasks);
        spin_unlock(&rpc_sched_lock);

        BUG_ON(task->tk_ops == NULL);

        /* starting timestamp */
        task->tk_start = jiffies;

        dprintk("RPC: %4d new task procpid %d\n", task->tk_pid,
                                current->pid);
}

static struct rpc_task *
rpc_alloc_task(void)
{
        return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS);
}

static void rpc_free_task(struct rpc_task *task)
{
        dprintk("RPC: %4d freeing task\n", task->tk_pid);
        mempool_free(task, rpc_task_mempool);
}

/*
 * Create a new task for the specified client.  We have to
 * clean up after an allocation failure, as the client may
 * have specified "oneshot".
 */
struct rpc_task *rpc_new_task(struct rpc_clnt *clnt, int flags, const struct rpc_call_ops *tk_ops, void *calldata)
{
        struct rpc_task *task;

        task = rpc_alloc_task();
        if (!task)
                goto cleanup;

        rpc_init_task(task, clnt, flags, tk_ops, calldata);

        dprintk("RPC: %4d allocated task\n", task->tk_pid);
        task->tk_flags |= RPC_TASK_DYNAMIC;
out:
        return task;

cleanup:
        /* Check whether to release the client */
        if (clnt) {
                printk(KERN_WARNING "rpc_new_task: failed, users=%d, oneshot=%d\n",
                        atomic_read(&clnt->cl_users), clnt->cl_oneshot);
                atomic_inc(&clnt->cl_users); /* pretend we were used ... */
                rpc_release_client(clnt);
        }
        goto out;
}

void rpc_release_task(struct rpc_task *task)
{
        const struct rpc_call_ops *tk_ops = task->tk_ops;
        void *calldata = task->tk_calldata;

#ifdef RPC_DEBUG
        BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID);
#endif
        if (!atomic_dec_and_test(&task->tk_count))
                return;
        dprintk("RPC: %4d release task\n", task->tk_pid);

        /* Remove from global task list */
        spin_lock(&rpc_sched_lock);
        list_del(&task->tk_task);
        spin_unlock(&rpc_sched_lock);

        BUG_ON(RPC_IS_QUEUED(task));

        /* Synchronously delete any running timer */
        rpc_delete_timer(task);

        /* Release resources */
        if (task->tk_rqstp)
                xprt_release(task);
        if (task->tk_msg.rpc_cred)
                rpcauth_unbindcred(task);
        if (task->tk_client) {
                rpc_release_client(task->tk_client);
                task->tk_client = NULL;
        }

#ifdef RPC_DEBUG
        task->tk_magic = 0;
#endif
        if (task->tk_flags & RPC_TASK_DYNAMIC)
                rpc_free_task(task);
        if (tk_ops->rpc_release)
                tk_ops->rpc_release(calldata);
}

/**
 * rpc_run_task - Allocate a new RPC task, then run rpc_execute against it
 * @clnt: pointer to RPC client
 * @flags: RPC flags
 * @ops: RPC call ops
 * @data: user call data
 */
struct rpc_task *rpc_run_task(struct rpc_clnt *clnt, int flags,
                                        const struct rpc_call_ops *ops,
                                        void *data)
{
        struct rpc_task *task;
        task = rpc_new_task(clnt, flags, ops, data);
        if (task == NULL) {
                if (ops->rpc_release != NULL)
                        ops->rpc_release(data);
                return ERR_PTR(-ENOMEM);
        }
        atomic_inc(&task->tk_count);
        rpc_execute(task);
        return task;
}
EXPORT_SYMBOL(rpc_run_task);
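
/*
 * A minimal caller, as a hedged sketch ("example_" names are
 * hypothetical). Note that rpc_run_task() takes an extra reference on
 * the task, which the caller must drop with rpc_release_task():
 *
 *      static const struct rpc_call_ops example_ops = {
 *              .rpc_call_done  = example_call_done,
 *              .rpc_release    = example_release,
 *      };
 *
 *      task = rpc_run_task(clnt, RPC_TASK_ASYNC, &example_ops, calldata);
 *      if (IS_ERR(task))
 *              return PTR_ERR(task);
 *      rpc_release_task(task);
 */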

/**
 * rpc_find_parent - find the parent of a child task.
 * @child: child task
 * @parent: parent task
 *
 * Checks that the parent task is still sleeping on the
 * queue 'childq'. If so returns a pointer to the parent.
 * Upon failure returns NULL.
 *
 * Caller must hold childq.lock
 */
static inline struct rpc_task *rpc_find_parent(struct rpc_task *child, struct rpc_task *parent)
{
        struct rpc_task *task;
        struct list_head *le;

        task_for_each(task, le, &childq.tasks[0])
                if (task == parent)
                        return parent;

        return NULL;
}

static void rpc_child_exit(struct rpc_task *child, void *calldata)
{
        struct rpc_task *parent;

        spin_lock_bh(&childq.lock);
        if ((parent = rpc_find_parent(child, calldata)) != NULL) {
                parent->tk_status = child->tk_status;
                __rpc_wake_up_task(parent);
        }
        spin_unlock_bh(&childq.lock);
}

static const struct rpc_call_ops rpc_child_ops = {
        .rpc_call_done = rpc_child_exit,
};

/*
 * Note: rpc_new_task releases the client after a failure.
 */
struct rpc_task *
rpc_new_child(struct rpc_clnt *clnt, struct rpc_task *parent)
{
        struct rpc_task *task;

        task = rpc_new_task(clnt, RPC_TASK_ASYNC | RPC_TASK_CHILD, &rpc_child_ops, parent);
        if (!task)
                goto fail;
        return task;

fail:
        parent->tk_status = -ENOMEM;
        return NULL;
}

void rpc_run_child(struct rpc_task *task, struct rpc_task *child, rpc_action func)
{
        spin_lock_bh(&childq.lock);
        /* N.B. Is it possible for the child to have already finished? */
        __rpc_sleep_on(&childq, task, func, NULL);
        rpc_schedule_run(child);
        spin_unlock_bh(&childq.lock);
}

/*
 * Kill all tasks for the given client.
 * XXX: kill their descendants as well?
 */
void rpc_killall_tasks(struct rpc_clnt *clnt)
{
        struct rpc_task *rovr;
        struct list_head *le;

        dprintk("RPC:      killing all tasks for client %p\n", clnt);

        /*
         * Hold rpc_sched_lock so that the all_tasks list cannot
         * change under us...
         */
        spin_lock(&rpc_sched_lock);
        alltask_for_each(rovr, le, &all_tasks) {
                if (!RPC_IS_ACTIVATED(rovr))
                        continue;
                if (!clnt || rovr->tk_client == clnt) {
                        rovr->tk_flags |= RPC_TASK_KILLED;
                        rpc_exit(rovr, -EIO);
                        rpc_wake_up_task(rovr);
                }
        }
        spin_unlock(&rpc_sched_lock);
}

static DECLARE_MUTEX_LOCKED(rpciod_running);

static void rpciod_killall(void)
{
        unsigned long flags;

        while (!list_empty(&all_tasks)) {
                clear_thread_flag(TIF_SIGPENDING);
                rpc_killall_tasks(NULL);
                flush_workqueue(rpciod_workqueue);
                if (!list_empty(&all_tasks)) {
                        dprintk("rpciod_killall: waiting for tasks to exit\n");
                        yield();
                }
        }

        spin_lock_irqsave(&current->sighand->siglock, flags);
        recalc_sigpending();
        spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

/*
 * Start up the rpciod workqueue if it's not already running.
 */
int
rpciod_up(void)
{
        struct workqueue_struct *wq;
        int error = 0;

        down(&rpciod_sema);
        dprintk("rpciod_up: users %d\n", rpciod_users);
        rpciod_users++;
        if (rpciod_workqueue)
                goto out;
        /*
         * If there's no workqueue, we should be the first user.
         */
        if (rpciod_users > 1)
                printk(KERN_WARNING "rpciod_up: no workqueue, %d users??\n", rpciod_users);
        /*
         * Create the rpciod workqueue.
         */
        error = -ENOMEM;
        wq = create_workqueue("rpciod");
        if (wq == NULL) {
                printk(KERN_WARNING "rpciod_up: create workqueue failed, error=%d\n", error);
                rpciod_users--;
                goto out;
        }
        rpciod_workqueue = wq;
        error = 0;
out:
        up(&rpciod_sema);
        return error;
}

void
rpciod_down(void)
{
        down(&rpciod_sema);
        dprintk("rpciod_down sema %d\n", rpciod_users);
        if (rpciod_users) {
                if (--rpciod_users)
                        goto out;
        } else
                printk(KERN_WARNING "rpciod_down: no users??\n");

        if (!rpciod_workqueue) {
                dprintk("rpciod_down: Nothing to do!\n");
                goto out;
        }
        rpciod_killall();

        destroy_workqueue(rpciod_workqueue);
        rpciod_workqueue = NULL;
out:
        up(&rpciod_sema);
}
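
/*
 * Callers pair these refcounted calls around their use of the RPC
 * client, e.g. (sketch):
 *
 *      error = rpciod_up();
 *      if (error)
 *              return error;
 *      ... issue RPC calls ...
 *      rpciod_down();
 *
 * The last rpciod_down() kills any remaining tasks and destroys the
 * workqueue.
 */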

#ifdef RPC_DEBUG
void rpc_show_tasks(void)
{
        struct list_head *le;
        struct rpc_task *t;

        spin_lock(&rpc_sched_lock);
        if (list_empty(&all_tasks)) {
                spin_unlock(&rpc_sched_lock);
                return;
        }
        printk("-pid- proc flgs status -client- -prog- --rqstp- -timeout "
                "-rpcwait -action- ---ops--\n");
        alltask_for_each(t, le, &all_tasks) {
                const char *rpc_waitq = "none";

                if (RPC_IS_QUEUED(t))
                        rpc_waitq = rpc_qname(t->u.tk_wait.rpc_waitq);

                printk("%05d %04d %04x %06d %8p %6d %8p %08ld %8s %8p %8p\n",
                        t->tk_pid,
                        (t->tk_msg.rpc_proc ? t->tk_msg.rpc_proc->p_proc : -1),
                        t->tk_flags, t->tk_status,
                        t->tk_client,
                        (t->tk_client ? t->tk_client->cl_prog : 0),
                        t->tk_rqstp, t->tk_timeout,
                        rpc_waitq,
                        t->tk_action, t->tk_ops);
        }
        spin_unlock(&rpc_sched_lock);
}
#endif

void
rpc_destroy_mempool(void)
{
        if (rpc_buffer_mempool)
                mempool_destroy(rpc_buffer_mempool);
        if (rpc_task_mempool)
                mempool_destroy(rpc_task_mempool);
        if (rpc_task_slabp && kmem_cache_destroy(rpc_task_slabp))
                printk(KERN_INFO "rpc_task: not all structures were freed\n");
        if (rpc_buffer_slabp && kmem_cache_destroy(rpc_buffer_slabp))
                printk(KERN_INFO "rpc_buffers: not all structures were freed\n");
}

int
rpc_init_mempool(void)
{
        rpc_task_slabp = kmem_cache_create("rpc_tasks",
                                             sizeof(struct rpc_task),
                                             0, SLAB_HWCACHE_ALIGN,
                                             NULL, NULL);
        if (!rpc_task_slabp)
                goto err_nomem;
        rpc_buffer_slabp = kmem_cache_create("rpc_buffers",
                                             RPC_BUFFER_MAXSIZE,
                                             0, SLAB_HWCACHE_ALIGN,
                                             NULL, NULL);
        if (!rpc_buffer_slabp)
                goto err_nomem;
        rpc_task_mempool = mempool_create(RPC_TASK_POOLSIZE,
                                            mempool_alloc_slab,
                                            mempool_free_slab,
                                            rpc_task_slabp);
        if (!rpc_task_mempool)
                goto err_nomem;
        rpc_buffer_mempool = mempool_create(RPC_BUFFER_POOLSIZE,
                                            mempool_alloc_slab,
                                            mempool_free_slab,
                                            rpc_buffer_slabp);
        if (!rpc_buffer_mempool)
                goto err_nomem;
        return 0;
err_nomem:
        rpc_destroy_mempool();
        return -ENOMEM;
}