/*
 * linux/net/sunrpc/sched.c
 *
 * Scheduling for synchronous and asynchronous RPC requests.
 *
 * Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de>
 *
 * TCP NFS related read + write fixes
 * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
 */

#include <linux/module.h>

#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>

#include <linux/sunrpc/clnt.h>

#include "sunrpc.h"

#ifdef RPC_DEBUG
#define RPCDBG_FACILITY		RPCDBG_SCHED
#endif

/*
 * RPC slabs and memory pools
 */
#define RPC_BUFFER_MAXSIZE	(2048)
#define RPC_BUFFER_POOLSIZE	(8)
#define RPC_TASK_POOLSIZE	(8)
static struct kmem_cache	*rpc_task_slabp __read_mostly;
static struct kmem_cache	*rpc_buffer_slabp __read_mostly;
static mempool_t	*rpc_task_mempool __read_mostly;
static mempool_t	*rpc_buffer_mempool __read_mostly;

static void rpc_async_schedule(struct work_struct *);
static void rpc_release_task(struct rpc_task *task);
static void __rpc_queue_timer_fn(unsigned long ptr);

/*
 * RPC tasks sit here while waiting for conditions to improve.
 */
static struct rpc_wait_queue delay_queue;

/*
 * rpciod-related stuff
 */
struct workqueue_struct *rpciod_workqueue;

/*
 * Disable the timer for a given RPC task. Should be called with
 * queue->lock and bh_disabled in order to avoid races within
 * rpc_run_timer().
 */
static void
__rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (task->tk_timeout == 0)
		return;
	dprintk("RPC: %5u disabling timer\n", task->tk_pid);
	task->tk_timeout = 0;
	list_del(&task->u.tk_wait.timer_list);
	if (list_empty(&queue->timer_list.list))
		del_timer(&queue->timer_list.timer);
}

static void
rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires)
{
	queue->timer_list.expires = expires;
	mod_timer(&queue->timer_list.timer, expires);
}

/*
 * Set up a timer for the current task.
 */
static void
__rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (!task->tk_timeout)
		return;

	dprintk("RPC: %5u setting alarm for %lu ms\n",
			task->tk_pid, task->tk_timeout * 1000 / HZ);

	task->u.tk_wait.expires = jiffies + task->tk_timeout;
	if (list_empty(&queue->timer_list.list) || time_before(task->u.tk_wait.expires, queue->timer_list.expires))
		rpc_set_queue_timer(queue, task->u.tk_wait.expires);
	list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
}

/*
 * Add new request to a priority queue.
 */
static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue,
		struct rpc_task *task,
		unsigned char queue_priority)
{
	struct list_head *q;
	struct rpc_task *t;

	INIT_LIST_HEAD(&task->u.tk_wait.links);
	q = &queue->tasks[queue_priority];
	if (unlikely(queue_priority > queue->maxpriority))
		q = &queue->tasks[queue->maxpriority];
	list_for_each_entry(t, q, u.tk_wait.list) {
		if (t->tk_owner == task->tk_owner) {
			list_add_tail(&task->u.tk_wait.list, &t->u.tk_wait.links);
			return;
		}
	}
	list_add_tail(&task->u.tk_wait.list, q);
}

/*
 * Add new request to wait queue.
 *
 * Swapper tasks always get inserted at the head of the queue.
 * This should avoid many nasty memory deadlocks and hopefully
 * improve overall performance.
 * Everyone else gets appended to the queue to ensure proper FIFO behavior.
 */
static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
		struct rpc_task *task,
		unsigned char queue_priority)
{
	BUG_ON (RPC_IS_QUEUED(task));

	if (RPC_IS_PRIORITY(queue))
		__rpc_add_wait_queue_priority(queue, task, queue_priority);
	else if (RPC_IS_SWAPPER(task))
		list_add(&task->u.tk_wait.list, &queue->tasks[0]);
	else
		list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
	task->tk_waitqueue = queue;
	queue->qlen++;
	/* barrier matches the read in rpc_wake_up_task_queue_locked() */
	smp_wmb();
	rpc_set_queued(task);

	dprintk("RPC: %5u added to queue %p \"%s\"\n",
			task->tk_pid, queue, rpc_qname(queue));
}

/*
 * Remove request from a priority queue.
 */
static void __rpc_remove_wait_queue_priority(struct rpc_task *task)
{
	struct rpc_task *t;

	if (!list_empty(&task->u.tk_wait.links)) {
		t = list_entry(task->u.tk_wait.links.next, struct rpc_task, u.tk_wait.list);
		list_move(&t->u.tk_wait.list, &task->u.tk_wait.list);
		list_splice_init(&task->u.tk_wait.links, &t->u.tk_wait.links);
	}
}

/*
 * Remove request from queue.
 * Note: must be called with spin lock held.
 */
static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	__rpc_disable_timer(queue, task);
	if (RPC_IS_PRIORITY(queue))
		__rpc_remove_wait_queue_priority(task);
	list_del(&task->u.tk_wait.list);
	queue->qlen--;
	dprintk("RPC: %5u removed from queue %p \"%s\"\n",
			task->tk_pid, queue, rpc_qname(queue));
}

static inline void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
{
	queue->priority = priority;
	queue->count = 1 << (priority * 2);
}

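/*
 * For example: the shift above loads queue->count with 1 at priority 0,
 * 4 at priority 1 and 16 at priority 2. __rpc_wake_up_next_priority()
 * decrements this count once per wakeup, so each higher priority level
 * is granted geometrically more wakeups before the scheduler rotates
 * on to the next level.
 */
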
static inline void rpc_set_waitqueue_owner(struct rpc_wait_queue *queue, pid_t pid)
{
	queue->owner = pid;
	queue->nr = RPC_BATCH_COUNT;
}

static inline void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
{
	rpc_set_waitqueue_priority(queue, queue->maxpriority);
	rpc_set_waitqueue_owner(queue, 0);
}

static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, unsigned char nr_queues)
{
	int i;

	spin_lock_init(&queue->lock);
	for (i = 0; i < ARRAY_SIZE(queue->tasks); i++)
		INIT_LIST_HEAD(&queue->tasks[i]);
	queue->maxpriority = nr_queues - 1;
	rpc_reset_waitqueue_priority(queue);
	queue->qlen = 0;
	setup_timer(&queue->timer_list.timer, __rpc_queue_timer_fn, (unsigned long)queue);
	INIT_LIST_HEAD(&queue->timer_list.list);
#ifdef RPC_DEBUG
	queue->name = qname;
#endif
}

void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
	__rpc_init_priority_wait_queue(queue, qname, RPC_NR_PRIORITY);
}
EXPORT_SYMBOL_GPL(rpc_init_priority_wait_queue);

void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
	__rpc_init_priority_wait_queue(queue, qname, 1);
}
EXPORT_SYMBOL_GPL(rpc_init_wait_queue);

void rpc_destroy_wait_queue(struct rpc_wait_queue *queue)
{
	del_timer_sync(&queue->timer_list.timer);
}
EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue);

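/*
 * Example (illustrative sketch; the structure and names below are
 * hypothetical): users of these helpers pair queue initialisation with
 * destruction so that the queue timer is reliably stopped:
 *
 *	struct foo_xprt {
 *		struct rpc_wait_queue	pending;
 *	};
 *
 *	static void foo_xprt_setup(struct foo_xprt *x)
 *	{
 *		rpc_init_wait_queue(&x->pending, "foo_pending");
 *	}
 *
 *	static void foo_xprt_teardown(struct foo_xprt *x)
 *	{
 *		rpc_destroy_wait_queue(&x->pending);
 *	}
 */
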
static int rpc_wait_bit_killable(void *word)
{
	if (fatal_signal_pending(current))
		return -ERESTARTSYS;
	freezable_schedule();
	return 0;
}

#ifdef RPC_DEBUG
static void rpc_task_set_debuginfo(struct rpc_task *task)
{
	static atomic_t rpc_pid;

	task->tk_pid = atomic_inc_return(&rpc_pid);
}
#else
static inline void rpc_task_set_debuginfo(struct rpc_task *task)
{
}
#endif

static void rpc_set_active(struct rpc_task *task)
{
	rpc_task_set_debuginfo(task);
	set_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
}

/*
 * Mark an RPC call as having completed by clearing the 'active' bit
 * and then waking up all tasks that were sleeping.
 */
static int rpc_complete_task(struct rpc_task *task)
{
	void *m = &task->tk_runstate;
	wait_queue_head_t *wq = bit_waitqueue(m, RPC_TASK_ACTIVE);
	struct wait_bit_key k = __WAIT_BIT_KEY_INITIALIZER(m, RPC_TASK_ACTIVE);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&wq->lock, flags);
	clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
	ret = atomic_dec_and_test(&task->tk_count);
	if (waitqueue_active(wq))
		__wake_up_locked_key(wq, TASK_NORMAL, &k);
	spin_unlock_irqrestore(&wq->lock, flags);
	return ret;
}

/*
 * Allow callers to wait for completion of an RPC call
 *
 * Note the use of out_of_line_wait_on_bit() rather than wait_on_bit()
 * to enforce taking of the wq->lock and hence avoid races with
 * rpc_complete_task().
 */
int __rpc_wait_for_completion_task(struct rpc_task *task, int (*action)(void *))
{
	if (action == NULL)
		action = rpc_wait_bit_killable;
	return out_of_line_wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
			action, TASK_KILLABLE);
}
EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);

/*
 * Make an RPC task runnable.
 *
 * Note: If the task is ASYNC, this must be called with
 * the spinlock held to protect the wait queue operation.
 */
static void rpc_make_runnable(struct rpc_task *task)
{
	rpc_clear_queued(task);
	if (rpc_test_and_set_running(task))
		return;
	if (RPC_IS_ASYNC(task)) {
		INIT_WORK(&task->u.tk_work, rpc_async_schedule);
		queue_work(rpciod_workqueue, &task->u.tk_work);
	} else
		wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
}

/*
 * Prepare for sleeping on a wait queue.
 * By always appending tasks to the list we ensure FIFO behavior.
 * NB: An RPC task will only receive interrupt-driven events as long
 * as it's on a wait queue.
 */
static void __rpc_sleep_on_priority(struct rpc_wait_queue *q,
		struct rpc_task *task,
		rpc_action action,
		unsigned char queue_priority)
{
	dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n",
			task->tk_pid, rpc_qname(q), jiffies);

	__rpc_add_wait_queue(q, task, queue_priority);

	BUG_ON(task->tk_callback != NULL);
	task->tk_callback = action;
	__rpc_add_timer(q, task);
}

void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
				rpc_action action)
{
	/* We shouldn't ever put an inactive task to sleep */
	BUG_ON(!RPC_IS_ACTIVATED(task));

	/*
	 * Protect the queue operations.
	 */
	spin_lock_bh(&q->lock);
	__rpc_sleep_on_priority(q, task, action, task->tk_priority);
	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on);

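/*
 * Example (illustrative sketch; the queue and function names are
 * hypothetical): an action routine parks its task with rpc_sleep_on()
 * and another context later wakes a sleeper once resources free up:
 *
 *	static void foo_wait_for_slot(struct rpc_task *task)
 *	{
 *		task->tk_timeout = 5 * HZ;
 *		rpc_sleep_on(&foo_backlog, task, NULL);
 *	}
 *
 *	static void foo_slot_released(void)
 *	{
 *		rpc_wake_up_next(&foo_backlog);
 *	}
 *
 * A NULL action simply leaves tk_callback unset; the task resumes at
 * its current tk_action when woken.
 */
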
void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task,
		rpc_action action, int priority)
{
	/* We shouldn't ever put an inactive task to sleep */
	BUG_ON(!RPC_IS_ACTIVATED(task));

	/*
	 * Protect the queue operations.
	 */
	spin_lock_bh(&q->lock);
	__rpc_sleep_on_priority(q, task, action, priority - RPC_PRIORITY_LOW);
	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on_priority);

/**
 * __rpc_do_wake_up_task - wake up a single rpc_task
 * @queue: wait queue
 * @task: task to be woken up
 *
 * Caller must hold queue->lock, and have cleared the task queued flag.
 */
static void __rpc_do_wake_up_task(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	dprintk("RPC: %5u __rpc_wake_up_task (now %lu)\n",
			task->tk_pid, jiffies);

	/* Has the task been executed yet? If not, we cannot wake it up! */
	if (!RPC_IS_ACTIVATED(task)) {
		printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task);
		return;
	}

	__rpc_remove_wait_queue(queue, task);

	rpc_make_runnable(task);

	dprintk("RPC: __rpc_wake_up_task done\n");
}

/*
 * Wake up a queued task while the queue lock is being held
 */
static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (RPC_IS_QUEUED(task)) {
		/* read barrier matches the smp_wmb() in __rpc_add_wait_queue() */
		smp_rmb();
		if (task->tk_waitqueue == queue)
			__rpc_do_wake_up_task(queue, task);
	}
}

/*
 * Tests whether rpc queue is empty
 */
int rpc_queue_empty(struct rpc_wait_queue *queue)
{
	int res;

	spin_lock_bh(&queue->lock);
	res = queue->qlen;
	spin_unlock_bh(&queue->lock);
	return res == 0;
}
EXPORT_SYMBOL_GPL(rpc_queue_empty);

/*
 * Wake up a task on a specific queue
 */
void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	spin_lock_bh(&queue->lock);
	rpc_wake_up_task_queue_locked(queue, task);
	spin_unlock_bh(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_queued_task);

/*
 * Wake up the next task on a priority queue.
 */
static struct rpc_task * __rpc_wake_up_next_priority(struct rpc_wait_queue *queue)
{
	struct list_head *q;
	struct rpc_task *task;

	/*
	 * Service a batch of tasks from a single owner.
	 */
	q = &queue->tasks[queue->priority];
	if (!list_empty(q)) {
		task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
		if (queue->owner == task->tk_owner) {
			if (--queue->nr)
				goto out;
			list_move_tail(&task->u.tk_wait.list, q);
		}
		/*
		 * Check if we need to switch queues.
		 */
		if (--queue->count)
			goto new_owner;
	}

	/*
	 * Service the next queue.
	 */
	do {
		if (q == &queue->tasks[0])
			q = &queue->tasks[queue->maxpriority];
		else
			q = q - 1;
		if (!list_empty(q)) {
			task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
			goto new_queue;
		}
	} while (q != &queue->tasks[queue->priority]);

	rpc_reset_waitqueue_priority(queue);
	return NULL;

new_queue:
	rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));
new_owner:
	rpc_set_waitqueue_owner(queue, task->tk_owner);
out:
	rpc_wake_up_task_queue_locked(queue, task);
	return task;
}

/*
 * Wake up the next task on the wait queue.
 */
struct rpc_task * rpc_wake_up_next(struct rpc_wait_queue *queue)
{
	struct rpc_task *task = NULL;

	dprintk("RPC: wake_up_next(%p \"%s\")\n",
			queue, rpc_qname(queue));
	spin_lock_bh(&queue->lock);
	if (RPC_IS_PRIORITY(queue))
		task = __rpc_wake_up_next_priority(queue);
	else {
		task_for_first(task, &queue->tasks[0])
			rpc_wake_up_task_queue_locked(queue, task);
	}
	spin_unlock_bh(&queue->lock);

	return task;
}
EXPORT_SYMBOL_GPL(rpc_wake_up_next);

/**
 * rpc_wake_up - wake up all rpc_tasks
 * @queue: rpc_wait_queue on which the tasks are sleeping
 *
 * Grabs queue->lock
 */
void rpc_wake_up(struct rpc_wait_queue *queue)
{
	struct list_head *head;

	spin_lock_bh(&queue->lock);
	head = &queue->tasks[queue->maxpriority];
	for (;;) {
		while (!list_empty(head)) {
			struct rpc_task *task;
			task = list_first_entry(head,
					struct rpc_task,
					u.tk_wait.list);
			rpc_wake_up_task_queue_locked(queue, task);
		}
		if (head == &queue->tasks[0])
			break;
		head--;
	}
	spin_unlock_bh(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up);

/**
 * rpc_wake_up_status - wake up all rpc_tasks and set their status value.
 * @queue: rpc_wait_queue on which the tasks are sleeping
 * @status: status value to set
 *
 * Grabs queue->lock
 */
void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
{
	struct list_head *head;

	spin_lock_bh(&queue->lock);
	head = &queue->tasks[queue->maxpriority];
	for (;;) {
		while (!list_empty(head)) {
			struct rpc_task *task;
			task = list_first_entry(head,
					struct rpc_task,
					u.tk_wait.list);
			task->tk_status = status;
			rpc_wake_up_task_queue_locked(queue, task);
		}
		if (head == &queue->tasks[0])
			break;
		head--;
	}
	spin_unlock_bh(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_status);

static void __rpc_queue_timer_fn(unsigned long ptr)
{
	struct rpc_wait_queue *queue = (struct rpc_wait_queue *)ptr;
	struct rpc_task *task, *n;
	unsigned long expires, now, timeo;

	spin_lock(&queue->lock);
	expires = now = jiffies;
	list_for_each_entry_safe(task, n, &queue->timer_list.list, u.tk_wait.timer_list) {
		timeo = task->u.tk_wait.expires;
		if (time_after_eq(now, timeo)) {
			dprintk("RPC: %5u timeout\n", task->tk_pid);
			task->tk_status = -ETIMEDOUT;
			rpc_wake_up_task_queue_locked(queue, task);
			continue;
		}
		if (expires == now || time_after(expires, timeo))
			expires = timeo;
	}
	if (!list_empty(&queue->timer_list.list))
		rpc_set_queue_timer(queue, expires);
	spin_unlock(&queue->lock);
}

static void __rpc_atrun(struct rpc_task *task)
{
	task->tk_status = 0;
}

/*
 * Run a task at a later time
 */
void rpc_delay(struct rpc_task *task, unsigned long delay)
{
	task->tk_timeout = delay;
	rpc_sleep_on(&delay_queue, task, __rpc_atrun);
}
EXPORT_SYMBOL_GPL(rpc_delay);

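/*
 * Example (illustrative sketch; the state functions are hypothetical):
 * an action routine that hits a transient error can reschedule itself
 * with a short back-off instead of failing the call:
 *
 *	static void foo_reserve_slot(struct rpc_task *task)
 *	{
 *		if (task->tk_status == -EAGAIN) {
 *			task->tk_action = foo_reserve_slot;
 *			rpc_delay(task, HZ >> 4);
 *			return;
 *		}
 *		task->tk_action = foo_transmit;
 *	}
 *
 * __rpc_atrun() clears tk_status when the delay expires, so the retry
 * starts with a clean slate.
 */
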
/*
 * Helper to call task->tk_ops->rpc_call_prepare
 */
void rpc_prepare_task(struct rpc_task *task)
{
	task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
}

static void
rpc_init_task_statistics(struct rpc_task *task)
{
	/* Initialize retry counters */
	task->tk_garb_retry = 2;
	task->tk_cred_retry = 2;
	task->tk_rebind_retry = 2;

	/* starting timestamp */
	task->tk_start = ktime_get();
}

static void
rpc_reset_task_statistics(struct rpc_task *task)
{
	task->tk_timeouts = 0;
	task->tk_flags &= ~(RPC_CALL_MAJORSEEN|RPC_TASK_KILLED|RPC_TASK_SENT);

	rpc_init_task_statistics(task);
}

/*
 * Helper that calls task->tk_ops->rpc_call_done if it exists
 */
void rpc_exit_task(struct rpc_task *task)
{
	task->tk_action = NULL;
	if (task->tk_ops->rpc_call_done != NULL) {
		task->tk_ops->rpc_call_done(task, task->tk_calldata);
		if (task->tk_action != NULL) {
			WARN_ON(RPC_ASSASSINATED(task));
			/* Always release the RPC slot and buffer memory */
			xprt_release(task);
			rpc_reset_task_statistics(task);
		}
	}
}

void rpc_exit(struct rpc_task *task, int status)
{
	task->tk_status = status;
	task->tk_action = rpc_exit_task;
	if (RPC_IS_QUEUED(task))
		rpc_wake_up_queued_task(task->tk_waitqueue, task);
}
EXPORT_SYMBOL_GPL(rpc_exit);

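/*
 * Example (illustrative sketch): rpc_exit() is also the mechanism for
 * aborting a task from outside its normal state machine; marking the
 * task killed first mirrors what rpc_killall_tasks() does. A queued
 * task is woken so that it can run rpc_exit_task() and invoke its
 * rpc_call_done/rpc_release callbacks:
 *
 *	static void foo_abort(struct rpc_task *task)
 *	{
 *		task->tk_flags |= RPC_TASK_KILLED;
 *		rpc_exit(task, -EIO);
 *	}
 */
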
void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
{
	if (ops->rpc_release != NULL)
		ops->rpc_release(calldata);
}

/*
 * This is the RPC `scheduler' (or rather, the finite state machine).
 */
static void __rpc_execute(struct rpc_task *task)
{
	struct rpc_wait_queue *queue;
	int task_is_async = RPC_IS_ASYNC(task);
	int status = 0;

	dprintk("RPC: %5u __rpc_execute flags=0x%x\n",
			task->tk_pid, task->tk_flags);

	BUG_ON(RPC_IS_QUEUED(task));

	for (;;) {
		void (*do_action)(struct rpc_task *);

		/*
		 * Execute any pending callback first.
		 */
		do_action = task->tk_callback;
		task->tk_callback = NULL;
		if (do_action == NULL) {
			/*
			 * Perform the next FSM step.
			 * tk_action may be NULL if the task has been killed.
			 * In particular, note that rpc_killall_tasks may
			 * do this at any time, so beware when dereferencing.
			 */
			do_action = task->tk_action;
			if (do_action == NULL)
				break;
		}
		do_action(task);

		/*
		 * Lockless check for whether task is sleeping or not.
		 */
		if (!RPC_IS_QUEUED(task))
			continue;
		/*
		 * The queue->lock protects against races with
		 * rpc_make_runnable().
		 *
		 * Note that once we clear RPC_TASK_RUNNING on an asynchronous
		 * rpc_task, rpc_make_runnable() can assign it to a
		 * different workqueue. We therefore cannot assume that the
		 * rpc_task pointer may still be dereferenced.
		 */
		queue = task->tk_waitqueue;
		spin_lock_bh(&queue->lock);
		if (!RPC_IS_QUEUED(task)) {
			spin_unlock_bh(&queue->lock);
			continue;
		}
		rpc_clear_running(task);
		spin_unlock_bh(&queue->lock);
		if (task_is_async)
			return;

		/* sync task: sleep here */
		dprintk("RPC: %5u sync task going to sleep\n", task->tk_pid);
		status = out_of_line_wait_on_bit(&task->tk_runstate,
				RPC_TASK_QUEUED, rpc_wait_bit_killable,
				TASK_KILLABLE);
		if (status == -ERESTARTSYS) {
			/*
			 * When a sync task receives a signal, it exits with
			 * -ERESTARTSYS. In order to catch any callbacks that
			 * clean up after sleeping on some queue, we don't
			 * break the loop here, but go around once more.
			 */
			dprintk("RPC: %5u got signal\n", task->tk_pid);
			task->tk_flags |= RPC_TASK_KILLED;
			rpc_exit(task, -ERESTARTSYS);
		}
		rpc_set_running(task);
		dprintk("RPC: %5u sync task resuming\n", task->tk_pid);
	}

	dprintk("RPC: %5u return %d, status %d\n", task->tk_pid, status,
			task->tk_status);
	/* Release all resources associated with the task */
	rpc_release_task(task);
}

/*
 * User-visible entry point to the scheduler.
 *
 * This may be called recursively if e.g. an async NFS task updates
 * the attributes and finds that dirty pages must be flushed.
 * NOTE: Upon exit of this function the task is guaranteed to be
 *	 released. In particular note that tk_release() will have
 *	 been called, so your task memory may have been freed.
 */
void rpc_execute(struct rpc_task *task)
{
	rpc_set_active(task);
	rpc_make_runnable(task);
	if (!RPC_IS_ASYNC(task))
		__rpc_execute(task);
}

static void rpc_async_schedule(struct work_struct *work)
{
	current->flags |= PF_FSTRANS;
	__rpc_execute(container_of(work, struct rpc_task, u.tk_work));
	current->flags &= ~PF_FSTRANS;
}

/**
 * rpc_malloc - allocate an RPC buffer
 * @task: RPC task that will use this buffer
 * @size: requested byte size
 *
 * To prevent rpciod from hanging, this allocator never sleeps,
 * returning NULL if the request cannot be serviced immediately.
 * The caller can arrange to sleep in a way that is safe for rpciod.
 *
 * Most requests are 'small' (under 2KiB) and can be serviced from a
 * mempool, ensuring that NFS reads and writes can always proceed,
 * and that there is good locality of reference for these buffers.
 *
 * In order to avoid memory starvation triggering more writebacks of
 * NFS requests, we avoid using GFP_KERNEL.
 */
void *rpc_malloc(struct rpc_task *task, size_t size)
{
	struct rpc_buffer *buf;
	gfp_t gfp = RPC_IS_SWAPPER(task) ? GFP_ATOMIC : GFP_NOWAIT;

	size += sizeof(struct rpc_buffer);
	if (size <= RPC_BUFFER_MAXSIZE)
		buf = mempool_alloc(rpc_buffer_mempool, gfp);
	else
		buf = kmalloc(size, gfp);

	if (!buf)
		return NULL;

	buf->len = size;
	dprintk("RPC: %5u allocated buffer of size %zu at %p\n",
			task->tk_pid, size, buf);
	return &buf->data;
}
EXPORT_SYMBOL_GPL(rpc_malloc);

/**
 * rpc_free - free buffer allocated via rpc_malloc
 * @buffer: buffer to free
 *
 */
void rpc_free(void *buffer)
{
	size_t size;
	struct rpc_buffer *buf;

	if (!buffer)
		return;

	buf = container_of(buffer, struct rpc_buffer, data);
	size = buf->len;

	dprintk("RPC: freeing buffer of size %zu at %p\n",
			size, buf);

	if (size <= RPC_BUFFER_MAXSIZE)
		mempool_free(buf, rpc_buffer_mempool);
	else
		kfree(buf);
}
EXPORT_SYMBOL_GPL(rpc_free);

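/*
 * Example (illustrative sketch; the retry state is hypothetical):
 * since rpc_malloc() never sleeps, a caller running under rpciod
 * handles a NULL return by backing off and retrying rather than
 * blocking:
 *
 *	void *p = rpc_malloc(task, len);
 *
 *	if (p == NULL) {
 *		task->tk_action = foo_allocate;
 *		rpc_delay(task, HZ >> 4);
 *		return;
 *	}
 *
 * The buffer is later released with rpc_free(p).
 */
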
/*
 * Creation and deletion of RPC task structures
 */
static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *task_setup_data)
{
	memset(task, 0, sizeof(*task));
	atomic_set(&task->tk_count, 1);
	task->tk_flags = task_setup_data->flags;
	task->tk_ops = task_setup_data->callback_ops;
	task->tk_calldata = task_setup_data->callback_data;
	INIT_LIST_HEAD(&task->tk_task);

	task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
	task->tk_owner = current->tgid;

	/* Initialize workqueue for async tasks */
	task->tk_workqueue = task_setup_data->workqueue;

	if (task->tk_ops->rpc_call_prepare != NULL)
		task->tk_action = rpc_prepare_task;

	rpc_init_task_statistics(task);

	dprintk("RPC: new task initialized, procpid %u\n",
			task_pid_nr(current));
}

static struct rpc_task *
rpc_alloc_task(void)
{
	return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS);
}

/*
 * Create a new task for the specified client.
 */
struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data)
{
	struct rpc_task *task = setup_data->task;
	unsigned short flags = 0;

	if (task == NULL) {
		task = rpc_alloc_task();
		if (task == NULL) {
			rpc_release_calldata(setup_data->callback_ops,
					setup_data->callback_data);
			return ERR_PTR(-ENOMEM);
		}
		flags = RPC_TASK_DYNAMIC;
	}

	rpc_init_task(task, setup_data);
	task->tk_flags |= flags;
	dprintk("RPC: allocated task %p\n", task);
	return task;
}

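/*
 * Example (illustrative sketch; foo_call_ops is hypothetical): callers
 * combine rpc_new_task() with rpc_execute(), taking an extra reference
 * so the task can still be inspected after it runs; this mirrors what
 * rpc_run_task() in clnt.c does:
 *
 *	struct rpc_task_setup setup = {
 *		.callback_ops = &foo_call_ops,
 *		.flags = RPC_TASK_ASYNC,
 *	};
 *	struct rpc_task *task;
 *
 *	task = rpc_new_task(&setup);
 *	if (IS_ERR(task))
 *		return PTR_ERR(task);
 *	atomic_inc(&task->tk_count);
 *	rpc_execute(task);
 *	rpc_put_task(task);
 */
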
/*
 * rpc_free_task - release rpc task and perform cleanups
 *
 * Note that we free up the rpc_task _after_ rpc_release_calldata()
 * in order to work around a workqueue dependency issue.
 *
 * Tejun Heo states:
 * "Workqueue currently considers two work items to be the same if they're
 * on the same address and won't execute them concurrently - ie. it
 * makes a work item which is queued again while being executed wait
 * for the previous execution to complete.
 *
 * If a work function frees the work item, and then waits for an event
 * which should be performed by another work item and *that* work item
 * recycles the freed work item, it can create a false dependency loop.
 * There really is no reliable way to detect this short of verifying
 * every memory free."
 */

static void rpc_free_task(struct rpc_task *task)
{
	unsigned short tk_flags = task->tk_flags;

	rpc_release_calldata(task->tk_ops, task->tk_calldata);

	if (tk_flags & RPC_TASK_DYNAMIC) {
		dprintk("RPC: %5u freeing task\n", task->tk_pid);
		mempool_free(task, rpc_task_mempool);
	}
}

static void rpc_async_release(struct work_struct *work)
{
	rpc_free_task(container_of(work, struct rpc_task, u.tk_work));
}

static void rpc_release_resources_task(struct rpc_task *task)
{
	xprt_release(task);
	if (task->tk_msg.rpc_cred) {
		put_rpccred(task->tk_msg.rpc_cred);
		task->tk_msg.rpc_cred = NULL;
	}
	rpc_task_release_client(task);
}

static void rpc_final_put_task(struct rpc_task *task,
		struct workqueue_struct *q)
{
	if (q != NULL) {
		INIT_WORK(&task->u.tk_work, rpc_async_release);
		queue_work(q, &task->u.tk_work);
	} else
		rpc_free_task(task);
}

static void rpc_do_put_task(struct rpc_task *task, struct workqueue_struct *q)
{
	if (atomic_dec_and_test(&task->tk_count)) {
		rpc_release_resources_task(task);
		rpc_final_put_task(task, q);
	}
}

void rpc_put_task(struct rpc_task *task)
{
	rpc_do_put_task(task, NULL);
}
EXPORT_SYMBOL_GPL(rpc_put_task);

void rpc_put_task_async(struct rpc_task *task)
{
	rpc_do_put_task(task, task->tk_workqueue);
}
EXPORT_SYMBOL_GPL(rpc_put_task_async);

static void rpc_release_task(struct rpc_task *task)
{
	dprintk("RPC: %5u release task\n", task->tk_pid);

	BUG_ON (RPC_IS_QUEUED(task));

	rpc_release_resources_task(task);

	/*
	 * Note: at this point we have been removed from rpc_clnt->cl_tasks,
	 * so it should be safe to use task->tk_count as a test for whether
	 * or not any other processes still hold references to our rpc_task.
	 */
	if (atomic_read(&task->tk_count) != 1 + !RPC_IS_ASYNC(task)) {
		/* Wake up anyone who may be waiting for task completion */
		if (!rpc_complete_task(task))
			return;
	} else {
		if (!atomic_dec_and_test(&task->tk_count))
			return;
	}
	rpc_final_put_task(task, task->tk_workqueue);
}

int rpciod_up(void)
{
	return try_module_get(THIS_MODULE) ? 0 : -EINVAL;
}

void rpciod_down(void)
{
	module_put(THIS_MODULE);
}

/*
 * Start up the rpciod workqueue.
 */
static int rpciod_start(void)
{
	struct workqueue_struct *wq;

	/*
	 * Create the rpciod thread and wait for it to start.
	 */
	dprintk("RPC: creating workqueue rpciod\n");
	wq = alloc_workqueue("rpciod", WQ_MEM_RECLAIM, 0);
	rpciod_workqueue = wq;
	return rpciod_workqueue != NULL;
}

static void rpciod_stop(void)
{
	struct workqueue_struct *wq = NULL;

	if (rpciod_workqueue == NULL)
		return;
	dprintk("RPC: destroying workqueue rpciod\n");

	wq = rpciod_workqueue;
	rpciod_workqueue = NULL;
	destroy_workqueue(wq);
}

void
rpc_destroy_mempool(void)
{
	rpciod_stop();
	if (rpc_buffer_mempool)
		mempool_destroy(rpc_buffer_mempool);
	if (rpc_task_mempool)
		mempool_destroy(rpc_task_mempool);
	if (rpc_task_slabp)
		kmem_cache_destroy(rpc_task_slabp);
	if (rpc_buffer_slabp)
		kmem_cache_destroy(rpc_buffer_slabp);
	rpc_destroy_wait_queue(&delay_queue);
}

int
rpc_init_mempool(void)
{
	/*
	 * The following is not strictly a mempool initialisation,
	 * but there is no harm in doing it here
	 */
	rpc_init_wait_queue(&delay_queue, "delayq");
	if (!rpciod_start())
		goto err_nomem;

	rpc_task_slabp = kmem_cache_create("rpc_tasks",
					     sizeof(struct rpc_task),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (!rpc_task_slabp)
		goto err_nomem;
	rpc_buffer_slabp = kmem_cache_create("rpc_buffers",
					     RPC_BUFFER_MAXSIZE,
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (!rpc_buffer_slabp)
		goto err_nomem;
	rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE,
						    rpc_task_slabp);
	if (!rpc_task_mempool)
		goto err_nomem;
	rpc_buffer_mempool = mempool_create_slab_pool(RPC_BUFFER_POOLSIZE,
						      rpc_buffer_slabp);
	if (!rpc_buffer_mempool)
		goto err_nomem;
	return 0;
err_nomem:
	rpc_destroy_mempool();
	return -ENOMEM;
}