/*
 *  linux/net/sunrpc/sched.c
 *
 * Scheduling for synchronous and asynchronous RPC requests.
 *
 * Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de>
 *
 * TCP NFS related read + write fixes
 * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
 */
#include <linux/module.h>

#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>

#include <linux/sunrpc/clnt.h>

#include "sunrpc.h"

#ifdef RPC_DEBUG
#define RPCDBG_FACILITY		RPCDBG_SCHED
#endif
/*
 * RPC slabs and memory pools
 */
#define RPC_BUFFER_MAXSIZE	(2048)
#define RPC_BUFFER_POOLSIZE	(8)
#define RPC_TASK_POOLSIZE	(8)
static struct kmem_cache	*rpc_task_slabp __read_mostly;
static struct kmem_cache	*rpc_buffer_slabp __read_mostly;
static mempool_t	*rpc_task_mempool __read_mostly;
static mempool_t	*rpc_buffer_mempool __read_mostly;
static void rpc_async_schedule(struct work_struct *);
static void rpc_release_task(struct rpc_task *task);
static void __rpc_queue_timer_fn(unsigned long ptr);

/*
 * RPC tasks sit here while waiting for conditions to improve.
 */
static struct rpc_wait_queue delay_queue;

/*
 * rpciod-related stuff
 */
struct workqueue_struct *rpciod_workqueue;
/*
 * Disable the timer for a given RPC task. Should be called with
 * queue->lock and bh_disabled in order to avoid races within
 * rpc_run_timer().
 */
static void
__rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (task->tk_timeout == 0)
		return;
	dprintk("RPC: %5u disabling timer\n", task->tk_pid);
	task->tk_timeout = 0;
	list_del(&task->u.tk_wait.timer_list);
	if (list_empty(&queue->timer_list.list))
		del_timer(&queue->timer_list.timer);
}
static void
rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires)
{
	queue->timer_list.expires = expires;
	mod_timer(&queue->timer_list.timer, expires);
}
/*
 * Set up a timer for the current task.
 */
static void
__rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (!task->tk_timeout)
		return;

	dprintk("RPC: %5u setting alarm for %lu ms\n",
			task->tk_pid, task->tk_timeout * 1000 / HZ);

	task->u.tk_wait.expires = jiffies + task->tk_timeout;
	if (list_empty(&queue->timer_list.list) || time_before(task->u.tk_wait.expires, queue->timer_list.expires))
		rpc_set_queue_timer(queue, task->u.tk_wait.expires);
	list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
}
/*
 * Add new request to a priority queue.
 */
static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue,
		struct rpc_task *task,
		unsigned char queue_priority)
{
	struct list_head *q;
	struct rpc_task *t;

	INIT_LIST_HEAD(&task->u.tk_wait.links);
	q = &queue->tasks[queue_priority];
	if (unlikely(queue_priority > queue->maxpriority))
		q = &queue->tasks[queue->maxpriority];
	list_for_each_entry(t, q, u.tk_wait.list) {
		if (t->tk_owner == task->tk_owner) {
			list_add_tail(&task->u.tk_wait.list, &t->u.tk_wait.links);
			return;
		}
	}
	list_add_tail(&task->u.tk_wait.list, q);
}
/*
 * Add new request to wait queue.
 *
 * Swapper tasks always get inserted at the head of the queue.
 * This should avoid many nasty memory deadlocks and hopefully
 * improve overall performance.
 * Everyone else gets appended to the queue to ensure proper FIFO behavior.
 */
static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
		struct rpc_task *task,
		unsigned char queue_priority)
{
	BUG_ON (RPC_IS_QUEUED(task));

	if (RPC_IS_PRIORITY(queue))
		__rpc_add_wait_queue_priority(queue, task, queue_priority);
	else if (RPC_IS_SWAPPER(task))
		list_add(&task->u.tk_wait.list, &queue->tasks[0]);
	else
		list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
	task->tk_waitqueue = queue;
	queue->qlen++;
	/* barrier matches the read in rpc_wake_up_task_queue_locked() */
	smp_wmb();
	rpc_set_queued(task);

	dprintk("RPC: %5u added to queue %p \"%s\"\n",
			task->tk_pid, queue, rpc_qname(queue));
}
/*
 * Remove request from a priority queue.
 */
static void __rpc_remove_wait_queue_priority(struct rpc_task *task)
{
	struct rpc_task *t;

	if (!list_empty(&task->u.tk_wait.links)) {
		t = list_entry(task->u.tk_wait.links.next, struct rpc_task, u.tk_wait.list);
		list_move(&t->u.tk_wait.list, &task->u.tk_wait.list);
		list_splice_init(&task->u.tk_wait.links, &t->u.tk_wait.links);
	}
}

/*
 * Remove request from queue.
 * Note: must be called with spin lock held.
 */
static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	__rpc_disable_timer(queue, task);
	if (RPC_IS_PRIORITY(queue))
		__rpc_remove_wait_queue_priority(task);
	list_del(&task->u.tk_wait.list);
	queue->qlen--;
	dprintk("RPC: %5u removed from queue %p \"%s\"\n",
			task->tk_pid, queue, rpc_qname(queue));
}
static inline void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
{
	queue->priority = priority;
	queue->count = 1 << (priority * 2);
}

static inline void rpc_set_waitqueue_owner(struct rpc_wait_queue *queue, pid_t pid)
{
	queue->owner = pid;
	queue->nr = RPC_BATCH_COUNT;
}

static inline void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
{
	rpc_set_waitqueue_priority(queue, queue->maxpriority);
	rpc_set_waitqueue_owner(queue, 0);
}

static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, unsigned char nr_queues)
{
	int i;

	spin_lock_init(&queue->lock);
	for (i = 0; i < ARRAY_SIZE(queue->tasks); i++)
		INIT_LIST_HEAD(&queue->tasks[i]);
	queue->maxpriority = nr_queues - 1;
	rpc_reset_waitqueue_priority(queue);
	queue->qlen = 0;
	setup_timer(&queue->timer_list.timer, __rpc_queue_timer_fn, (unsigned long)queue);
	INIT_LIST_HEAD(&queue->timer_list.list);
}
void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
	__rpc_init_priority_wait_queue(queue, qname, RPC_NR_PRIORITY);
}
EXPORT_SYMBOL_GPL(rpc_init_priority_wait_queue);

void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
	__rpc_init_priority_wait_queue(queue, qname, 1);
}
EXPORT_SYMBOL_GPL(rpc_init_wait_queue);

void rpc_destroy_wait_queue(struct rpc_wait_queue *queue)
{
	del_timer_sync(&queue->timer_list.timer);
}
EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue);
static int rpc_wait_bit_killable(void *word)
{
	if (fatal_signal_pending(current))
		return -ERESTARTSYS;
	freezable_schedule();
	return 0;
}

#ifdef RPC_DEBUG
static void rpc_task_set_debuginfo(struct rpc_task *task)
{
	static atomic_t rpc_pid;

	task->tk_pid = atomic_inc_return(&rpc_pid);
}
#else
static inline void rpc_task_set_debuginfo(struct rpc_task *task)
{
}
#endif

static void rpc_set_active(struct rpc_task *task)
{
	rpc_task_set_debuginfo(task);
	set_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
}

/*
 * Mark an RPC call as having completed by clearing the 'active' bit
 * and then waking up all tasks that were sleeping.
 */
static int rpc_complete_task(struct rpc_task *task)
{
	void *m = &task->tk_runstate;
	wait_queue_head_t *wq = bit_waitqueue(m, RPC_TASK_ACTIVE);
	struct wait_bit_key k = __WAIT_BIT_KEY_INITIALIZER(m, RPC_TASK_ACTIVE);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&wq->lock, flags);
	clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
	ret = atomic_dec_and_test(&task->tk_count);
	if (waitqueue_active(wq))
		__wake_up_locked_key(wq, TASK_NORMAL, &k);
	spin_unlock_irqrestore(&wq->lock, flags);
	return ret;
}
/*
 * Allow callers to wait for completion of an RPC call
 *
 * Note the use of out_of_line_wait_on_bit() rather than wait_on_bit()
 * to enforce taking of the wq->lock and hence avoid races with
 * rpc_complete_task().
 */
int __rpc_wait_for_completion_task(struct rpc_task *task, int (*action)(void *))
{
	if (action == NULL)
		action = rpc_wait_bit_killable;
	return out_of_line_wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
			action, TASK_KILLABLE);
}
EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);
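
/*
 * Illustrative sketch (not part of the original file): a caller that holds
 * its own reference to a task (rpc_run_task() in clnt.c takes one before
 * calling rpc_execute()) can wait for the call to finish and then drop
 * that reference, roughly:
 *
 *	status = rpc_wait_for_completion_task(task);
 *	rpc_put_task(task);
 *
 * A negative return (-ERESTARTSYS) means a fatal signal interrupted the
 * wait; the task itself may still be running at that point.
 */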
/*
 * Make an RPC task runnable.
 *
 * Note: If the task is ASYNC, and is being made runnable after sitting on an
 * rpc_wait_queue, this must be called with the queue spinlock held to protect
 * the wait queue operation.
 * Note the ordering of rpc_test_and_set_running() and rpc_clear_queued(),
 * which is needed to ensure that __rpc_execute() doesn't loop (due to the
 * lockless RPC_IS_QUEUED() test) before we've had a chance to test
 * the RPC_TASK_RUNNING flag.
 */
static void rpc_make_runnable(struct rpc_task *task)
{
	bool need_wakeup = !rpc_test_and_set_running(task);

	rpc_clear_queued(task);
	if (!need_wakeup)
		return;
	if (RPC_IS_ASYNC(task)) {
		INIT_WORK(&task->u.tk_work, rpc_async_schedule);
		queue_work(rpciod_workqueue, &task->u.tk_work);
	} else
		wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
}
/*
 * Prepare for sleeping on a wait queue.
 * By always appending tasks to the list we ensure FIFO behavior.
 * NB: An RPC task will only receive interrupt-driven events as long
 * as it's on a wait queue.
 */
static void __rpc_sleep_on_priority(struct rpc_wait_queue *q,
		struct rpc_task *task,
		rpc_action action,
		unsigned char queue_priority)
{
	dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n",
			task->tk_pid, rpc_qname(q), jiffies);

	__rpc_add_wait_queue(q, task, queue_priority);

	BUG_ON(task->tk_callback != NULL);
	task->tk_callback = action;
	__rpc_add_timer(q, task);
}

void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
				rpc_action action)
{
	/* We shouldn't ever put an inactive task to sleep */
	BUG_ON(!RPC_IS_ACTIVATED(task));

	/*
	 * Protect the queue operations.
	 */
	spin_lock_bh(&q->lock);
	__rpc_sleep_on_priority(q, task, action, task->tk_priority);
	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on);
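
/*
 * Illustrative sketch (not part of the original file): a state-machine step
 * parks its own task on a wait queue, and whoever makes the condition true
 * later wakes it. With a hypothetical queue "foo_waitq":
 *
 *	static void foo_wait_for_reply(struct rpc_task *task)
 *	{
 *		task->tk_timeout = 5 * HZ;
 *		rpc_sleep_on(&foo_waitq, task, NULL);
 *	}
 *
 * and, from the completion path:
 *
 *	rpc_wake_up_queued_task(&foo_waitq, task);
 *
 * Setting tk_timeout before sleeping arms the queue timer, so the task is
 * woken with tk_status == -ETIMEDOUT if nobody wakes it sooner.
 */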
void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task,
		rpc_action action, int priority)
{
	/* We shouldn't ever put an inactive task to sleep */
	BUG_ON(!RPC_IS_ACTIVATED(task));

	/*
	 * Protect the queue operations.
	 */
	spin_lock_bh(&q->lock);
	__rpc_sleep_on_priority(q, task, action, priority - RPC_PRIORITY_LOW);
	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on_priority);
/**
 * __rpc_do_wake_up_task - wake up a single rpc_task
 * @queue: wait queue
 * @task: task to be woken up
 *
 * Caller must hold queue->lock, and have cleared the task queued flag.
 */
static void __rpc_do_wake_up_task(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	dprintk("RPC: %5u __rpc_wake_up_task (now %lu)\n",
			task->tk_pid, jiffies);

	/* Has the task been executed yet? If not, we cannot wake it up! */
	if (!RPC_IS_ACTIVATED(task)) {
		printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task);
		return;
	}

	__rpc_remove_wait_queue(queue, task);

	rpc_make_runnable(task);

	dprintk("RPC: __rpc_wake_up_task done\n");
}
/*
 * Wake up a queued task while the queue lock is being held
 */
static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (RPC_IS_QUEUED(task)) {
		smp_rmb();
		if (task->tk_waitqueue == queue)
			__rpc_do_wake_up_task(queue, task);
	}
}

/*
 * Tests whether rpc queue is empty
 */
int rpc_queue_empty(struct rpc_wait_queue *queue)
{
	int res;

	spin_lock_bh(&queue->lock);
	res = queue->qlen;
	spin_unlock_bh(&queue->lock);
	return res == 0;
}
EXPORT_SYMBOL_GPL(rpc_queue_empty);

/*
 * Wake up a task on a specific queue
 */
void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	spin_lock_bh(&queue->lock);
	rpc_wake_up_task_queue_locked(queue, task);
	spin_unlock_bh(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_queued_task);
/*
 * Wake up the next task on a priority queue.
 */
static struct rpc_task * __rpc_wake_up_next_priority(struct rpc_wait_queue *queue)
{
	struct list_head *q;
	struct rpc_task *task;

	/*
	 * Service a batch of tasks from a single owner.
	 */
	q = &queue->tasks[queue->priority];
	if (!list_empty(q)) {
		task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
		if (queue->owner == task->tk_owner) {
			if (--queue->nr)
				goto out;
			list_move_tail(&task->u.tk_wait.list, q);
		}
		/*
		 * Check if we need to switch queues.
		 */
		if (--queue->count)
			goto new_owner;
	}

	/*
	 * Service the next queue.
	 */
	do {
		if (q == &queue->tasks[0])
			q = &queue->tasks[queue->maxpriority];
		else
			q = q - 1;
		if (!list_empty(q)) {
			task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
			goto new_queue;
		}
	} while (q != &queue->tasks[queue->priority]);

	rpc_reset_waitqueue_priority(queue);
	return NULL;

new_queue:
	rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));
new_owner:
	rpc_set_waitqueue_owner(queue, task->tk_owner);
out:
	rpc_wake_up_task_queue_locked(queue, task);
	return task;
}
/*
 * Wake up the next task on the wait queue.
 */
struct rpc_task * rpc_wake_up_next(struct rpc_wait_queue *queue)
{
	struct rpc_task	*task = NULL;

	dprintk("RPC: wake_up_next(%p \"%s\")\n",
			queue, rpc_qname(queue));
	spin_lock_bh(&queue->lock);
	if (RPC_IS_PRIORITY(queue))
		task = __rpc_wake_up_next_priority(queue);
	else {
		task_for_first(task, &queue->tasks[0])
			rpc_wake_up_task_queue_locked(queue, task);
	}
	spin_unlock_bh(&queue->lock);

	return task;
}
EXPORT_SYMBOL_GPL(rpc_wake_up_next);
/**
 * rpc_wake_up - wake up all rpc_tasks
 * @queue: rpc_wait_queue on which the tasks are sleeping
 *
 * Grabs queue->lock
 */
void rpc_wake_up(struct rpc_wait_queue *queue)
{
	struct list_head *head;

	spin_lock_bh(&queue->lock);
	head = &queue->tasks[queue->maxpriority];
	for (;;) {
		while (!list_empty(head)) {
			struct rpc_task *task;
			task = list_first_entry(head,
					struct rpc_task,
					u.tk_wait.list);
			rpc_wake_up_task_queue_locked(queue, task);
		}
		if (head == &queue->tasks[0])
			break;
		head--;
	}
	spin_unlock_bh(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up);
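
/*
 * Illustrative sketch (not part of the original file): shutdown paths can
 * simply drain everything parked on a queue they own, e.g. with the
 * hypothetical queue used above:
 *
 *	rpc_wake_up(&foo_waitq);
 *
 * Each task resumes in __rpc_execute() with its tk_status unchanged; see
 * rpc_wake_up_status() below for the variant that forces an error.
 */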
/**
 * rpc_wake_up_status - wake up all rpc_tasks and set their status value.
 * @queue: rpc_wait_queue on which the tasks are sleeping
 * @status: status value to set
 *
 * Grabs queue->lock
 */
void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
{
	struct list_head *head;

	spin_lock_bh(&queue->lock);
	head = &queue->tasks[queue->maxpriority];
	for (;;) {
		while (!list_empty(head)) {
			struct rpc_task *task;
			task = list_first_entry(head,
					struct rpc_task,
					u.tk_wait.list);
			task->tk_status = status;
			rpc_wake_up_task_queue_locked(queue, task);
		}
		if (head == &queue->tasks[0])
			break;
		head--;
	}
	spin_unlock_bh(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_status);
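
/*
 * Illustrative sketch (not part of the original file): error paths usually
 * fail every waiter in one call, e.g. when a connection is torn down:
 *
 *	rpc_wake_up_status(&foo_waitq, -EIO);
 *
 * so that the next state-machine step of each queued task sees the error
 * in tk_status.
 */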
static void __rpc_queue_timer_fn(unsigned long ptr)
{
	struct rpc_wait_queue *queue = (struct rpc_wait_queue *)ptr;
	struct rpc_task *task, *n;
	unsigned long expires, now, timeo;

	spin_lock(&queue->lock);
	expires = now = jiffies;
	list_for_each_entry_safe(task, n, &queue->timer_list.list, u.tk_wait.timer_list) {
		timeo = task->u.tk_wait.expires;
		if (time_after_eq(now, timeo)) {
			dprintk("RPC: %5u timeout\n", task->tk_pid);
			task->tk_status = -ETIMEDOUT;
			rpc_wake_up_task_queue_locked(queue, task);
			continue;
		}
		if (expires == now || time_after(expires, timeo))
			expires = timeo;
	}
	if (!list_empty(&queue->timer_list.list))
		rpc_set_queue_timer(queue, expires);
	spin_unlock(&queue->lock);
}
static void __rpc_atrun(struct rpc_task *task)
{
	task->tk_status = 0;
}

/*
 * Run a task at a later time
 */
void rpc_delay(struct rpc_task *task, unsigned long delay)
{
	task->tk_timeout = delay;
	rpc_sleep_on(&delay_queue, task, __rpc_atrun);
}
EXPORT_SYMBOL_GPL(rpc_delay);
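
/*
 * Illustrative sketch (not part of the original file): a retryable step
 * typically re-arms itself and backs off before running again:
 *
 *	static void foo_retry(struct rpc_task *task)
 *	{
 *		task->tk_action = foo_retry;
 *		rpc_delay(task, HZ >> 2);
 *	}
 *
 * When the delay expires, __rpc_queue_timer_fn() wakes the task, the
 * __rpc_atrun() callback clears tk_status, and the scheduler then calls
 * foo_retry() again.
 */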
/*
 * Helper to call task->tk_ops->rpc_call_prepare
 */
void rpc_prepare_task(struct rpc_task *task)
{
	task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
}

static void
rpc_init_task_statistics(struct rpc_task *task)
{
	/* Initialize retry counters */
	task->tk_garb_retry = 2;
	task->tk_cred_retry = 2;
	task->tk_rebind_retry = 2;

	/* starting timestamp */
	task->tk_start = ktime_get();
}

static void
rpc_reset_task_statistics(struct rpc_task *task)
{
	task->tk_timeouts = 0;
	task->tk_flags &= ~(RPC_CALL_MAJORSEEN|RPC_TASK_KILLED|RPC_TASK_SENT);

	rpc_init_task_statistics(task);
}
/*
 * Helper that calls task->tk_ops->rpc_call_done if it exists
 */
void rpc_exit_task(struct rpc_task *task)
{
	task->tk_action = NULL;
	if (task->tk_ops->rpc_call_done != NULL) {
		task->tk_ops->rpc_call_done(task, task->tk_calldata);
		if (task->tk_action != NULL) {
			WARN_ON(RPC_ASSASSINATED(task));
			/* Always release the RPC slot and buffer memory */
			xprt_release(task);
			rpc_reset_task_statistics(task);
		}
	}
}

void rpc_exit(struct rpc_task *task, int status)
{
	task->tk_status = status;
	task->tk_action = rpc_exit_task;
	if (RPC_IS_QUEUED(task))
		rpc_wake_up_queued_task(task->tk_waitqueue, task);
}
EXPORT_SYMBOL_GPL(rpc_exit);

void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
{
	if (ops->rpc_release != NULL)
		ops->rpc_release(calldata);
}
/*
 * This is the RPC `scheduler' (or rather, the finite state machine).
 */
static void __rpc_execute(struct rpc_task *task)
{
	struct rpc_wait_queue *queue;
	int task_is_async = RPC_IS_ASYNC(task);
	int status = 0;

	dprintk("RPC: %5u __rpc_execute flags=0x%x\n",
			task->tk_pid, task->tk_flags);

	BUG_ON(RPC_IS_QUEUED(task));

	for (;;) {
		void (*do_action)(struct rpc_task *);

		/*
		 * Execute any pending callback first.
		 */
		do_action = task->tk_callback;
		task->tk_callback = NULL;
		if (do_action == NULL) {
			/*
			 * Perform the next FSM step.
			 * tk_action may be NULL if the task has been killed.
			 * In particular, note that rpc_killall_tasks may
			 * do this at any time, so beware when dereferencing.
			 */
			do_action = task->tk_action;
			if (do_action == NULL)
				break;
		}
		do_action(task);

		/*
		 * Lockless check for whether task is sleeping or not.
		 */
		if (!RPC_IS_QUEUED(task))
			continue;
		/*
		 * The queue->lock protects against races with
		 * rpc_make_runnable().
		 *
		 * Note that once we clear RPC_TASK_RUNNING on an asynchronous
		 * rpc_task, rpc_make_runnable() can assign it to a
		 * different workqueue. We therefore cannot assume that the
		 * rpc_task pointer may still be dereferenced.
		 */
		queue = task->tk_waitqueue;
		spin_lock_bh(&queue->lock);
		if (!RPC_IS_QUEUED(task)) {
			spin_unlock_bh(&queue->lock);
			continue;
		}
		rpc_clear_running(task);
		spin_unlock_bh(&queue->lock);
		if (task_is_async)
			return;

		/* sync task: sleep here */
		dprintk("RPC: %5u sync task going to sleep\n", task->tk_pid);
		status = out_of_line_wait_on_bit(&task->tk_runstate,
				RPC_TASK_QUEUED, rpc_wait_bit_killable,
				TASK_KILLABLE);
		if (status == -ERESTARTSYS) {
			/*
			 * When a sync task receives a signal, it exits with
			 * -ERESTARTSYS. In order to catch any callbacks that
			 * clean up after sleeping on some queue, we don't
			 * break the loop here, but go around once more.
			 */
			dprintk("RPC: %5u got signal\n", task->tk_pid);
			task->tk_flags |= RPC_TASK_KILLED;
			rpc_exit(task, -ERESTARTSYS);
		}
		rpc_set_running(task);
		dprintk("RPC: %5u sync task resuming\n", task->tk_pid);
	}

	dprintk("RPC: %5u return %d, status %d\n", task->tk_pid, status,
			task->tk_status);
	/* Release all resources associated with the task */
	rpc_release_task(task);
}
/*
 * User-visible entry point to the scheduler.
 *
 * This may be called recursively if e.g. an async NFS task updates
 * the attributes and finds that dirty pages must be flushed.
 * NOTE: Upon exit of this function the task is guaranteed to be
 *	 released. In particular note that tk_release() will have
 *	 been called, so your task memory may have been freed.
 */
void rpc_execute(struct rpc_task *task)
{
	rpc_set_active(task);
	rpc_make_runnable(task);
	if (!RPC_IS_ASYNC(task))
		__rpc_execute(task);
}

static void rpc_async_schedule(struct work_struct *work)
{
	current->flags |= PF_FSTRANS;
	__rpc_execute(container_of(work, struct rpc_task, u.tk_work));
	current->flags &= ~PF_FSTRANS;
}
/**
 * rpc_malloc - allocate an RPC buffer
 * @task: RPC task that will use this buffer
 * @size: requested byte size
 *
 * To prevent rpciod from hanging, this allocator never sleeps,
 * returning NULL if the request cannot be serviced immediately.
 * The caller can arrange to sleep in a way that is safe for rpciod.
 *
 * Most requests are 'small' (under 2KiB) and can be serviced from a
 * mempool, ensuring that NFS reads and writes can always proceed,
 * and that there is good locality of reference for these buffers.
 *
 * In order to avoid memory starvation triggering more writebacks of
 * NFS requests, we avoid using GFP_KERNEL.
 */
void *rpc_malloc(struct rpc_task *task, size_t size)
{
	struct rpc_buffer *buf;
	gfp_t gfp = RPC_IS_SWAPPER(task) ? GFP_ATOMIC : GFP_NOWAIT;

	size += sizeof(struct rpc_buffer);
	if (size <= RPC_BUFFER_MAXSIZE)
		buf = mempool_alloc(rpc_buffer_mempool, gfp);
	else
		buf = kmalloc(size, gfp);

	if (!buf)
		return NULL;

	buf->len = size;
	dprintk("RPC: %5u allocated buffer of size %zu at %p\n",
			task->tk_pid, size, buf);
	return &buf->data;
}
EXPORT_SYMBOL_GPL(rpc_malloc);
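
/*
 * Illustrative sketch (not part of the original file): rpc_malloc() and
 * rpc_free() are normally reached through a transport's buf_alloc/buf_free
 * ops from call_allocate() in clnt.c, roughly:
 *
 *	req->rq_buffer = rpc_malloc(task, req->rq_callsize + req->rq_rcvsize);
 *	if (req->rq_buffer == NULL) {
 *		task->tk_action = call_allocate;
 *		rpc_delay(task, HZ >> 4);
 *	}
 *
 * with rpc_free(req->rq_buffer) performed when the request is released.
 */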
/**
 * rpc_free - free buffer allocated via rpc_malloc
 * @buffer: buffer to free
 *
 */
void rpc_free(void *buffer)
{
	size_t size;
	struct rpc_buffer *buf;

	if (!buffer)
		return;

	buf = container_of(buffer, struct rpc_buffer, data);
	size = buf->len;

	dprintk("RPC: freeing buffer of size %zu at %p\n",
			size, buf);

	if (size <= RPC_BUFFER_MAXSIZE)
		mempool_free(buf, rpc_buffer_mempool);
	else
		kfree(buf);
}
EXPORT_SYMBOL_GPL(rpc_free);

/*
 * Creation and deletion of RPC task structures
 */
static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *task_setup_data)
{
	memset(task, 0, sizeof(*task));
	atomic_set(&task->tk_count, 1);
	task->tk_flags = task_setup_data->flags;
	task->tk_ops = task_setup_data->callback_ops;
	task->tk_calldata = task_setup_data->callback_data;
	INIT_LIST_HEAD(&task->tk_task);

	task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
	task->tk_owner = current->tgid;

	/* Initialize workqueue for async tasks */
	task->tk_workqueue = task_setup_data->workqueue;

	if (task->tk_ops->rpc_call_prepare != NULL)
		task->tk_action = rpc_prepare_task;

	rpc_init_task_statistics(task);

	dprintk("RPC: new task initialized, procpid %u\n",
			task_pid_nr(current));
}
static struct rpc_task *
rpc_alloc_task(void)
{
	return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS);
}

/*
 * Create a new task for the specified client.
 */
struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data)
{
	struct rpc_task	*task = setup_data->task;
	unsigned short flags = 0;

	if (task == NULL) {
		task = rpc_alloc_task();
		if (task == NULL) {
			rpc_release_calldata(setup_data->callback_ops,
					setup_data->callback_data);
			return ERR_PTR(-ENOMEM);
		}
		flags = RPC_TASK_DYNAMIC;
	}

	rpc_init_task(task, setup_data);
	task->tk_flags |= flags;
	dprintk("RPC: allocated task %p\n", task);
	return task;
}
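
/*
 * Illustrative sketch (not part of the original file): callers normally go
 * through rpc_run_task() in clnt.c, which wraps rpc_new_task() and
 * rpc_execute(). A hypothetical async call might be set up like this:
 *
 *	static const struct rpc_call_ops foo_call_ops = {
 *		.rpc_call_prepare = foo_prepare,
 *		.rpc_call_done    = foo_done,
 *		.rpc_release      = foo_release,
 *	};
 *
 *	struct rpc_task_setup setup = {
 *		.rpc_client    = clnt,
 *		.rpc_message   = &msg,
 *		.callback_ops  = &foo_call_ops,
 *		.callback_data = foo,
 *		.flags         = RPC_TASK_ASYNC,
 *	};
 *	struct rpc_task *task = rpc_run_task(&setup);
 *	if (!IS_ERR(task))
 *		rpc_put_task(task);
 */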
/*
 * rpc_free_task - release rpc task and perform cleanups
 *
 * Note that we free up the rpc_task _after_ rpc_release_calldata()
 * in order to work around a workqueue dependency issue.
 *
 * Tejun Heo states:
 * "Workqueue currently considers two work items to be the same if they're
 * on the same address and won't execute them concurrently - ie. it
 * makes a work item which is queued again while being executed wait
 * for the previous execution to complete.
 *
 * If a work function frees the work item, and then waits for an event
 * which should be performed by another work item and *that* work item
 * recycles the freed work item, it can create a false dependency loop.
 * There really is no reliable way to detect this short of verifying
 * every memory free."
 *
 */
static void rpc_free_task(struct rpc_task *task)
{
	unsigned short tk_flags = task->tk_flags;

	rpc_release_calldata(task->tk_ops, task->tk_calldata);

	if (tk_flags & RPC_TASK_DYNAMIC) {
		dprintk("RPC: %5u freeing task\n", task->tk_pid);
		mempool_free(task, rpc_task_mempool);
	}
}

static void rpc_async_release(struct work_struct *work)
{
	rpc_free_task(container_of(work, struct rpc_task, u.tk_work));
}
static void rpc_release_resources_task(struct rpc_task *task)
{
	xprt_release(task);
	if (task->tk_msg.rpc_cred) {
		put_rpccred(task->tk_msg.rpc_cred);
		task->tk_msg.rpc_cred = NULL;
	}
	rpc_task_release_client(task);
}

static void rpc_final_put_task(struct rpc_task *task,
		struct workqueue_struct *q)
{
	if (q != NULL) {
		INIT_WORK(&task->u.tk_work, rpc_async_release);
		queue_work(q, &task->u.tk_work);
	} else
		rpc_free_task(task);
}

static void rpc_do_put_task(struct rpc_task *task, struct workqueue_struct *q)
{
	if (atomic_dec_and_test(&task->tk_count)) {
		rpc_release_resources_task(task);
		rpc_final_put_task(task, q);
	}
}

void rpc_put_task(struct rpc_task *task)
{
	rpc_do_put_task(task, NULL);
}
EXPORT_SYMBOL_GPL(rpc_put_task);

void rpc_put_task_async(struct rpc_task *task)
{
	rpc_do_put_task(task, task->tk_workqueue);
}
EXPORT_SYMBOL_GPL(rpc_put_task_async);
static void rpc_release_task(struct rpc_task *task)
{
	dprintk("RPC: %5u release task\n", task->tk_pid);

	BUG_ON (RPC_IS_QUEUED(task));

	rpc_release_resources_task(task);

	/*
	 * Note: at this point we have been removed from rpc_clnt->cl_tasks,
	 * so it should be safe to use task->tk_count as a test for whether
	 * or not any other processes still hold references to our rpc_task.
	 */
	if (atomic_read(&task->tk_count) != 1 + !RPC_IS_ASYNC(task)) {
		/* Wake up anyone who may be waiting for task completion */
		if (!rpc_complete_task(task))
			return;
	} else {
		if (!atomic_dec_and_test(&task->tk_count))
			return;
	}
	rpc_final_put_task(task, task->tk_workqueue);
}

int rpciod_up(void)
{
	return try_module_get(THIS_MODULE) ? 0 : -EINVAL;
}

void rpciod_down(void)
{
	module_put(THIS_MODULE);
}
/*
 * Start up the rpciod workqueue.
 */
static int rpciod_start(void)
{
	struct workqueue_struct *wq;

	/*
	 * Create the rpciod thread and wait for it to start.
	 */
	dprintk("RPC: creating workqueue rpciod\n");
	wq = alloc_workqueue("rpciod", WQ_MEM_RECLAIM, 0);
	rpciod_workqueue = wq;
	return rpciod_workqueue != NULL;
}

static void rpciod_stop(void)
{
	struct workqueue_struct *wq = NULL;

	if (rpciod_workqueue == NULL)
		return;
	dprintk("RPC: destroying workqueue rpciod\n");

	wq = rpciod_workqueue;
	rpciod_workqueue = NULL;
	destroy_workqueue(wq);
}
void
rpc_destroy_mempool(void)
{
	rpciod_stop();
	if (rpc_buffer_mempool)
		mempool_destroy(rpc_buffer_mempool);
	if (rpc_task_mempool)
		mempool_destroy(rpc_task_mempool);
	if (rpc_task_slabp)
		kmem_cache_destroy(rpc_task_slabp);
	if (rpc_buffer_slabp)
		kmem_cache_destroy(rpc_buffer_slabp);
	rpc_destroy_wait_queue(&delay_queue);
}
int
rpc_init_mempool(void)
{
	/*
	 * The following is not strictly a mempool initialisation,
	 * but there is no harm in doing it here
	 */
	rpc_init_wait_queue(&delay_queue, "delayq");
	if (!rpciod_start())
		goto err_nomem;

	rpc_task_slabp = kmem_cache_create("rpc_tasks",
					     sizeof(struct rpc_task),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (!rpc_task_slabp)
		goto err_nomem;
	rpc_buffer_slabp = kmem_cache_create("rpc_buffers",
					     RPC_BUFFER_MAXSIZE,
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (!rpc_buffer_slabp)
		goto err_nomem;
	rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE,
						    rpc_task_slabp);
	if (!rpc_task_mempool)
		goto err_nomem;
	rpc_buffer_mempool = mempool_create_slab_pool(RPC_BUFFER_POOLSIZE,
						      rpc_buffer_slabp);
	if (!rpc_buffer_mempool)
		goto err_nomem;
	return 0;
err_nomem:
	rpc_destroy_mempool();
	return -ENOMEM;
}