[PATCH] RPC: Shrink struct rpc_task by switching to wait_on_bit()
author Trond Myklebust <Trond.Myklebust@netapp.com>
Wed, 22 Jun 2005 17:16:21 +0000 (17:16 +0000)
committer Trond Myklebust <Trond.Myklebust@netapp.com>
Wed, 22 Jun 2005 20:07:07 +0000 (16:07 -0400)
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
include/linux/sunrpc/sched.h
net/sunrpc/sched.c

diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index 99d17ed..4d77e90 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -31,7 +31,6 @@ struct rpc_wait_queue;
 struct rpc_wait {
        struct list_head        list;           /* wait queue links */
        struct list_head        links;          /* Links to related tasks */
-       wait_queue_head_t       waitq;          /* sync: sleep on this q */
        struct rpc_wait_queue * rpc_waitq;      /* RPC wait queue we're on */
 };
 
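The field removed above was only ever used by synchronous tasks to sleep until they were taken off an RPC wait queue; dropping it shrinks struct rpc_wait, and with it struct rpc_task, by one wait_queue_head_t. The replacement is the kernel's shared bit-waitqueue hash: waiters key on the address of task->tk_runstate and the RPC_TASK_QUEUED bit. A minimal sketch of that pattern, assuming the 2.6-era wait_on_bit() API and using purely hypothetical names (my_object, MY_STATE_PENDING, my_wait_bit_interruptible):

	#include <linux/wait.h>
	#include <linux/sched.h>
	#include <linux/bitops.h>

	#define MY_STATE_PENDING	0	/* flag bit inside ->state */

	struct my_object {
		unsigned long	state;	/* flag word; no private wait_queue_head_t */
	};

	/* Action callback in the 2.6-era int (*action)(void *word) form:
	 * return non-zero to abort the wait, zero after sleeping normally. */
	static int my_wait_bit_interruptible(void *word)
	{
		if (signal_pending(current))
			return -ERESTARTSYS;
		schedule();
		return 0;
	}

	/* Sleep until MY_STATE_PENDING is cleared.  wait_on_bit() hashes
	 * &obj->state into a shared table of wait queues, so the object
	 * itself needs no embedded queue head. */
	static int my_wait_pending(struct my_object *obj)
	{
		return wait_on_bit(&obj->state, MY_STATE_PENDING,
				   my_wait_bit_interruptible, TASK_INTERRUPTIBLE);
	}
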
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index cc298fa..2d9eb7f 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -290,7 +290,7 @@ static void rpc_make_runnable(struct rpc_task *task)
                        return;
                }
        } else
-               wake_up(&task->u.tk_wait.waitq);
+               wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
 }
 
 /*
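
On the wake-up side, wake_up_bit() only wakes sleepers hashed on that exact word/bit pair, and it must run after the bit has been cleared, with a barrier in between, or a waiter re-testing the bit could miss the wakeup. A hedged sketch of that ordering, continuing the hypothetical my_object example (smp_mb__after_clear_bit() is the barrier helper of this kernel generation):

	/* Waker side for the sketch above.  Illustrative only. */
	static void my_clear_pending_and_wake(struct my_object *obj)
	{
		clear_bit(MY_STATE_PENDING, &obj->state);
		smp_mb__after_clear_bit();	/* order the clear before the wakeup */
		wake_up_bit(&obj->state, MY_STATE_PENDING);
	}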
@@ -578,6 +578,14 @@ static inline int __rpc_do_exit(struct rpc_task *task)
        return 1;
 }
 
+static int rpc_wait_bit_interruptible(void *word)
+{
+       if (signal_pending(current))
+               return -ERESTARTSYS;
+       schedule();
+       return 0;
+}
+
 /*
  * This is the RPC `scheduler' (or rather, the finite state machine).
  */
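
rpc_wait_bit_interruptible() added above is the action callback that the bit-wait code invokes while the sleeper sits on the hashed queue: returning -ERESTARTSYS on a pending signal aborts the wait, otherwise it schedules away and reports success. A hedged sketch of how a caller sees that return value through out_of_line_wait_on_bit(), reusing the hypothetical names from the earlier sketch:

	/* Illustrative caller: any non-zero value returned by the action
	 * callback is handed straight back by out_of_line_wait_on_bit(). */
	static int my_wait_pending_killable(struct my_object *obj)
	{
		int status;

		status = out_of_line_wait_on_bit(&obj->state, MY_STATE_PENDING,
						 my_wait_bit_interruptible,
						 TASK_INTERRUPTIBLE);
		if (status == -ERESTARTSYS) {
			/* a signal interrupted the wait; the caller decides
			 * whether to clean up, retry or restart the syscall */
		}
		return status;
	}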
@@ -648,22 +656,21 @@ static int __rpc_execute(struct rpc_task *task)
 
                /* sync task: sleep here */
                dprintk("RPC: %4d sync task going to sleep\n", task->tk_pid);
-               if (RPC_TASK_UNINTERRUPTIBLE(task)) {
-                       __wait_event(task->u.tk_wait.waitq, !RPC_IS_QUEUED(task));
-               } else {
-                       __wait_event_interruptible(task->u.tk_wait.waitq, !RPC_IS_QUEUED(task), status);
+               /* Note: Caller should be using rpc_clnt_sigmask() */
+               status = out_of_line_wait_on_bit(&task->tk_runstate,
+                               RPC_TASK_QUEUED, rpc_wait_bit_interruptible,
+                               TASK_INTERRUPTIBLE);
+               if (status == -ERESTARTSYS) {
                        /*
                         * When a sync task receives a signal, it exits with
                         * -ERESTARTSYS. In order to catch any callbacks that
                         * clean up after sleeping on some queue, we don't
                         * break the loop here, but go around once more.
                         */
-                       if (status == -ERESTARTSYS) {
-                               dprintk("RPC: %4d got signal\n", task->tk_pid);
-                               task->tk_flags |= RPC_TASK_KILLED;
-                               rpc_exit(task, -ERESTARTSYS);
-                               rpc_wake_up_task(task);
-                       }
+                       dprintk("RPC: %4d got signal\n", task->tk_pid);
+                       task->tk_flags |= RPC_TASK_KILLED;
+                       rpc_exit(task, -ERESTARTSYS);
+                       rpc_wake_up_task(task);
                }
                rpc_set_running(task);
                dprintk("RPC: %4d sync task resuming\n", task->tk_pid);
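
The rewritten sleep above always uses TASK_INTERRUPTIBLE; tasks that used to take the __wait_event() branch now rely on the caller blocking non-fatal signals, which is what the rpc_clnt_sigmask() note refers to. A hedged sketch of such a synchronous caller, assuming the rpc_execute()/rpc_clnt_sigmask() interfaces of this period and a hypothetical wrapper name:

	/* Illustrative synchronous caller (hypothetical my_sync_execute). */
	static int my_sync_execute(struct rpc_clnt *clnt, struct rpc_task *task)
	{
		sigset_t oldset;
		int status;

		rpc_clnt_sigmask(clnt, &oldset);	/* block non-fatal signals */
		status = rpc_execute(task);		/* may sleep in the loop above */
		rpc_clnt_sigunmask(clnt, &oldset);	/* restore the caller's mask */
		return status;
	}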
@@ -766,8 +773,6 @@ void rpc_init_task(struct rpc_task *task, struct rpc_clnt *clnt, rpc_action call
 
        /* Initialize workqueue for async tasks */
        task->tk_workqueue = rpciod_workqueue;
-       if (!RPC_IS_ASYNC(task))
-               init_waitqueue_head(&task->u.tk_wait.waitq);
 
        if (clnt) {
                atomic_inc(&clnt->cl_users);
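
With the per-task wait queue gone, rpc_init_task() no longer needs a sync-only initialization step: the shared bit-waitqueue hash is set up once by the core kernel. For the hypothetical my_object used in the sketches above, the equivalent is simply a zeroed flag word:

	/* Sketch: no init_waitqueue_head() counterpart is required. */
	static void my_object_init(struct my_object *obj)
	{
		obj->state = 0;
	}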