Merge branch 'task_killable' of git://git.kernel.org/pub/scm/linux/kernel/git/willy/misc
author     Linus Torvalds <torvalds@linux-foundation.org>
           Fri, 1 Feb 2008 00:45:47 +0000 (11:45 +1100)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Fri, 1 Feb 2008 00:45:47 +0000 (11:45 +1100)
* 'task_killable' of git://git.kernel.org/pub/scm/linux/kernel/git/willy/misc: (22 commits)
  Remove commented-out code copied from NFS
  NFS: Switch from intr mount option to TASK_KILLABLE
  Add wait_for_completion_killable
  Add wait_event_killable
  Add schedule_timeout_killable
  Use mutex_lock_killable in vfs_readdir
  Add mutex_lock_killable
  Use lock_page_killable
  Add lock_page_killable
  Add fatal_signal_pending
  Add TASK_WAKEKILL
  exit: Use task_is_*
  signal: Use task_is_*
  sched: Use task_contributes_to_load, TASK_ALL and TASK_NORMAL
  ptrace: Use task_is_*
  power: Use task_is_*
  wait: Use TASK_NORMAL
  proc/base.c: Use task_is_*
  proc/array.c: Use TASK_REPORT
  perfmon: Use task_is_*
  ...

Fixed up conflicts in NFS/sunrpc manually.
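
The central conversion, reduced to a sketch (hypothetical client, wait queue
and condition; the "before" helpers are exactly the ones deleted below in
include/linux/nfs_fs.h and net/sunrpc/clnt.c):

        /* Before: interruptibility depended on the 'intr' mount option,
         * implemented by swapping the signal mask around every sleep. */
        rpc_clnt_sigmask(clnt, &oldset);
        error = wait_event_interruptible(wq, condition);
        rpc_clnt_sigunmask(clnt, &oldset);

        /* After: sleep in TASK_KILLABLE; only a fatal signal (SIGKILL)
         * interrupts the wait, regardless of mount options. */
        error = wait_event_killable(wq, condition);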

38 files changed:
arch/ia64/kernel/perfmon.c
fs/eventpoll.c
fs/nfs/client.c
fs/nfs/direct.c
fs/nfs/inode.c
fs/nfs/mount_clnt.c
fs/nfs/nfs3proc.c
fs/nfs/nfs4proc.c
fs/nfs/nfsroot.c
fs/nfs/pagelist.c
fs/nfs/super.c
fs/nfs/write.c
fs/proc/array.c
fs/proc/base.c
fs/readdir.c
fs/smbfs/request.c
include/linux/completion.h
include/linux/mutex.h
include/linux/nfs_fs.h
include/linux/nfs_mount.h
include/linux/pagemap.h
include/linux/sched.h
include/linux/sunrpc/clnt.h
include/linux/sunrpc/sched.h
include/linux/wait.h
kernel/exit.c
kernel/mutex.c
kernel/power/process.c
kernel/ptrace.c
kernel/sched.c
kernel/signal.c
kernel/timer.c
kernel/wait.c
mm/filemap.c
net/sunrpc/auth.c
net/sunrpc/clnt.c
net/sunrpc/rpcb_clnt.c
net/sunrpc/sched.c

diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 73e7c2e..5ae177f 100644
@@ -2631,7 +2631,7 @@ pfm_task_incompatible(pfm_context_t *ctx, struct task_struct *task)
         */
        if (task == current) return 0;
 
-       if ((task->state != TASK_STOPPED) && (task->state != TASK_TRACED)) {
+       if (!task_is_stopped_or_traced(task)) {
                DPRINT(("cannot attach to non-stopped task [%d] state=%ld\n", task_pid_nr(task), task->state));
                return -EBUSY;
        }
@@ -4792,7 +4792,7 @@ recheck:
         * the task must be stopped.
         */
        if (PFM_CMD_STOPPED(cmd)) {
-               if ((task->state != TASK_STOPPED) && (task->state != TASK_TRACED)) {
+               if (!task_is_stopped_or_traced(task)) {
                        DPRINT(("[%d] task not in stopped state\n", task_pid_nr(task)));
                        return -EBUSY;
                }
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 34f68f3..81c04ab 100644
@@ -656,8 +656,7 @@ is_linked:
         * wait list.
         */
        if (waitqueue_active(&ep->wq))
-               __wake_up_locked(&ep->wq, TASK_UNINTERRUPTIBLE |
-                                TASK_INTERRUPTIBLE);
+               wake_up_locked(&ep->wq);
        if (waitqueue_active(&ep->poll_wait))
                pwake++;
 
@@ -780,7 +779,7 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
 
                /* Notify waiting tasks that events are available */
                if (waitqueue_active(&ep->wq))
-                       __wake_up_locked(&ep->wq, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE);
+                       wake_up_locked(&ep->wq);
                if (waitqueue_active(&ep->poll_wait))
                        pwake++;
        }
@@ -854,8 +853,7 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_even
 
                        /* Notify waiting tasks that events are available */
                        if (waitqueue_active(&ep->wq))
-                               __wake_up_locked(&ep->wq, TASK_UNINTERRUPTIBLE |
-                                                TASK_INTERRUPTIBLE);
+                               wake_up_locked(&ep->wq);
                        if (waitqueue_active(&ep->poll_wait))
                                pwake++;
                }
@@ -978,8 +976,7 @@ errxit:
                 * wait list (delayed after we release the lock).
                 */
                if (waitqueue_active(&ep->wq))
-                       __wake_up_locked(&ep->wq, TASK_UNINTERRUPTIBLE |
-                                        TASK_INTERRUPTIBLE);
+                       wake_up_locked(&ep->wq);
                if (waitqueue_active(&ep->poll_wait))
                        pwake++;
        }
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 685c43f..c5c0175 100644
@@ -386,7 +386,7 @@ found_client:
        if (new)
                nfs_free_client(new);
 
-       error = wait_event_interruptible(nfs_client_active_wq,
+       error = wait_event_killable(nfs_client_active_wq,
                                clp->cl_cons_state != NFS_CS_INITING);
        if (error < 0) {
                nfs_put_client(clp);
@@ -589,10 +589,6 @@ static int nfs_init_server_rpcclient(struct nfs_server *server,
        if (server->flags & NFS_MOUNT_SOFT)
                server->client->cl_softrtry = 1;
 
-       server->client->cl_intr = 0;
-       if (server->flags & NFS4_MOUNT_INTR)
-               server->client->cl_intr = 1;
-
        return 0;
 }
 
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index f8e165c..16844f9 100644
@@ -188,17 +188,12 @@ static void nfs_direct_req_release(struct nfs_direct_req *dreq)
 static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
 {
        ssize_t result = -EIOCBQUEUED;
-       struct rpc_clnt *clnt;
-       sigset_t oldset;
 
        /* Async requests don't wait here */
        if (dreq->iocb)
                goto out;
 
-       clnt = NFS_CLIENT(dreq->inode);
-       rpc_clnt_sigmask(clnt, &oldset);
-       result = wait_for_completion_interruptible(&dreq->completion);
-       rpc_clnt_sigunmask(clnt, &oldset);
+       result = wait_for_completion_killable(&dreq->completion);
 
        if (!result)
                result = dreq->error;
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 3f332e5..966a885 100644
@@ -433,15 +433,11 @@ static int nfs_wait_schedule(void *word)
  */
 static int nfs_wait_on_inode(struct inode *inode)
 {
-       struct rpc_clnt *clnt = NFS_CLIENT(inode);
        struct nfs_inode *nfsi = NFS_I(inode);
-       sigset_t oldmask;
        int error;
 
-       rpc_clnt_sigmask(clnt, &oldmask);
        error = wait_on_bit_lock(&nfsi->flags, NFS_INO_REVALIDATING,
-                                       nfs_wait_schedule, TASK_INTERRUPTIBLE);
-       rpc_clnt_sigunmask(clnt, &oldmask);
+                                       nfs_wait_schedule, TASK_KILLABLE);
 
        return error;
 }
diff --git a/fs/nfs/mount_clnt.c b/fs/nfs/mount_clnt.c
index 8afd9f7..49c7cd0 100644
@@ -56,7 +56,7 @@ int nfs_mount(struct sockaddr *addr, size_t len, char *hostname, char *path,
                .program        = &mnt_program,
                .version        = version,
                .authflavor     = RPC_AUTH_UNIX,
-               .flags          = RPC_CLNT_CREATE_INTR,
+               .flags          = 0,
        };
        struct rpc_clnt         *mnt_clnt;
        int                     status;
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index b353c1a..549dbce 100644
 static int
 nfs3_rpc_wrapper(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
 {
-       sigset_t oldset;
        int res;
-       rpc_clnt_sigmask(clnt, &oldset);
        do {
                res = rpc_call_sync(clnt, msg, flags);
                if (res != -EJUKEBOX)
                        break;
-               schedule_timeout_interruptible(NFS_JUKEBOX_RETRY_TIME);
+               schedule_timeout_killable(NFS_JUKEBOX_RETRY_TIME);
                res = -ERESTARTSYS;
-       } while (!signalled());
-       rpc_clnt_sigunmask(clnt, &oldset);
+       } while (!fatal_signal_pending(current));
        return res;
 }
 
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 5c189bd..027e109 100644
@@ -316,12 +316,9 @@ static void nfs4_opendata_put(struct nfs4_opendata *p)
 
 static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task)
 {
-       sigset_t oldset;
        int ret;
 
-       rpc_clnt_sigmask(task->tk_client, &oldset);
        ret = rpc_wait_for_completion_task(task);
-       rpc_clnt_sigunmask(task->tk_client, &oldset);
        return ret;
 }
 
@@ -2785,9 +2782,9 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server)
        return 0;
 }
 
-static int nfs4_wait_bit_interruptible(void *word)
+static int nfs4_wait_bit_killable(void *word)
 {
-       if (signal_pending(current))
+       if (fatal_signal_pending(current))
                return -ERESTARTSYS;
        schedule();
        return 0;
@@ -2795,18 +2792,14 @@ static int nfs4_wait_bit_interruptible(void *word)
 
 static int nfs4_wait_clnt_recover(struct rpc_clnt *clnt, struct nfs_client *clp)
 {
-       sigset_t oldset;
        int res;
 
        might_sleep();
 
        rwsem_acquire(&clp->cl_sem.dep_map, 0, 0, _RET_IP_);
 
-       rpc_clnt_sigmask(clnt, &oldset);
        res = wait_on_bit(&clp->cl_state, NFS4CLNT_STATE_RECOVER,
-                       nfs4_wait_bit_interruptible,
-                       TASK_INTERRUPTIBLE);
-       rpc_clnt_sigunmask(clnt, &oldset);
+                       nfs4_wait_bit_killable, TASK_KILLABLE);
 
        rwsem_release(&clp->cl_sem.dep_map, 1, _RET_IP_);
        return res;
@@ -2814,7 +2807,6 @@ static int nfs4_wait_clnt_recover(struct rpc_clnt *clnt, struct nfs_client *clp)
 
 static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
 {
-       sigset_t oldset;
        int res = 0;
 
        might_sleep();
@@ -2823,14 +2815,9 @@ static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
                *timeout = NFS4_POLL_RETRY_MIN;
        if (*timeout > NFS4_POLL_RETRY_MAX)
                *timeout = NFS4_POLL_RETRY_MAX;
-       rpc_clnt_sigmask(clnt, &oldset);
-       if (clnt->cl_intr) {
-               schedule_timeout_interruptible(*timeout);
-               if (signalled())
-                       res = -ERESTARTSYS;
-       } else
-               schedule_timeout_uninterruptible(*timeout);
-       rpc_clnt_sigunmask(clnt, &oldset);
+       schedule_timeout_killable(*timeout);
+       if (fatal_signal_pending(current))
+               res = -ERESTARTSYS;
        *timeout <<= 1;
        return res;
 }
@@ -3069,7 +3056,7 @@ int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4
 static unsigned long
 nfs4_set_lock_task_retry(unsigned long timeout)
 {
-       schedule_timeout_interruptible(timeout);
+       schedule_timeout_killable(timeout);
        timeout <<= 1;
        if (timeout > NFS4_LOCK_MAXTIMEOUT)
                return NFS4_LOCK_MAXTIMEOUT;
diff --git a/fs/nfs/nfsroot.c b/fs/nfs/nfsroot.c
index 4b03345..531379d 100644
@@ -228,10 +228,7 @@ static int __init root_nfs_parse(char *name, char *buf)
                                nfs_data.flags &= ~NFS_MOUNT_SOFT;
                                break;
                        case Opt_intr:
-                               nfs_data.flags |= NFS_MOUNT_INTR;
-                               break;
                        case Opt_nointr:
-                               nfs_data.flags &= ~NFS_MOUNT_INTR;
                                break;
                        case Opt_posix:
                                nfs_data.flags |= NFS_MOUNT_POSIX;
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 3b3dbb9..7f07920 100644
@@ -58,7 +58,6 @@ nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
                   struct page *page,
                   unsigned int offset, unsigned int count)
 {
-       struct nfs_server *server = NFS_SERVER(inode);
        struct nfs_page         *req;
 
        for (;;) {
@@ -67,7 +66,7 @@ nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
                if (req != NULL)
                        break;
 
-               if (signalled() && (server->flags & NFS_MOUNT_INTR))
+               if (fatal_signal_pending(current))
                        return ERR_PTR(-ERESTARTSYS);
                yield();
        }
@@ -177,11 +176,11 @@ void nfs_release_request(struct nfs_page *req)
        kref_put(&req->wb_kref, nfs_free_request);
 }
 
-static int nfs_wait_bit_interruptible(void *word)
+static int nfs_wait_bit_killable(void *word)
 {
        int ret = 0;
 
-       if (signal_pending(current))
+       if (fatal_signal_pending(current))
                ret = -ERESTARTSYS;
        else
                schedule();
@@ -192,26 +191,18 @@ static int nfs_wait_bit_interruptible(void *word)
  * nfs_wait_on_request - Wait for a request to complete.
  * @req: request to wait upon.
  *
- * Interruptible by signals only if mounted with intr flag.
+ * Interruptible by fatal signals only.
  * The user is responsible for holding a count on the request.
  */
 int
 nfs_wait_on_request(struct nfs_page *req)
 {
-       struct rpc_clnt *clnt = NFS_CLIENT(req->wb_context->path.dentry->d_inode);
-       sigset_t oldmask;
        int ret = 0;
 
        if (!test_bit(PG_BUSY, &req->wb_flags))
                goto out;
-       /*
-        * Note: the call to rpc_clnt_sigmask() suffices to ensure that we
-        *       are not interrupted if intr flag is not set
-        */
-       rpc_clnt_sigmask(clnt, &oldmask);
        ret = out_of_line_wait_on_bit(&req->wb_flags, PG_BUSY,
-                       nfs_wait_bit_interruptible, TASK_INTERRUPTIBLE);
-       rpc_clnt_sigunmask(clnt, &oldmask);
+                       nfs_wait_bit_killable, TASK_KILLABLE);
 out:
        return ret;
 }
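
The bit-wait conversions (here, in nfs4proc.c above, and in sunrpc/sched.c
below) all follow one recipe: the action callback handed to the wait_on_bit
family decides how to sleep, so the whole change is signal_pending() ->
fatal_signal_pending() plus TASK_INTERRUPTIBLE -> TASK_KILLABLE. The generic
shape, with a hypothetical flags word and bit:

        static int my_wait_bit_killable(void *word)     /* hypothetical */
        {
                if (fatal_signal_pending(current))
                        return -ERESTARTSYS;
                schedule();
                return 0;
        }

        /* 0 on success, -ERESTARTSYS if the waiter was killed */
        err = out_of_line_wait_on_bit(&flags, MY_BIT,
                        my_wait_bit_killable, TASK_KILLABLE);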
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 22c49c0..7f4505f 100644
@@ -448,7 +448,6 @@ static void nfs_show_mount_options(struct seq_file *m, struct nfs_server *nfss,
                const char *nostr;
        } nfs_info[] = {
                { NFS_MOUNT_SOFT, ",soft", ",hard" },
-               { NFS_MOUNT_INTR, ",intr", ",nointr" },
                { NFS_MOUNT_NOCTO, ",nocto", "" },
                { NFS_MOUNT_NOAC, ",noac", "" },
                { NFS_MOUNT_NONLM, ",nolock", "" },
@@ -708,10 +707,7 @@ static int nfs_parse_mount_options(char *raw,
                        mnt->flags &= ~NFS_MOUNT_SOFT;
                        break;
                case Opt_intr:
-                       mnt->flags |= NFS_MOUNT_INTR;
-                       break;
                case Opt_nointr:
-                       mnt->flags &= ~NFS_MOUNT_INTR;
                        break;
                case Opt_posix:
                        mnt->flags |= NFS_MOUNT_POSIX;
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 5ac5b27..522efff 100644
@@ -488,7 +488,7 @@ int nfs_reschedule_unstable_write(struct nfs_page *req)
 /*
  * Wait for a request to complete.
  *
- * Interruptible by signals only if mounted with intr flag.
+ * Interruptible by fatal signals only.
  */
 static int nfs_wait_on_requests_locked(struct inode *inode, pgoff_t idx_start, unsigned int npages)
 {
diff --git a/fs/proc/array.c b/fs/proc/array.c
index eb97f28..b380313 100644
@@ -141,12 +141,7 @@ static const char *task_state_array[] = {
 
 static inline const char *get_task_state(struct task_struct *tsk)
 {
-       unsigned int state = (tsk->state & (TASK_RUNNING |
-                                           TASK_INTERRUPTIBLE |
-                                           TASK_UNINTERRUPTIBLE |
-                                           TASK_STOPPED |
-                                           TASK_TRACED)) |
-                                          tsk->exit_state;
+       unsigned int state = (tsk->state & TASK_REPORT) | tsk->exit_state;
        const char **p = &task_state_array[0];
 
        while (state) {
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 91fa8e6..9fa9708 100644
@@ -199,7 +199,7 @@ static int proc_root_link(struct inode *inode, struct dentry **dentry, struct vf
        (task == current || \
        (task->parent == current && \
        (task->ptrace & PT_PTRACED) && \
-        (task->state == TASK_STOPPED || task->state == TASK_TRACED) && \
+        (task_is_stopped_or_traced(task)) && \
         security_ptrace(current,task) == 0))
 
 struct mm_struct *mm_for_maps(struct task_struct *task)
diff --git a/fs/readdir.c b/fs/readdir.c
index efe52e6..4e026e5 100644
@@ -30,7 +30,10 @@ int vfs_readdir(struct file *file, filldir_t filler, void *buf)
        if (res)
                goto out;
 
-       mutex_lock(&inode->i_mutex);
+       res = mutex_lock_killable(&inode->i_mutex);
+       if (res)
+               goto out;
+
        res = -ENOENT;
        if (!IS_DEADDIR(inode)) {
                res = file->f_op->readdir(file, buf, filler);
diff --git a/fs/smbfs/request.c b/fs/smbfs/request.c
index ca4b2d5..45f4593 100644
@@ -105,7 +105,7 @@ struct smb_request *smb_alloc_request(struct smb_sb_info *server, int bufsize)
                 if (nfs_try_to_free_pages(server))
                        continue;
 
-               if (signalled() && (server->flags & NFS_MOUNT_INTR))
+               if (fatal_signal_pending(current))
                        return ERR_PTR(-ERESTARTSYS);
                current->policy = SCHED_YIELD;
                schedule();
diff --git a/include/linux/completion.h b/include/linux/completion.h
index 33d6aaf..d2961b6 100644
@@ -44,6 +44,7 @@ static inline void init_completion(struct completion *x)
 
 extern void wait_for_completion(struct completion *);
 extern int wait_for_completion_interruptible(struct completion *x);
+extern int wait_for_completion_killable(struct completion *x);
 extern unsigned long wait_for_completion_timeout(struct completion *x,
                                                   unsigned long timeout);
 extern unsigned long wait_for_completion_interruptible_timeout(
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 6014797..05c5903 100644
@@ -125,15 +125,20 @@ static inline int fastcall mutex_is_locked(struct mutex *lock)
 extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
 extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock,
                                        unsigned int subclass);
+extern int __must_check mutex_lock_killable_nested(struct mutex *lock,
+                                       unsigned int subclass);
 
 #define mutex_lock(lock) mutex_lock_nested(lock, 0)
 #define mutex_lock_interruptible(lock) mutex_lock_interruptible_nested(lock, 0)
+#define mutex_lock_killable(lock) mutex_lock_killable_nested(lock, 0)
 #else
 extern void fastcall mutex_lock(struct mutex *lock);
 extern int __must_check fastcall mutex_lock_interruptible(struct mutex *lock);
+extern int __must_check fastcall mutex_lock_killable(struct mutex *lock);
 
 # define mutex_lock_nested(lock, subclass) mutex_lock(lock)
 # define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
+# define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock)
 #endif
 
 /*
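
Callers must check the result (the declarations above are __must_check); a
sketch with a hypothetical mutex, mirroring the vfs_readdir() conversion
earlier in this diff:

        static DEFINE_MUTEX(my_mutex);          /* hypothetical */

        res = mutex_lock_killable(&my_mutex);
        if (res)
                return res;             /* fatal signal while waiting */
        /* ... critical section ... */
        mutex_unlock(&my_mutex);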
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 099ddb4..a69ba80 100644
@@ -556,14 +556,7 @@ extern void * nfs_root_data(void);
 
 #define nfs_wait_event(clnt, wq, condition)                            \
 ({                                                                     \
-       int __retval = 0;                                               \
-       if (clnt->cl_intr) {                                            \
-               sigset_t oldmask;                                       \
-               rpc_clnt_sigmask(clnt, &oldmask);                       \
-               __retval = wait_event_interruptible(wq, condition);     \
-               rpc_clnt_sigunmask(clnt, &oldmask);                     \
-       } else                                                          \
-               wait_event(wq, condition);                              \
+       int __retval = wait_event_killable(wq, condition);              \
        __retval;                                                       \
 })
 
diff --git a/include/linux/nfs_mount.h b/include/linux/nfs_mount.h
index a3ade89..df7c6b7 100644
@@ -48,7 +48,7 @@ struct nfs_mount_data {
 /* bits in the flags field */
 
 #define NFS_MOUNT_SOFT         0x0001  /* 1 */
-#define NFS_MOUNT_INTR         0x0002  /* 1 */
+#define NFS_MOUNT_INTR         0x0002  /* 1 */ /* now unused, but ABI */
 #define NFS_MOUNT_SECURE       0x0004  /* 1 */
 #define NFS_MOUNT_POSIX                0x0008  /* 1 */
 #define NFS_MOUNT_NOCTO                0x0010  /* 1 */
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index db8a410..4b62a10 100644
@@ -157,6 +157,7 @@ static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
 }
 
 extern void FASTCALL(__lock_page(struct page *page));
+extern int FASTCALL(__lock_page_killable(struct page *page));
 extern void FASTCALL(__lock_page_nosync(struct page *page));
 extern void FASTCALL(unlock_page(struct page *page));
 
@@ -170,6 +171,19 @@ static inline void lock_page(struct page *page)
                __lock_page(page);
 }
 
+/*
+ * lock_page_killable is like lock_page but can be interrupted by fatal
+ * signals.  It returns 0 if it locked the page and -EINTR if it was
+ * killed while waiting.
+ */
+static inline int lock_page_killable(struct page *page)
+{
+       might_sleep();
+       if (TestSetPageLocked(page))
+               return __lock_page_killable(page);
+       return 0;
+}
+
 /*
  * lock_page_nosync should only be used if we can't pin the page's inode.
  * Doesn't play quite so well with block device plugging.
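
Per the comment above, a caller looks like this (sketch; hypothetical page,
already found and referenced):

        err = lock_page_killable(page);
        if (err)
                return err;             /* -EINTR: killed while waiting */
        /* ... page is locked here ... */
        unlock_page(page);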
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 9d47976..6c33357 100644
@@ -172,13 +172,35 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 #define TASK_RUNNING           0
 #define TASK_INTERRUPTIBLE     1
 #define TASK_UNINTERRUPTIBLE   2
-#define TASK_STOPPED           4
-#define TASK_TRACED            8
+#define __TASK_STOPPED         4
+#define __TASK_TRACED          8
 /* in tsk->exit_state */
 #define EXIT_ZOMBIE            16
 #define EXIT_DEAD              32
 /* in tsk->state again */
 #define TASK_DEAD              64
+#define TASK_WAKEKILL          128
+
+/* Convenience macros for the sake of set_task_state */
+#define TASK_KILLABLE          (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
+#define TASK_STOPPED           (TASK_WAKEKILL | __TASK_STOPPED)
+#define TASK_TRACED            (TASK_WAKEKILL | __TASK_TRACED)
+
+/* Convenience macros for the sake of wake_up */
+#define TASK_NORMAL            (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
+#define TASK_ALL               (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
+
+/* get_task_state() */
+#define TASK_REPORT            (TASK_RUNNING | TASK_INTERRUPTIBLE | \
+                                TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
+                                __TASK_TRACED)
+
+#define task_is_traced(task)   ((task->state & __TASK_TRACED) != 0)
+#define task_is_stopped(task)  ((task->state & __TASK_STOPPED) != 0)
+#define task_is_stopped_or_traced(task)        \
+                       ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
+#define task_contributes_to_load(task) \
+                               ((task->state & TASK_UNINTERRUPTIBLE) != 0)
 
 #define __set_task_state(tsk, state_value)             \
        do { (tsk)->state = (state_value); } while (0)
@@ -302,6 +324,7 @@ extern int in_sched_functions(unsigned long addr);
 #define        MAX_SCHEDULE_TIMEOUT    LONG_MAX
 extern signed long FASTCALL(schedule_timeout(signed long timeout));
 extern signed long schedule_timeout_interruptible(signed long timeout);
+extern signed long schedule_timeout_killable(signed long timeout);
 extern signed long schedule_timeout_uninterruptible(signed long timeout);
 asmlinkage void schedule(void);
 
@@ -1892,7 +1915,14 @@ static inline int signal_pending(struct task_struct *p)
 {
        return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
 }
-  
+
+extern int FASTCALL(__fatal_signal_pending(struct task_struct *p));
+
+static inline int fatal_signal_pending(struct task_struct *p)
+{
+       return signal_pending(p) && __fatal_signal_pending(p);
+}
+
 static inline int need_resched(void)
 {
        return unlikely(test_thread_flag(TIF_NEED_RESCHED));
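
Spelled out, the encoding above is what lets a single wakeup mask cover every
sleeper that must react to SIGKILL:

        /* With the values defined above:
         *   TASK_KILLABLE = TASK_WAKEKILL | TASK_UNINTERRUPTIBLE = 128 | 2
         *   TASK_STOPPED  = TASK_WAKEKILL | __TASK_STOPPED       = 128 | 4
         *   TASK_TRACED   = TASK_WAKEKILL | __TASK_TRACED        = 128 | 8
         * so a SIGKILL-style wakeup needs only
         *   wake_up_state(t, TASK_INTERRUPTIBLE | TASK_WAKEKILL)
         * to reach interruptible, killable, stopped and traced sleepers alike
         * (cf. the signal_wake_up() hunk in kernel/signal.c below), while
         * task_is_stopped() tests just the __TASK_STOPPED bit and so still
         * means "stopped", not merely "killable". */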
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index 3e9addc..129a86e 100644
@@ -41,7 +41,6 @@ struct rpc_clnt {
        struct rpc_iostats *    cl_metrics;     /* per-client statistics */
 
        unsigned int            cl_softrtry : 1,/* soft timeouts */
-                               cl_intr     : 1,/* interruptible */
                                cl_discrtry : 1,/* disconnect before retry */
                                cl_autobind : 1;/* use getport() */
 
@@ -111,7 +110,6 @@ struct rpc_create_args {
 
 /* Values for "flags" field */
 #define RPC_CLNT_CREATE_HARDRTRY       (1UL << 0)
-#define RPC_CLNT_CREATE_INTR           (1UL << 1)
 #define RPC_CLNT_CREATE_AUTOBIND       (1UL << 2)
 #define RPC_CLNT_CREATE_NONPRIVPORT    (1UL << 3)
 #define RPC_CLNT_CREATE_NOPING         (1UL << 4)
@@ -137,8 +135,6 @@ int         rpc_call_sync(struct rpc_clnt *clnt, struct rpc_message *msg,
 struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred,
                               int flags);
 void           rpc_restart_call(struct rpc_task *);
-void           rpc_clnt_sigmask(struct rpc_clnt *clnt, sigset_t *oldset);
-void           rpc_clnt_sigunmask(struct rpc_clnt *clnt, sigset_t *oldset);
 void           rpc_setbufsize(struct rpc_clnt *, unsigned int, unsigned int);
 size_t         rpc_max_payload(struct rpc_clnt *);
 void           rpc_force_rebind(struct rpc_clnt *);
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index ce3d1b1..f689f02 100644
@@ -137,7 +137,6 @@ struct rpc_task_setup {
 #define RPC_TASK_DYNAMIC       0x0080          /* task was kmalloc'ed */
 #define RPC_TASK_KILLED                0x0100          /* task was killed */
 #define RPC_TASK_SOFT          0x0200          /* Use soft timeouts */
-#define RPC_TASK_NOINTR                0x0400          /* uninterruptible task */
 
 #define RPC_IS_ASYNC(t)                ((t)->tk_flags & RPC_TASK_ASYNC)
 #define RPC_IS_SWAPPER(t)      ((t)->tk_flags & RPC_TASK_SWAPPER)
@@ -145,7 +144,6 @@ struct rpc_task_setup {
 #define RPC_ASSASSINATED(t)    ((t)->tk_flags & RPC_TASK_KILLED)
 #define RPC_DO_CALLBACK(t)     ((t)->tk_callback != NULL)
 #define RPC_IS_SOFT(t)         ((t)->tk_flags & RPC_TASK_SOFT)
-#define RPC_TASK_UNINTERRUPTIBLE(t) ((t)->tk_flags & RPC_TASK_NOINTR)
 
 #define RPC_TASK_RUNNING       0
 #define RPC_TASK_QUEUED                1
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 0e68628..1f4fb0a 100644
@@ -152,14 +152,15 @@ int FASTCALL(out_of_line_wait_on_bit(void *, int, int (*)(void *), unsigned));
 int FASTCALL(out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned));
 wait_queue_head_t *FASTCALL(bit_waitqueue(void *, int));
 
-#define wake_up(x)                     __wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1, NULL)
-#define wake_up_nr(x, nr)              __wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr, NULL)
-#define wake_up_all(x)                 __wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 0, NULL)
+#define wake_up(x)                     __wake_up(x, TASK_NORMAL, 1, NULL)
+#define wake_up_nr(x, nr)              __wake_up(x, TASK_NORMAL, nr, NULL)
+#define wake_up_all(x)                 __wake_up(x, TASK_NORMAL, 0, NULL)
+#define wake_up_locked(x)              __wake_up_locked((x), TASK_NORMAL)
+
 #define wake_up_interruptible(x)       __wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
 #define wake_up_interruptible_nr(x, nr)        __wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
 #define wake_up_interruptible_all(x)   __wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
-#define        wake_up_locked(x)               __wake_up_locked((x), TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE)
-#define wake_up_interruptible_sync(x)   __wake_up_sync((x),TASK_INTERRUPTIBLE, 1)
+#define wake_up_interruptible_sync(x)  __wake_up_sync((x), TASK_INTERRUPTIBLE, 1)
 
 #define __wait_event(wq, condition)                                    \
 do {                                                                   \
@@ -345,6 +346,47 @@ do {                                                                       \
        __ret;                                                          \
 })
 
+#define __wait_event_killable(wq, condition, ret)                      \
+do {                                                                   \
+       DEFINE_WAIT(__wait);                                            \
+                                                                       \
+       for (;;) {                                                      \
+               prepare_to_wait(&wq, &__wait, TASK_KILLABLE);           \
+               if (condition)                                          \
+                       break;                                          \
+               if (!fatal_signal_pending(current)) {                   \
+                       schedule();                                     \
+                       continue;                                       \
+               }                                                       \
+               ret = -ERESTARTSYS;                                     \
+               break;                                                  \
+       }                                                               \
+       finish_wait(&wq, &__wait);                                      \
+} while (0)
+
+/**
+ * wait_event_killable - sleep until a condition gets true
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ *
+ * The process is put to sleep (TASK_KILLABLE) until the
+ * @condition evaluates to true or a signal is received.
+ * The @condition is checked each time the waitqueue @wq is woken up.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+ * The function will return -ERESTARTSYS if it was interrupted by a
+ * signal and 0 if @condition evaluated to true.
+ */
+#define wait_event_killable(wq, condition)                             \
+({                                                                     \
+       int __ret = 0;                                                  \
+       if (!(condition))                                               \
+               __wait_event_killable(wq, condition, __ret);            \
+       __ret;                                                          \
+})
+
 /*
  * Must be called with the spinlock in the wait_queue_head_t held.
  */
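
Usage matching the kerneldoc above, as a sketch with a hypothetical queue and
flag:

        static DECLARE_WAIT_QUEUE_HEAD(my_wq);  /* hypothetical */
        static int my_done;

        static int my_wait(void)
        {
                /* 0 once my_done is true, -ERESTARTSYS if killed */
                return wait_event_killable(my_wq, my_done);
        }

        static void my_finish(void)
        {
                my_done = 1;
                wake_up(&my_wq);        /* TASK_NORMAL reaches killable sleepers */
        }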
diff --git a/kernel/exit.c b/kernel/exit.c
index 549c055..bfb1c0e 100644
@@ -249,7 +249,7 @@ static int has_stopped_jobs(struct pid *pgrp)
        struct task_struct *p;
 
        do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
-               if (p->state != TASK_STOPPED)
+               if (!task_is_stopped(p))
                        continue;
                retval = 1;
                break;
@@ -614,7 +614,7 @@ reparent_thread(struct task_struct *p, struct task_struct *father, int traced)
                p->parent = p->real_parent;
                add_parent(p);
 
-               if (p->state == TASK_TRACED) {
+               if (task_is_traced(p)) {
                        /*
                         * If it was at a trace stop, turn it into
                         * a normal stop since it's no longer being
@@ -1563,60 +1563,51 @@ repeat:
                        }
                        allowed = 1;
 
-                       switch (p->state) {
-                       case TASK_TRACED:
-                               /*
-                                * When we hit the race with PTRACE_ATTACH,
-                                * we will not report this child.  But the
-                                * race means it has not yet been moved to
-                                * our ptrace_children list, so we need to
-                                * set the flag here to avoid a spurious ECHILD
-                                * when the race happens with the only child.
-                                */
-                               flag = 1;
-                               if (!my_ptrace_child(p))
-                                       continue;
-                               /*FALLTHROUGH*/
-                       case TASK_STOPPED:
+                       if (task_is_stopped_or_traced(p)) {
                                /*
                                 * It's stopped now, so it might later
                                 * continue, exit, or stop again.
+                                *
+                                * When we hit the race with PTRACE_ATTACH, we
+                                * will not report this child.  But the race
+                                * means it has not yet been moved to our
+                                * ptrace_children list, so we need to set the
+                                * flag here to avoid a spurious ECHILD when
+                                * the race happens with the only child.
                                 */
                                flag = 1;
-                               if (!(options & WUNTRACED) &&
-                                   !my_ptrace_child(p))
-                                       continue;
+
+                               if (!my_ptrace_child(p)) {
+                                       if (task_is_traced(p))
+                                               continue;
+                                       if (!(options & WUNTRACED))
+                                               continue;
+                               }
+
                                retval = wait_task_stopped(p, ret == 2,
-                                                          (options & WNOWAIT),
-                                                          infop,
-                                                          stat_addr, ru);
+                                               (options & WNOWAIT), infop,
+                                               stat_addr, ru);
                                if (retval == -EAGAIN)
                                        goto repeat;
                                if (retval != 0) /* He released the lock.  */
                                        goto end;
-                               break;
-                       default:
-                       // case EXIT_DEAD:
-                               if (p->exit_state == EXIT_DEAD)
+                       } else if (p->exit_state == EXIT_DEAD) {
+                               continue;
+                       } else if (p->exit_state == EXIT_ZOMBIE) {
+                               /*
+                                * Eligible but we cannot release it yet:
+                                */
+                               if (ret == 2)
+                                       goto check_continued;
+                               if (!likely(options & WEXITED))
                                        continue;
-                       // case EXIT_ZOMBIE:
-                               if (p->exit_state == EXIT_ZOMBIE) {
-                                       /*
-                                        * Eligible but we cannot release
-                                        * it yet:
-                                        */
-                                       if (ret == 2)
-                                               goto check_continued;
-                                       if (!likely(options & WEXITED))
-                                               continue;
-                                       retval = wait_task_zombie(
-                                               p, (options & WNOWAIT),
-                                               infop, stat_addr, ru);
-                                       /* He released the lock.  */
-                                       if (retval != 0)
-                                               goto end;
-                                       break;
-                               }
+                               retval = wait_task_zombie(p,
+                                               (options & WNOWAIT), infop,
+                                               stat_addr, ru);
+                               /* He released the lock.  */
+                               if (retval != 0)
+                                       goto end;
+                       } else {
 check_continued:
                                /*
                                 * It's running now, so it might later
@@ -1625,12 +1616,11 @@ check_continued:
                                flag = 1;
                                if (!unlikely(options & WCONTINUED))
                                        continue;
-                               retval = wait_task_continued(
-                                       p, (options & WNOWAIT),
-                                       infop, stat_addr, ru);
+                               retval = wait_task_continued(p,
+                                               (options & WNOWAIT), infop,
+                                               stat_addr, ru);
                                if (retval != 0) /* He released the lock.  */
                                        goto end;
-                               break;
                        }
                }
                if (!flag) {
diff --git a/kernel/mutex.c b/kernel/mutex.c
index d7fe50c..d9ec9b6 100644
@@ -166,9 +166,12 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                 * got a signal? (This code gets eliminated in the
                 * TASK_UNINTERRUPTIBLE case.)
                 */
-               if (unlikely(state == TASK_INTERRUPTIBLE &&
-                                               signal_pending(task))) {
-                       mutex_remove_waiter(lock, &waiter, task_thread_info(task));
+               if (unlikely((state == TASK_INTERRUPTIBLE &&
+                                       signal_pending(task)) ||
+                             (state == TASK_KILLABLE &&
+                                       fatal_signal_pending(task)))) {
+                       mutex_remove_waiter(lock, &waiter,
+                                           task_thread_info(task));
                        mutex_release(&lock->dep_map, 1, ip);
                        spin_unlock_mutex(&lock->wait_lock, flags);
 
@@ -210,6 +213,14 @@ mutex_lock_nested(struct mutex *lock, unsigned int subclass)
 
 EXPORT_SYMBOL_GPL(mutex_lock_nested);
 
+int __sched
+mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
+{
+       might_sleep();
+       return __mutex_lock_common(lock, TASK_KILLABLE, subclass, _RET_IP_);
+}
+EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
+
 int __sched
 mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
 {
@@ -272,6 +283,9 @@ __mutex_unlock_slowpath(atomic_t *lock_count)
  * mutex_lock_interruptible() and mutex_trylock().
  */
 static int fastcall noinline __sched
+__mutex_lock_killable_slowpath(atomic_t *lock_count);
+
+static noinline int fastcall __sched
 __mutex_lock_interruptible_slowpath(atomic_t *lock_count);
 
 /***
@@ -294,6 +308,14 @@ int fastcall __sched mutex_lock_interruptible(struct mutex *lock)
 
 EXPORT_SYMBOL(mutex_lock_interruptible);
 
+int fastcall __sched mutex_lock_killable(struct mutex *lock)
+{
+       might_sleep();
+       return __mutex_fastpath_lock_retval
+                       (&lock->count, __mutex_lock_killable_slowpath);
+}
+EXPORT_SYMBOL(mutex_lock_killable);
+
 static void fastcall noinline __sched
 __mutex_lock_slowpath(atomic_t *lock_count)
 {
@@ -303,6 +325,14 @@ __mutex_lock_slowpath(atomic_t *lock_count)
 }
 
 static int fastcall noinline __sched
+__mutex_lock_killable_slowpath(atomic_t *lock_count)
+{
+       struct mutex *lock = container_of(lock_count, struct mutex, count);
+
+       return __mutex_lock_common(lock, TASK_KILLABLE, 0, _RET_IP_);
+}
+
+static noinline int fastcall __sched
 __mutex_lock_interruptible_slowpath(atomic_t *lock_count)
 {
        struct mutex *lock = container_of(lock_count, struct mutex, count);
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 6533923..7c2118f 100644
@@ -86,9 +86,9 @@ static void fake_signal_wake_up(struct task_struct *p, int resume)
 
 static void send_fake_signal(struct task_struct *p)
 {
-       if (p->state == TASK_STOPPED)
+       if (task_is_stopped(p))
                force_sig_specific(SIGSTOP, p);
-       fake_signal_wake_up(p, p->state == TASK_STOPPED);
+       fake_signal_wake_up(p, task_is_stopped(p));
 }
 
 static int has_mm(struct task_struct *p)
@@ -182,7 +182,7 @@ static int try_to_freeze_tasks(int freeze_user_space)
                        if (frozen(p) || !freezeable(p))
                                continue;
 
-                       if (p->state == TASK_TRACED && frozen(p->parent)) {
+                       if (task_is_traced(p) && frozen(p->parent)) {
                                cancel_freezing(p);
                                continue;
                        }
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index e6e9b8b..b0d4ab4 100644
@@ -51,7 +51,7 @@ void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
 void ptrace_untrace(struct task_struct *child)
 {
        spin_lock(&child->sighand->siglock);
-       if (child->state == TASK_TRACED) {
+       if (task_is_traced(child)) {
                if (child->signal->flags & SIGNAL_STOP_STOPPED) {
                        child->state = TASK_STOPPED;
                } else {
@@ -79,7 +79,7 @@ void __ptrace_unlink(struct task_struct *child)
                add_parent(child);
        }
 
-       if (child->state == TASK_TRACED)
+       if (task_is_traced(child))
                ptrace_untrace(child);
 }
 
@@ -103,9 +103,9 @@ int ptrace_check_attach(struct task_struct *child, int kill)
            && child->signal != NULL) {
                ret = 0;
                spin_lock_irq(&child->sighand->siglock);
-               if (child->state == TASK_STOPPED) {
+               if (task_is_stopped(child)) {
                        child->state = TASK_TRACED;
-               } else if (child->state != TASK_TRACED && !kill) {
+               } else if (!task_is_traced(child) && !kill) {
                        ret = -ESRCH;
                }
                spin_unlock_irq(&child->sighand->siglock);
diff --git a/kernel/sched.c b/kernel/sched.c
index 8355e00..9474b23 100644
@@ -1350,7 +1350,7 @@ static int effective_prio(struct task_struct *p)
  */
 static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
 {
-       if (p->state == TASK_UNINTERRUPTIBLE)
+       if (task_contributes_to_load(p))
                rq->nr_uninterruptible--;
 
        enqueue_task(rq, p, wakeup);
@@ -1362,7 +1362,7 @@ static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
  */
 static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep)
 {
-       if (p->state == TASK_UNINTERRUPTIBLE)
+       if (task_contributes_to_load(p))
                rq->nr_uninterruptible++;
 
        dequeue_task(rq, p, sleep);
@@ -1895,8 +1895,7 @@ out:
 
 int fastcall wake_up_process(struct task_struct *p)
 {
-       return try_to_wake_up(p, TASK_STOPPED | TASK_TRACED |
-                                TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0);
+       return try_to_wake_up(p, TASK_ALL, 0);
 }
 EXPORT_SYMBOL(wake_up_process);
 
@@ -4124,8 +4123,7 @@ void complete(struct completion *x)
 
        spin_lock_irqsave(&x->wait.lock, flags);
        x->done++;
-       __wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
-                        1, 0, NULL);
+       __wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL);
        spin_unlock_irqrestore(&x->wait.lock, flags);
 }
 EXPORT_SYMBOL(complete);
@@ -4136,8 +4134,7 @@ void complete_all(struct completion *x)
 
        spin_lock_irqsave(&x->wait.lock, flags);
        x->done += UINT_MAX/2;
-       __wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
-                        0, 0, NULL);
+       __wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL);
        spin_unlock_irqrestore(&x->wait.lock, flags);
 }
 EXPORT_SYMBOL(complete_all);
@@ -4151,8 +4148,10 @@ do_wait_for_common(struct completion *x, long timeout, int state)
                wait.flags |= WQ_FLAG_EXCLUSIVE;
                __add_wait_queue_tail(&x->wait, &wait);
                do {
-                       if (state == TASK_INTERRUPTIBLE &&
-                           signal_pending(current)) {
+                       if ((state == TASK_INTERRUPTIBLE &&
+                            signal_pending(current)) ||
+                           (state == TASK_KILLABLE &&
+                            fatal_signal_pending(current))) {
                                __remove_wait_queue(&x->wait, &wait);
                                return -ERESTARTSYS;
                        }
@@ -4212,6 +4211,15 @@ wait_for_completion_interruptible_timeout(struct completion *x,
 }
 EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
 
+int __sched wait_for_completion_killable(struct completion *x)
+{
+       long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
+       if (t == -ERESTARTSYS)
+               return t;
+       return 0;
+}
+EXPORT_SYMBOL(wait_for_completion_killable);
+
 static long __sched
 sleep_on_common(wait_queue_head_t *q, int state, long timeout)
 {
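
The caller side, as in the nfs_direct_wait() conversion above (sketch;
hypothetical completion handed to some async work that signals it):

        struct completion done;

        init_completion(&done);
        /* ... submit work that eventually calls complete(&done) ... */
        if (wait_for_completion_killable(&done))
                return -ERESTARTSYS;    /* killed before the work finished */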
diff --git a/kernel/signal.c b/kernel/signal.c
index bf49ce6..8054dd4 100644
@@ -456,15 +456,15 @@ void signal_wake_up(struct task_struct *t, int resume)
        set_tsk_thread_flag(t, TIF_SIGPENDING);
 
        /*
-        * For SIGKILL, we want to wake it up in the stopped/traced case.
-        * We don't check t->state here because there is a race with it
+        * For SIGKILL, we want to wake it up in the stopped/traced/killable
+        * case. We don't check t->state here because there is a race with it
         * executing another processor and just now entering stopped state.
         * By using wake_up_state, we ensure the process will wake up and
         * handle its death signal.
         */
        mask = TASK_INTERRUPTIBLE;
        if (resume)
-               mask |= TASK_STOPPED | TASK_TRACED;
+               mask |= TASK_WAKEKILL;
        if (!wake_up_state(t, mask))
                kick_process(t);
 }
@@ -620,7 +620,7 @@ static void handle_stop_signal(int sig, struct task_struct *p)
                         * Wake up the stopped thread _after_ setting
                         * TIF_SIGPENDING
                         */
-                       state = TASK_STOPPED;
+                       state = __TASK_STOPPED;
                        if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
                                set_tsk_thread_flag(t, TIF_SIGPENDING);
                                state |= TASK_INTERRUPTIBLE;
@@ -838,7 +838,7 @@ static inline int wants_signal(int sig, struct task_struct *p)
                return 0;
        if (sig == SIGKILL)
                return 1;
-       if (p->state & (TASK_STOPPED | TASK_TRACED))
+       if (task_is_stopped_or_traced(p))
                return 0;
        return task_curr(p) || !signal_pending(p);
 }
@@ -994,6 +994,11 @@ void zap_other_threads(struct task_struct *p)
        }
 }
 
+int fastcall __fatal_signal_pending(struct task_struct *tsk)
+{
+       return sigismember(&tsk->pending.signal, SIGKILL);
+}
+
 /*
  * Must be called under rcu_read_lock() or with tasklist_lock read-held.
  */
@@ -1441,7 +1446,7 @@ void do_notify_parent(struct task_struct *tsk, int sig)
        BUG_ON(sig == -1);
 
        /* do_notify_parent_cldstop should have been called instead.  */
-       BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));
+       BUG_ON(task_is_stopped_or_traced(tsk));
 
        BUG_ON(!tsk->ptrace &&
               (tsk->group_leader != tsk || !thread_group_empty(tsk)));
@@ -1729,7 +1734,7 @@ static int do_signal_stop(int signr)
                         * so this check has no races.
                         */
                        if (!t->exit_state &&
-                           !(t->state & (TASK_STOPPED|TASK_TRACED))) {
+                           !task_is_stopped_or_traced(t)) {
                                stop_count++;
                                signal_wake_up(t, 0);
                        }
diff --git a/kernel/timer.c b/kernel/timer.c
index 23f7ead..9fbb472 100644
@@ -1099,6 +1099,13 @@ signed long __sched schedule_timeout_interruptible(signed long timeout)
 }
 EXPORT_SYMBOL(schedule_timeout_interruptible);
 
+signed long __sched schedule_timeout_killable(signed long timeout)
+{
+       __set_current_state(TASK_KILLABLE);
+       return schedule_timeout(timeout);
+}
+EXPORT_SYMBOL(schedule_timeout_killable);
+
 signed long __sched schedule_timeout_uninterruptible(signed long timeout)
 {
        __set_current_state(TASK_UNINTERRUPTIBLE);
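
The retry idiom this enables, cf. the nfs3_rpc_wrapper() hunk above (sketch;
operation and retry interval are hypothetical):

        do {
                res = try_the_thing();                  /* hypothetical */
                if (res != -EAGAIN)
                        break;
                schedule_timeout_killable(RETRY_JIFFIES);  /* hypothetical */
                res = -ERESTARTSYS;
        } while (!fatal_signal_pending(current));
        return res;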
diff --git a/kernel/wait.c b/kernel/wait.c
index 444ddbf..f987688 100644
@@ -215,7 +215,7 @@ void fastcall __wake_up_bit(wait_queue_head_t *wq, void *word, int bit)
 {
        struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit);
        if (waitqueue_active(wq))
-               __wake_up(wq, TASK_INTERRUPTIBLE|TASK_UNINTERRUPTIBLE, 1, &key);
+               __wake_up(wq, TASK_NORMAL, 1, &key);
 }
 EXPORT_SYMBOL(__wake_up_bit);
 
diff --git a/mm/filemap.c b/mm/filemap.c
index f4d0cde..89ce6fe 100644
@@ -185,6 +185,12 @@ static int sync_page(void *word)
        return 0;
 }
 
+static int sync_page_killable(void *word)
+{
+       sync_page(word);
+       return fatal_signal_pending(current) ? -EINTR : 0;
+}
+
 /**
  * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
  * @mapping:   address space structure to write
@@ -589,6 +595,14 @@ void fastcall __lock_page(struct page *page)
 }
 EXPORT_SYMBOL(__lock_page);
 
+int fastcall __lock_page_killable(struct page *page)
+{
+       DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
+
+       return __wait_on_bit_lock(page_waitqueue(page), &wait,
+                                       sync_page_killable, TASK_KILLABLE);
+}
+
 /*
  * Variant of lock_page that does not require the caller to hold a reference
  * on the page's mapping.
@@ -980,7 +994,8 @@ page_ok:
 
 page_not_up_to_date:
                /* Get exclusive access to the page ... */
-               lock_page(page);
+               if (lock_page_killable(page))
+                       goto readpage_eio;
 
                /* Did it get truncated before we got the lock? */
                if (!page->mapping) {
@@ -1008,7 +1023,8 @@ readpage:
                }
 
                if (!PageUptodate(page)) {
-                       lock_page(page);
+                       if (lock_page_killable(page))
+                               goto readpage_eio;
                        if (!PageUptodate(page)) {
                                if (page->mapping == NULL) {
                                        /*
@@ -1019,15 +1035,16 @@ readpage:
                                        goto find_page;
                                }
                                unlock_page(page);
-                               error = -EIO;
                                shrink_readahead_size_eio(filp, ra);
-                               goto readpage_error;
+                               goto readpage_eio;
                        }
                        unlock_page(page);
                }
 
                goto page_ok;
 
+readpage_eio:
+               error = -EIO;
 readpage_error:
                /* UHHUH! A synchronous read error occurred. Report it */
                desc->error = error;
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index bcd9abd..eca941c 100644
@@ -385,7 +385,6 @@ rpcauth_bindcred(struct rpc_task *task)
                .group_info = current->group_info,
        };
        struct rpc_cred *ret;
-       sigset_t oldset;
        int flags = 0;
 
        dprintk("RPC: %5u looking up %s cred\n",
@@ -393,9 +392,7 @@ rpcauth_bindcred(struct rpc_task *task)
        get_group_info(acred.group_info);
        if (task->tk_flags & RPC_TASK_ROOTCREDS)
                flags |= RPCAUTH_LOOKUP_ROOTCREDS;
-       rpc_clnt_sigmask(task->tk_client, &oldset);
        ret = auth->au_ops->lookup_cred(auth, &acred, flags);
-       rpc_clnt_sigunmask(task->tk_client, &oldset);
        if (!IS_ERR(ret))
                task->tk_msg.rpc_cred = ret;
        else
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 924916c..0998e6d 100644
@@ -313,7 +313,7 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args)
                return clnt;
 
        if (!(args->flags & RPC_CLNT_CREATE_NOPING)) {
-               int err = rpc_ping(clnt, RPC_TASK_SOFT|RPC_TASK_NOINTR);
+               int err = rpc_ping(clnt, RPC_TASK_SOFT);
                if (err != 0) {
                        rpc_shutdown_client(clnt);
                        return ERR_PTR(err);
@@ -324,8 +324,6 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args)
        if (args->flags & RPC_CLNT_CREATE_HARDRTRY)
                clnt->cl_softrtry = 0;
 
-       if (args->flags & RPC_CLNT_CREATE_INTR)
-               clnt->cl_intr = 1;
        if (args->flags & RPC_CLNT_CREATE_AUTOBIND)
                clnt->cl_autobind = 1;
        if (args->flags & RPC_CLNT_CREATE_DISCRTRY)
@@ -493,7 +491,7 @@ struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old,
        clnt->cl_prog     = program->number;
        clnt->cl_vers     = version->number;
        clnt->cl_stats    = program->stats;
-       err = rpc_ping(clnt, RPC_TASK_SOFT|RPC_TASK_NOINTR);
+       err = rpc_ping(clnt, RPC_TASK_SOFT);
        if (err != 0) {
                rpc_shutdown_client(clnt);
                clnt = ERR_PTR(err);
@@ -515,46 +513,6 @@ static const struct rpc_call_ops rpc_default_ops = {
        .rpc_call_done = rpc_default_callback,
 };
 
-/*
- *     Export the signal mask handling for synchronous code that
- *     sleeps on RPC calls
- */
-#define RPC_INTR_SIGNALS (sigmask(SIGHUP) | sigmask(SIGINT) | sigmask(SIGQUIT) | sigmask(SIGTERM))
-
-static void rpc_save_sigmask(sigset_t *oldset, int intr)
-{
-       unsigned long   sigallow = sigmask(SIGKILL);
-       sigset_t sigmask;
-
-       /* Block all signals except those listed in sigallow */
-       if (intr)
-               sigallow |= RPC_INTR_SIGNALS;
-       siginitsetinv(&sigmask, sigallow);
-       sigprocmask(SIG_BLOCK, &sigmask, oldset);
-}
-
-static void rpc_task_sigmask(struct rpc_task *task, sigset_t *oldset)
-{
-       rpc_save_sigmask(oldset, !RPC_TASK_UNINTERRUPTIBLE(task));
-}
-
-static void rpc_restore_sigmask(sigset_t *oldset)
-{
-       sigprocmask(SIG_SETMASK, oldset, NULL);
-}
-
-void rpc_clnt_sigmask(struct rpc_clnt *clnt, sigset_t *oldset)
-{
-       rpc_save_sigmask(oldset, clnt->cl_intr);
-}
-EXPORT_SYMBOL_GPL(rpc_clnt_sigmask);
-
-void rpc_clnt_sigunmask(struct rpc_clnt *clnt, sigset_t *oldset)
-{
-       rpc_restore_sigmask(oldset);
-}
-EXPORT_SYMBOL_GPL(rpc_clnt_sigunmask);
-
 /**
  * rpc_run_task - Allocate a new RPC task, then run rpc_execute against it
  * @task_setup_data: pointer to task initialisation data
@@ -562,7 +520,6 @@ EXPORT_SYMBOL_GPL(rpc_clnt_sigunmask);
 struct rpc_task *rpc_run_task(const struct rpc_task_setup *task_setup_data)
 {
        struct rpc_task *task, *ret;
-       sigset_t oldset;
 
        task = rpc_new_task(task_setup_data);
        if (task == NULL) {
@@ -578,13 +535,7 @@ struct rpc_task *rpc_run_task(const struct rpc_task_setup *task_setup_data)
                goto out;
        }
        atomic_inc(&task->tk_count);
-       /* Mask signals on synchronous RPC calls and RPCSEC_GSS upcalls */
-       if (!RPC_IS_ASYNC(task)) {
-               rpc_task_sigmask(task, &oldset);
-               rpc_execute(task);
-               rpc_restore_sigmask(&oldset);
-       } else
-               rpc_execute(task);
+       rpc_execute(task);
        ret = task;
 out:
        return ret;
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index fa5b8f2..3164a08 100644
@@ -120,8 +120,7 @@ static struct rpc_clnt *rpcb_create(char *hostname, struct sockaddr *srvaddr,
                .program        = &rpcb_program,
                .version        = version,
                .authflavor     = RPC_AUTH_UNIX,
-               .flags          = (RPC_CLNT_CREATE_NOPING |
-                                  RPC_CLNT_CREATE_INTR),
+               .flags          = RPC_CLNT_CREATE_NOPING,
        };
 
        switch (srvaddr->sa_family) {
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 40ce6f6..4c66912 100644
@@ -245,9 +245,9 @@ void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
 }
 EXPORT_SYMBOL_GPL(rpc_init_wait_queue);
 
-static int rpc_wait_bit_interruptible(void *word)
+static int rpc_wait_bit_killable(void *word)
 {
-       if (signal_pending(current))
+       if (fatal_signal_pending(current))
                return -ERESTARTSYS;
        schedule();
        return 0;
@@ -299,9 +299,9 @@ static void rpc_mark_complete_task(struct rpc_task *task)
 int __rpc_wait_for_completion_task(struct rpc_task *task, int (*action)(void *))
 {
        if (action == NULL)
-               action = rpc_wait_bit_interruptible;
+               action = rpc_wait_bit_killable;
        return wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
-                       action, TASK_INTERRUPTIBLE);
+                       action, TASK_KILLABLE);
 }
 EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);
 
@@ -696,10 +696,9 @@ static void __rpc_execute(struct rpc_task *task)
 
                /* sync task: sleep here */
                dprintk("RPC: %5u sync task going to sleep\n", task->tk_pid);
-               /* Note: Caller should be using rpc_clnt_sigmask() */
                status = out_of_line_wait_on_bit(&task->tk_runstate,
-                               RPC_TASK_QUEUED, rpc_wait_bit_interruptible,
-                               TASK_INTERRUPTIBLE);
+                               RPC_TASK_QUEUED, rpc_wait_bit_killable,
+                               TASK_KILLABLE);
                if (status == -ERESTARTSYS) {
                        /*
                         * When a sync task receives a signal, it exits with
@@ -840,8 +839,6 @@ static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *ta
                kref_get(&task->tk_client->cl_kref);
                if (task->tk_client->cl_softrtry)
                        task->tk_flags |= RPC_TASK_SOFT;
-               if (!task->tk_client->cl_intr)
-                       task->tk_flags |= RPC_TASK_NOINTR;
        }
 
        if (task->tk_ops->rpc_call_prepare != NULL)