Merge branch 'core/urgent' into core/futexes
author		Thomas Gleixner <tglx@linutronix.de>
		Wed, 20 May 2009 07:02:28 +0000 (09:02 +0200)
committer	Thomas Gleixner <tglx@linutronix.de>
		Wed, 20 May 2009 07:02:28 +0000 (09:02 +0200)
Merge reason: this branch was on a pre-rc1 base, merge it up to -rc6+
              to get the latest upstream fixes.

Conflicts:
kernel/futex.c

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
kernel/futex.c
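
For context, a minimal userspace sketch of how the two new multiplexed
operations are meant to be driven. Illustrative only, not glibc's condvar;
the opcode values and the raw syscall wrapper are assumptions based on the
futex.h additions that accompany this series:

#include <limits.h>
#include <stdint.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef FUTEX_WAIT_REQUEUE_PI		/* assumed values from this series */
#define FUTEX_WAIT_REQUEUE_PI	11
#define FUTEX_CMP_REQUEUE_PI	12
#endif

static long sys_futex(uint32_t *uaddr, int op, uint32_t val,
		      void *timeout_or_nr_requeue, uint32_t *uaddr2,
		      uint32_t val3)
{
	return syscall(SYS_futex, uaddr, op, val, timeout_or_nr_requeue,
		       uaddr2, val3);
}

/*
 * Waiter side of a condvar built on the new ops: block on @cond; on wakeup
 * the kernel has already queued us on (or handed us) the PI lock @mutex.
 * val3 is forced to FUTEX_BITSET_MATCH_ANY inside do_futex().
 */
static long cond_wait_requeue_pi(uint32_t *cond, uint32_t seen, uint32_t *mutex)
{
	return sys_futex(cond, FUTEX_WAIT_REQUEUE_PI, seen, NULL, mutex, 0);
}

/*
 * Broadcast side: wake exactly one waiter and requeue the rest onto the PI
 * lock. nr_wake must be 1 (futex_requeue() rejects anything else) and
 * nr_requeue travels in the utime argument slot, as the val2 decoding at
 * the bottom of this patch shows.
 */
static long cond_broadcast_requeue_pi(uint32_t *cond, uint32_t curval,
				      uint32_t *mutex)
{
	return sys_futex(cond, FUTEX_CMP_REQUEUE_PI, 1,
			 (void *)(unsigned long)INT_MAX, mutex, curval);
}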

diff --combined kernel/futex.c
   *  PRIVATE futexes by Eric Dumazet
   *  Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
   *
 + *  Requeue-PI support by Darren Hart <dvhltc@us.ibm.com>
 + *  Copyright (C) IBM Corporation, 2009
 + *  Thanks to Thomas Gleixner for conceptual design and careful reviews.
 + *
   *  Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
   *  enough at me, Linus for the original (flawed) idea, Matthew
   *  Kirkwood for proof-of-concept implementation.
@@@ -100,8 -96,8 +100,8 @@@ struct futex_pi_state 
   */
  struct futex_q {
        struct plist_node list;
 -      /* There can only be a single waiter */
 -      wait_queue_head_t waiter;
 +      /* Waiter reference */
 +      struct task_struct *task;
  
        /* Which hash list lock to use: */
        spinlock_t *lock_ptr;
  
        /* Optional priority inheritance state: */
        struct futex_pi_state *pi_state;
 -      struct task_struct *task;
 +
 +      /* rt_waiter storage for requeue_pi: */
 +      struct rt_mutex_waiter *rt_waiter;
  
        /* Bitset for the optional bitmasked wakeup */
        u32 bitset;
@@@ -199,6 -193,7 +199,7 @@@ static void drop_futex_key_refs(union f
   * @uaddr: virtual address of the futex
   * @fshared: 0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
   * @key: address where result is stored.
+  * @rw: mapping needs to be read/write (values: VERIFY_READ, VERIFY_WRITE)
   *
   * Returns a negative error code or 0
   * The key words are stored in *key on success.
   *
   * lock_page() might sleep, the caller should not hold a spinlock.
   */
- static int get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key)
+ static int
+ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
  {
        unsigned long address = (unsigned long)uaddr;
        struct mm_struct *mm = current->mm;
         *        but access_ok() should be faster than find_vma()
         */
        if (!fshared) {
-               if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))))
+               if (unlikely(!access_ok(rw, uaddr, sizeof(u32))))
                        return -EFAULT;
                key->private.mm = mm;
                key->private.address = address;
        }
  
  again:
-       err = get_user_pages_fast(address, 1, 0, &page);
+       err = get_user_pages_fast(address, 1, rw == VERIFY_WRITE, &page);
        if (err < 0)
                return err;
  
@@@ -282,25 -278,6 +284,25 @@@ void put_futex_key(int fshared, union f
        drop_futex_key_refs(key);
  }
  
 +/**
 + * futex_top_waiter() - Return the highest priority waiter on a futex
 + * @hb:     the hash bucket the futex_q's reside in
 + * @key:    the futex key (to distinguish it from other futexes' futex_q's)
 + *
 + * Must be called with the hb lock held.
 + */
 +static struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb,
 +                                      union futex_key *key)
 +{
 +      struct futex_q *this;
 +
 +      plist_for_each_entry(this, &hb->chain, list) {
 +              if (match_futex(&this->key, key))
 +                      return this;
 +      }
 +      return NULL;
 +}
 +
  static u32 cmpxchg_futex_value_locked(u32 __user *uaddr, u32 uval, u32 newval)
  {
        u32 curval;
@@@ -562,160 -539,28 +564,160 @@@ lookup_pi_state(u32 uval, struct futex_
        return 0;
  }
  
 +/**
 + * futex_lock_pi_atomic() - atomic work required to acquire a pi aware futex
 + * @uaddr:            the pi futex user address
 + * @hb:                       the pi futex hash bucket
 + * @key:              the futex key associated with uaddr and hb
 + * @ps:                       the pi_state pointer where we store the result of the
 + *                    lookup
 + * @task:             the task to perform the atomic lock work for.  This will
 + *                    be "current" except in the case of requeue pi.
 + * @set_waiters:      force setting the FUTEX_WAITERS bit (1) or not (0)
 + *
 + * Returns:
 + *  0 - ready to wait
 + *  1 - acquired the lock
 + * <0 - error
 + *
 + * The hb->lock and futex_key refs shall be held by the caller.
 + */
 +static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
 +                              union futex_key *key,
 +                              struct futex_pi_state **ps,
 +                              struct task_struct *task, int set_waiters)
 +{
 +      int lock_taken, ret, ownerdied = 0;
 +      u32 uval, newval, curval;
 +
 +retry:
 +      ret = lock_taken = 0;
 +
 +      /*
 +       * To avoid races, we attempt to take the lock here again
 +       * (by doing a 0 -> TID atomic cmpxchg), while holding all
 +       * the locks. It will most likely not succeed.
 +       */
 +      newval = task_pid_vnr(task);
 +      if (set_waiters)
 +              newval |= FUTEX_WAITERS;
 +
 +      curval = cmpxchg_futex_value_locked(uaddr, 0, newval);
 +
 +      if (unlikely(curval == -EFAULT))
 +              return -EFAULT;
 +
 +      /*
 +       * Detect deadlocks.
 +       */
 +      if (unlikely((curval & FUTEX_TID_MASK) == task_pid_vnr(task)))
 +              return -EDEADLK;
 +
 +      /*
 +       * Surprise - we got the lock. Just return to userspace:
 +       */
 +      if (unlikely(!curval))
 +              return 1;
 +
 +      uval = curval;
 +
 +      /*
 +       * Set the FUTEX_WAITERS flag, so the owner will know it has someone
 +       * to wake at the next unlock.
 +       */
 +      newval = curval | FUTEX_WAITERS;
 +
 +      /*
 +       * We take over the futex in two cases: when the futex has no
 +       * owner (the owner TID is 0), and unconditionally when we have
 +       * already observed that the previous owner died (ownerdied is
 +       * set).
 +       *
 +       * This is safe as we are protected by the hash bucket lock !
 +       */
 +      if (unlikely(ownerdied || !(curval & FUTEX_TID_MASK))) {
 +              /* Keep the OWNER_DIED bit */
 +              newval = (curval & ~FUTEX_TID_MASK) | task_pid_vnr(task);
 +              ownerdied = 0;
 +              lock_taken = 1;
 +      }
 +
 +      curval = cmpxchg_futex_value_locked(uaddr, uval, newval);
 +
 +      if (unlikely(curval == -EFAULT))
 +              return -EFAULT;
 +      if (unlikely(curval != uval))
 +              goto retry;
 +
 +      /*
 +       * We took the lock due to owner died take over.
 +       */
 +      if (unlikely(lock_taken))
 +              return 1;
 +
 +      /*
 +       * We don't have the lock. Look up the PI state (or create it if
 +       * we are the first waiter):
 +       */
 +      ret = lookup_pi_state(uval, hb, key, ps);
 +
 +      if (unlikely(ret)) {
 +              switch (ret) {
 +              case -ESRCH:
 +                      /*
 +                       * No owner found for this futex. Check if the
 +                       * OWNER_DIED bit is set to figure out whether
 +                       * this is a robust futex or not.
 +                       */
 +                      if (get_futex_value_locked(&curval, uaddr))
 +                              return -EFAULT;
 +
 +                      /*
 +                       * We simply start over in case of a robust
 +                       * futex. The code above will take the futex
 +                       * and return happy.
 +                       */
 +                      if (curval & FUTEX_OWNER_DIED) {
 +                              ownerdied = 1;
 +                              goto retry;
 +                      }
 +              default:
 +                      break;
 +              }
 +      }
 +
 +      return ret;
 +}
 +
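
The 0 -> TID cmpxchg above is the kernel half of the PI futex protocol; the
userspace half never enters the kernel in the uncontended case. A sketch,
assuming the standard futex word layout (TID in the low bits, FUTEX_WAITERS
as the top bit) and the long-standing FUTEX_LOCK_PI/FUTEX_UNLOCK_PI ops:

#include <stdint.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/futex.h>	/* FUTEX_LOCK_PI, FUTEX_UNLOCK_PI */

static void pi_lock(uint32_t *f)
{
	uint32_t tid = (uint32_t)syscall(SYS_gettid);

	/* Uncontended fast path: the same 0 -> TID transition that
	 * futex_lock_pi_atomic() retries under the hash bucket lock. */
	if (__sync_val_compare_and_swap(f, 0, tid) != 0)
		syscall(SYS_futex, f, FUTEX_LOCK_PI, 0, NULL, NULL, 0);
}

static void pi_unlock(uint32_t *f)
{
	uint32_t tid = (uint32_t)syscall(SYS_gettid);

	/* TID -> 0 only succeeds while FUTEX_WAITERS is clear; otherwise
	 * the kernel must hand the lock to the top waiter. */
	if (__sync_val_compare_and_swap(f, tid, 0) != tid)
		syscall(SYS_futex, f, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0);
}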
  /*
   * The hash bucket lock must be held when this is called.
   * Afterwards, the futex_q must not be accessed.
   */
  static void wake_futex(struct futex_q *q)
  {
 -      plist_del(&q->list, &q->list.plist);
 +      struct task_struct *p = q->task;
 +
        /*
 -       * The lock in wake_up_all() is a crucial memory barrier after the
 -       * plist_del() and also before assigning to q->lock_ptr.
 +       * We set q->lock_ptr = NULL _before_ we wake up the task. If
 +       * a non-futex wakeup happens on another CPU then the task
 +       * might exit and wake_up_state() would dereference p, a no
 +       * longer existing task struct. Prevent this by holding a
 +       * reference on p across the wakeup.
         */
 -      wake_up(&q->waiter);
 +      get_task_struct(p);
 +
 +      plist_del(&q->list, &q->list.plist);
        /*
 -       * The waiting task can free the futex_q as soon as this is written,
 -       * without taking any locks.  This must come last.
 -       *
 -       * A memory barrier is required here to prevent the following store to
 -       * lock_ptr from getting ahead of the wakeup. Clearing the lock at the
 -       * end of wake_up() does not prevent this store from moving.
 +       * The waiting task can free the futex_q as soon as
 +       * q->lock_ptr = NULL is written, without taking any locks. A
 +       * memory barrier is required here to prevent the following
 +       * store to lock_ptr from getting ahead of the plist_del.
         */
        smp_wmb();
        q->lock_ptr = NULL;
 +
 +      wake_up_state(p, TASK_NORMAL);
 +      put_task_struct(p);
  }
  
  static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
@@@ -834,7 -679,7 +836,7 @@@ static int futex_wake(u32 __user *uaddr
        if (!bitset)
                return -EINVAL;
  
-       ret = get_futex_key(uaddr, fshared, &key);
+       ret = get_futex_key(uaddr, fshared, &key, VERIFY_READ);
        if (unlikely(ret != 0))
                goto out;
  
  
        plist_for_each_entry_safe(this, next, head, list) {
                if (match_futex (&this->key, &key)) {
 -                      if (this->pi_state) {
 +                      if (this->pi_state || this->rt_waiter) {
                                ret = -EINVAL;
                                break;
                        }
@@@ -880,10 -725,10 +882,10 @@@ futex_wake_op(u32 __user *uaddr1, int f
        int ret, op_ret;
  
  retry:
-       ret = get_futex_key(uaddr1, fshared, &key1);
+       ret = get_futex_key(uaddr1, fshared, &key1, VERIFY_READ);
        if (unlikely(ret != 0))
                goto out;
-       ret = get_futex_key(uaddr2, fshared, &key2);
+       ret = get_futex_key(uaddr2, fshared, &key2, VERIFY_WRITE);
        if (unlikely(ret != 0))
                goto out_put_key1;
  
        return ret;
  }
  
 -/*
 - * Requeue all waiters hashed on one physical page to another
 - * physical page.
 +/**
 + * requeue_futex() - Requeue a futex_q from one hb to another
 + * @q:                the futex_q to requeue
 + * @hb1:      the source hash_bucket
 + * @hb2:      the target hash_bucket
 + * @key2:     the new key for the requeued futex_q
 + */
 +static inline
 +void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
 +                 struct futex_hash_bucket *hb2, union futex_key *key2)
 +{
 +      /*
 +       * If key1 and key2 hash to the same bucket, no need to
 +       * requeue.
 +       */
 +      if (likely(&hb1->chain != &hb2->chain)) {
 +              plist_del(&q->list, &hb1->chain);
 +              plist_add(&q->list, &hb2->chain);
 +              q->lock_ptr = &hb2->lock;
 +#ifdef CONFIG_DEBUG_PI_LIST
 +              q->list.plist.lock = &hb2->lock;
 +#endif
 +      }
 +      get_futex_key_refs(key2);
 +      q->key = *key2;
 +}
 +
 +/**
 + * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue
 + * @q:         the futex_q
 + * @key:       the key of the requeue target futex
 + *
 + * During futex_requeue, with requeue_pi=1, it is possible to acquire the
 + * target futex if it is uncontended or via a lock steal.  Set the futex_q key
 + * to the requeue target futex so the waiter can detect the wakeup on the right
 + * futex, but remove it from the hb and NULL the rt_waiter so it can detect
 + * atomic lock acquisition.  Must be called with the q->lock_ptr held.
 + */
 +static inline
 +void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key)
 +{
 +      drop_futex_key_refs(&q->key);
 +      get_futex_key_refs(key);
 +      q->key = *key;
 +
 +      WARN_ON(plist_node_empty(&q->list));
 +      plist_del(&q->list, &q->list.plist);
 +
 +      WARN_ON(!q->rt_waiter);
 +      q->rt_waiter = NULL;
 +
 +      wake_up_state(q->task, TASK_NORMAL);
 +}
 +
 +/**
 + * futex_proxy_trylock_atomic() - Attempt an atomic lock for the top waiter
 + * @pifutex:          the user address of the to futex
 + * @hb1:              the from futex hash bucket, must be locked by the caller
 + * @hb2:              the to futex hash bucket, must be locked by the caller
 + * @key1:             the from futex key
 + * @key2:             the to futex key
 + * @ps:                       address to store the pi_state pointer
 + * @set_waiters:      force setting the FUTEX_WAITERS bit (1) or not (0)
 + *
 + * Try and get the lock on behalf of the top waiter if we can do it atomically.
 + * Wake the top waiter if we succeed.  If the caller specified set_waiters,
 + * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit.
 + * hb1 and hb2 must be held by the caller.
 + *
 + * Returns:
 +  *  0 - failed to acquire the lock atomically
 + *  1 - acquired the lock
 + * <0 - error
 + */
 +static int futex_proxy_trylock_atomic(u32 __user *pifutex,
 +                               struct futex_hash_bucket *hb1,
 +                               struct futex_hash_bucket *hb2,
 +                               union futex_key *key1, union futex_key *key2,
 +                               struct futex_pi_state **ps, int set_waiters)
 +{
 +      struct futex_q *top_waiter = NULL;
 +      u32 curval;
 +      int ret;
 +
 +      if (get_futex_value_locked(&curval, pifutex))
 +              return -EFAULT;
 +
 +      /*
 +       * Find the top_waiter and determine if there are additional waiters.
 +       * If the caller intends to requeue more than 1 waiter to pifutex,
 +       * force futex_lock_pi_atomic() to set the FUTEX_WAITERS bit now,
 +       * as we have means to handle the possible fault.  If not, don't set
 +       * the bit unnecessarily as it will force the subsequent unlock to enter
 +       * the kernel.
 +       */
 +      top_waiter = futex_top_waiter(hb1, key1);
 +
 +      /* There are no waiters, nothing for us to do. */
 +      if (!top_waiter)
 +              return 0;
 +
 +      /*
 +       * Try to take the lock for top_waiter.  Set the FUTEX_WAITERS bit in
 +       * the contended case or if set_waiters is 1.  The pi_state is returned
 +       * in ps in contended cases.
 +       */
 +      ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
 +                                 set_waiters);
 +      if (ret == 1)
 +              requeue_pi_wake_futex(top_waiter, key2);
 +
 +      return ret;
 +}
 +
 +/**
 + * futex_requeue() - Requeue waiters from uaddr1 to uaddr2
 + * @uaddr1:    source futex user address
 + * @uaddr2:    target futex user address
 + * @nr_wake:   number of waiters to wake (must be 1 for requeue_pi)
 + * @nr_requeue: number of waiters to requeue (0-INT_MAX)
 + * @requeue_pi: if we are attempting to requeue from a non-pi futex to a
 + *             pi futex (pi to pi requeue is not supported)
 + *
 + * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire
 + * uaddr2 atomically on behalf of the top waiter.
 + *
 + * Returns:
 + * >=0 - on success, the number of tasks requeued or woken
 + *  <0 - on error
   */
  static int futex_requeue(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
 -                       int nr_wake, int nr_requeue, u32 *cmpval)
 +                       int nr_wake, int nr_requeue, u32 *cmpval,
 +                       int requeue_pi)
  {
        union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
 +      int drop_count = 0, task_count = 0, ret;
 +      struct futex_pi_state *pi_state = NULL;
        struct futex_hash_bucket *hb1, *hb2;
        struct plist_head *head1;
        struct futex_q *this, *next;
 -      int ret, drop_count = 0;
 +      u32 curval2;
 +
 +      if (requeue_pi) {
 +              /*
 +               * requeue_pi requires a pi_state, try to allocate it now
 +               * without any locks in case it fails.
 +               */
 +              if (refill_pi_state_cache())
 +                      return -ENOMEM;
 +              /*
 +               * requeue_pi must wake as many tasks as it can, up to nr_wake
 +               * + nr_requeue, since it acquires the rt_mutex prior to
 +               * returning to userspace, so as to not leave the rt_mutex with
 +               * waiters and no owner.  However, second and third wake-ups
 +               * cannot be predicted as they involve race conditions with the
 +               * first wake and a fault while looking up the pi_state.  Both
 +               * pthread_cond_signal() and pthread_cond_broadcast() should
 +               * use nr_wake=1.
 +               */
 +              if (nr_wake != 1)
 +                      return -EINVAL;
 +      }
  
  retry:
-       ret = get_futex_key(uaddr1, fshared, &key1);
 +      if (pi_state != NULL) {
 +              /*
 +               * We will have to look up the pi_state again, so free this one
 +               * to keep the accounting correct.
 +               */
 +              free_pi_state(pi_state);
 +              pi_state = NULL;
 +      }
 +
+       ret = get_futex_key(uaddr1, fshared, &key1, VERIFY_READ);
        if (unlikely(ret != 0))
                goto out;
-       ret = get_futex_key(uaddr2, fshared, &key2);
 -      ret = get_futex_key(uaddr2, fshared, &key2, VERIFY_READ);
++      ret = get_futex_key(uaddr2, fshared, &key2,
++                          requeue_pi ? VERIFY_WRITE : VERIFY_READ);
        if (unlikely(ret != 0))
                goto out_put_key1;
  
@@@ -1169,105 -854,43 +1172,110 @@@ retry_private
                }
        }
  
 +      if (requeue_pi && (task_count - nr_wake < nr_requeue)) {
 +              /*
 +               * Attempt to acquire uaddr2 and wake the top waiter. If we
 +               * intend to requeue waiters, force setting the FUTEX_WAITERS
 +               * bit.  We force this here where we are able to easily handle
 +               * faults rather than in the requeue loop below.
 +               */
 +              ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1,
 +                                               &key2, &pi_state, nr_requeue);
 +
 +              /*
 +               * At this point the top_waiter has either taken uaddr2 or is
 +               * waiting on it.  If the former, then the pi_state will not
 +               * exist yet, look it up one more time to ensure we have a
 +               * reference to it.
 +               */
 +              if (ret == 1) {
 +                      WARN_ON(pi_state);
 +                      task_count++;
 +                      ret = get_futex_value_locked(&curval2, uaddr2);
 +                      if (!ret)
 +                              ret = lookup_pi_state(curval2, hb2, &key2,
 +                                                    &pi_state);
 +              }
 +
 +              switch (ret) {
 +              case 0:
 +                      break;
 +              case -EFAULT:
 +                      double_unlock_hb(hb1, hb2);
 +                      put_futex_key(fshared, &key2);
 +                      put_futex_key(fshared, &key1);
 +                      ret = get_user(curval2, uaddr2);
 +                      if (!ret)
 +                              goto retry;
 +                      goto out;
 +              case -EAGAIN:
 +                      /* The owner was exiting, try again. */
 +                      double_unlock_hb(hb1, hb2);
 +                      put_futex_key(fshared, &key2);
 +                      put_futex_key(fshared, &key1);
 +                      cond_resched();
 +                      goto retry;
 +              default:
 +                      goto out_unlock;
 +              }
 +      }
 +
        head1 = &hb1->chain;
        plist_for_each_entry_safe(this, next, head1, list) {
 -              if (!match_futex (&this->key, &key1))
 +              if (task_count - nr_wake >= nr_requeue)
 +                      break;
 +
 +              if (!match_futex(&this->key, &key1))
                        continue;
 -              if (++ret <= nr_wake) {
 +
 +              WARN_ON(!requeue_pi && this->rt_waiter);
 +              WARN_ON(requeue_pi && !this->rt_waiter);
 +
 +              /*
 +               * Wake nr_wake waiters.  For requeue_pi, if we acquired the
 +               * lock, we already woke the top_waiter.  If not, it will be
 +               * woken by futex_unlock_pi().
 +               */
 +              if (++task_count <= nr_wake && !requeue_pi) {
                        wake_futex(this);
 -              } else {
 -                      /*
 -                       * If key1 and key2 hash to the same bucket, no need to
 -                       * requeue.
 -                       */
 -                      if (likely(head1 != &hb2->chain)) {
 -                              plist_del(&this->list, &hb1->chain);
 -                              plist_add(&this->list, &hb2->chain);
 -                              this->lock_ptr = &hb2->lock;
 -#ifdef CONFIG_DEBUG_PI_LIST
 -                              this->list.plist.lock = &hb2->lock;
 -#endif
 -                      }
 -                      this->key = key2;
 -                      get_futex_key_refs(&key2);
 -                      drop_count++;
 +                      continue;
 +              }
  
 -                      if (ret - nr_wake >= nr_requeue)
 -                              break;
 +              /*
 +               * Requeue nr_requeue waiters and possibly one more in the case
 +               * of requeue_pi if we couldn't acquire the lock atomically.
 +               */
 +              if (requeue_pi) {
 +                      /* Prepare the waiter to take the rt_mutex. */
 +                      atomic_inc(&pi_state->refcount);
 +                      this->pi_state = pi_state;
 +                      ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
 +                                                      this->rt_waiter,
 +                                                      this->task, 1);
 +                      if (ret == 1) {
 +                              /* We got the lock. */
 +                              requeue_pi_wake_futex(this, &key2);
 +                              continue;
 +                      } else if (ret) {
 +                              /* -EDEADLK */
 +                              this->pi_state = NULL;
 +                              free_pi_state(pi_state);
 +                              goto out_unlock;
 +                      }
                }
 +              requeue_futex(this, hb1, hb2, &key2);
 +              drop_count++;
        }
  
  out_unlock:
        double_unlock_hb(hb1, hb2);
  
-       /* drop_futex_key_refs() must be called outside the spinlocks. */
+       /*
+        * drop_futex_key_refs() must be called outside the spinlocks. During
+        * the requeue we moved futex_q's from the hash bucket at key1 to the
+        * one at key2 and updated their key pointer.  We no longer need to
+        * hold the references to key1.
+        */
        while (--drop_count >= 0)
                drop_futex_key_refs(&key1);
  
@@@ -1276,9 -899,7 +1284,9 @@@ out_put_keys
  out_put_key1:
        put_futex_key(fshared, &key1);
  out:
 -      return ret;
 +      if (pi_state != NULL)
 +              free_pi_state(pi_state);
 +      return ret ? ret : task_count;
  }
  
  /* The key must be already stored in q->key. */
@@@ -1286,6 -907,8 +1294,6 @@@ static inline struct futex_hash_bucket 
  {
        struct futex_hash_bucket *hb;
  
 -      init_waitqueue_head(&q->waiter);
 -
        get_futex_key_refs(&q->key);
        hb = hash_futex(&q->key);
        q->lock_ptr = &hb->lock;
@@@ -1496,154 -1119,39 +1504,154 @@@ handle_fault
   */
  #define FLAGS_SHARED          0x01
  #define FLAGS_CLOCKRT         0x02
 +#define FLAGS_HAS_TIMEOUT     0x04
  
  static long futex_wait_restart(struct restart_block *restart);
 +static long futex_lock_pi_restart(struct restart_block *restart);
  
 -static int futex_wait(u32 __user *uaddr, int fshared,
 -                    u32 val, ktime_t *abs_time, u32 bitset, int clockrt)
 +/**
 + * fixup_owner() - Post-lock pi_state and corner case management
 + * @uaddr:    user address of the futex
 + * @fshared:  whether the futex is shared (1) or not (0)
 + * @q:                futex_q (contains pi_state and access to the rt_mutex)
 + * @locked:   if the attempt to take the rt_mutex succeeded (1) or not (0)
 + *
 + * After attempting to lock an rt_mutex, this function is called to cleanup
 + * the pi_state owner as well as handle race conditions that may allow us to
 + * acquire the lock. Must be called with the hb lock held.
 + *
 + * Returns:
 + *  1 - success, lock taken
 + *  0 - success, lock not taken
 + * <0 - on error (-EFAULT)
 + */
 +static int fixup_owner(u32 __user *uaddr, int fshared, struct futex_q *q,
 +                     int locked)
  {
 -      struct task_struct *curr = current;
 -      struct restart_block *restart;
 -      DECLARE_WAITQUEUE(wait, curr);
 -      struct futex_hash_bucket *hb;
 -      struct futex_q q;
 -      u32 uval;
 -      int ret;
 -      struct hrtimer_sleeper t;
 -      int rem = 0;
 -
 -      if (!bitset)
 -              return -EINVAL;
 +      struct task_struct *owner;
 +      int ret = 0;
  
 -      q.pi_state = NULL;
 -      q.bitset = bitset;
 -retry:
 -      q.key = FUTEX_KEY_INIT;
 -      ret = get_futex_key(uaddr, fshared, &q.key, VERIFY_READ);
 -      if (unlikely(ret != 0))
 +      if (locked) {
 +              /*
 +               * Got the lock. We might not be the anticipated owner if we
 +               * did a lock-steal - fix up the PI-state in that case:
 +               */
 +              if (q->pi_state->owner != current)
 +                      ret = fixup_pi_state_owner(uaddr, q, current, fshared);
                goto out;
 +      }
  
 -retry_private:
 -      hb = queue_lock(&q);
 +      /*
 +       * Catch the rare case where the lock was released when we were on the
 +       * way back before we locked the hash bucket.
 +       */
 +      if (q->pi_state->owner == current) {
 +              /*
 +               * Try to get the rt_mutex now. This might fail as some other
 +               * task acquired the rt_mutex after we removed ourself from the
 +               * rt_mutex waiters list.
 +               */
 +              if (rt_mutex_trylock(&q->pi_state->pi_mutex)) {
 +                      locked = 1;
 +                      goto out;
 +              }
 +
 +              /*
 +               * pi_state is incorrect, some other task did a lock steal and
 +               * we returned due to timeout or signal without taking the
 +               * rt_mutex. Too late. We can access the rt_mutex_owner without
 +               * locking, as the other task is now blocked on the hash bucket
 +               * lock. Fix the state up.
 +               */
 +              owner = rt_mutex_owner(&q->pi_state->pi_mutex);
 +              ret = fixup_pi_state_owner(uaddr, q, owner, fshared);
 +              goto out;
 +      }
  
        /*
 -       * Access the page AFTER the hash-bucket is locked.
 -       * Order is important:
 +       * Paranoia check. If we did not take the lock, then we should not be
 +       * the owner, nor the pending owner, of the rt_mutex.
 +       */
 +      if (rt_mutex_owner(&q->pi_state->pi_mutex) == current)
 +              printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
 +                              "pi-state %p\n", ret,
 +                              q->pi_state->pi_mutex.owner,
 +                              q->pi_state->owner);
 +
 +out:
 +      return ret ? ret : locked;
 +}
 +
 +/**
 + * futex_wait_queue_me() - queue_me() and wait for wakeup, timeout, or signal
 + * @hb:               the futex hash bucket, must be locked by the caller
 + * @q:                the futex_q to queue up on
 + * @timeout:  the prepared hrtimer_sleeper, or null for no timeout
 + */
 +static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
 +                              struct hrtimer_sleeper *timeout)
 +{
 +      queue_me(q, hb);
 +
 +      /*
 +       * There might have been scheduling since the queue_me(), as we
 +       * cannot hold a spinlock across the get_user() in case it
 +       * faults, and we cannot just set TASK_INTERRUPTIBLE state when
 +       * queueing ourselves into the futex hash. This code thus has to
 +       * rely on the futex_wake() code removing us from hash when it
 +       * wakes us up.
 +       */
 +      set_current_state(TASK_INTERRUPTIBLE);
 +
 +      /* Arm the timer */
 +      if (timeout) {
 +              hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
 +              if (!hrtimer_active(&timeout->timer))
 +                      timeout->task = NULL;
 +      }
 +
 +      /*
 +       * !plist_node_empty() is safe here without any lock.
 +       * q.lock_ptr != 0 is not safe, because of ordering against wakeup.
 +       */
 +      if (likely(!plist_node_empty(&q->list))) {
 +              /*
 +               * If the timer has already expired, current will already be
 +               * flagged for rescheduling. Only call schedule if there
 +               * is no timeout, or if it has yet to expire.
 +               */
 +              if (!timeout || timeout->task)
 +                      schedule();
 +      }
 +      __set_current_state(TASK_RUNNING);
 +}
 +
 +/**
 + * futex_wait_setup() - Prepare to wait on a futex
 + * @uaddr:    the futex userspace address
 + * @val:      the expected value
 + * @fshared:  whether the futex is shared (1) or not (0)
 + * @q:                the associated futex_q
 + * @hb:               storage for hash_bucket pointer to be returned to caller
 + *
 + * Setup the futex_q and locate the hash_bucket.  Get the futex value and
 + * compare it with the expected value.  Handle atomic faults internally.
 + * Return with the hb lock held and a q.key reference on success, and unlocked
 + * with no q.key reference on failure.
 + *
 + * Returns:
 + *  0 - uaddr contains val and hb has been locked
 +  * <0 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked
 + */
 +static int futex_wait_setup(u32 __user *uaddr, u32 val, int fshared,
 +                         struct futex_q *q, struct futex_hash_bucket **hb)
 +{
 +      u32 uval;
 +      int ret;
 +
 +      /*
 +       * Access the page AFTER the hash-bucket is locked.
 +       * Order is important:
         *
         *   Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
         *   Userspace waker:  if (cond(var)) { var = new; futex_wake(&var); }
         * A consequence is that futex_wait() can return zero and absorb
         * a wakeup when *uaddr != val on entry to the syscall.  This is
         * rare, but normal.
 -       *
 -       * For shared futexes, we hold the mmap semaphore, so the mapping
 -       * cannot have changed since we looked it up in get_futex_key.
         */
-       ret = get_futex_key(uaddr, fshared, &q->key);
 +retry:
 +      q->key = FUTEX_KEY_INIT;
++      ret = get_futex_key(uaddr, fshared, &q->key, VERIFY_READ);
 +      if (unlikely(ret != 0))
 +              return ret;
 +
 +retry_private:
 +      *hb = queue_lock(q);
 +
        ret = get_futex_value_locked(&uval, uaddr);
  
 -      if (unlikely(ret)) {
 -              queue_unlock(&q, hb);
 +      if (ret) {
 +              queue_unlock(q, *hb);
  
                ret = get_user(uval, uaddr);
                if (ret)
 -                      goto out_put_key;
 +                      goto out;
  
                if (!fshared)
                        goto retry_private;
  
 -              put_futex_key(fshared, &q.key);
 +              put_futex_key(fshared, &q->key);
                goto retry;
        }
 -      ret = -EWOULDBLOCK;
 -      if (unlikely(uval != val)) {
 -              queue_unlock(&q, hb);
 -              goto out_put_key;
 -      }
  
 -      /* Only actually queue if *uaddr contained val.  */
 -      queue_me(&q, hb);
 +      if (uval != val) {
 +              queue_unlock(q, *hb);
 +              ret = -EWOULDBLOCK;
 +      }
  
 -      /*
 -       * There might have been scheduling since the queue_me(), as we
 -       * cannot hold a spinlock across the get_user() in case it
 -       * faults, and we cannot just set TASK_INTERRUPTIBLE state when
 -       * queueing ourselves into the futex hash.  This code thus has to
 -       * rely on the futex_wake() code removing us from hash when it
 -       * wakes us up.
 -       */
 +out:
 +      if (ret)
 +              put_futex_key(fshared, &q->key);
 +      return ret;
 +}
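
The waiter/waker ordering spelled out in the comment above is the userspace
contract; a minimal sketch of the idiom it protects, using only the
long-standing FUTEX_WAIT/FUTEX_WAKE operations:

#include <stdint.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/futex.h>	/* FUTEX_WAIT, FUTEX_WAKE */

/* Waiter: load, test, then sleep. futex_wait_setup() rechecks that *flag
 * still equals the expected value under the hash bucket lock, so a wakeup
 * between our load and the syscall is never lost. */
static void wait_for_flag(uint32_t *flag)
{
	while (__sync_fetch_and_add(flag, 0) == 0)
		syscall(SYS_futex, flag, FUTEX_WAIT, 0, NULL, NULL, 0);
}

/* Waker: store first, then wake, matching the ordering described above. */
static void set_flag(uint32_t *flag)
{
	__sync_bool_compare_and_swap(flag, 0, 1);
	syscall(SYS_futex, flag, FUTEX_WAKE, 1, NULL, NULL, 0);
}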
  
 -      /* add_wait_queue is the barrier after __set_current_state. */
 -      __set_current_state(TASK_INTERRUPTIBLE);
 -      add_wait_queue(&q.waiter, &wait);
 -      /*
 -       * !plist_node_empty() is safe here without any lock.
 -       * q.lock_ptr != 0 is not safe, because of ordering against wakeup.
 -       */
 -      if (likely(!plist_node_empty(&q.list))) {
 -              if (!abs_time)
 -                      schedule();
 -              else {
 -                      hrtimer_init_on_stack(&t.timer,
 -                                            clockrt ? CLOCK_REALTIME :
 -                                            CLOCK_MONOTONIC,
 -                                            HRTIMER_MODE_ABS);
 -                      hrtimer_init_sleeper(&t, current);
 -                      hrtimer_set_expires_range_ns(&t.timer, *abs_time,
 -                                                   current->timer_slack_ns);
 -
 -                      hrtimer_start_expires(&t.timer, HRTIMER_MODE_ABS);
 -                      if (!hrtimer_active(&t.timer))
 -                              t.task = NULL;
 +static int futex_wait(u32 __user *uaddr, int fshared,
 +                    u32 val, ktime_t *abs_time, u32 bitset, int clockrt)
 +{
 +      struct hrtimer_sleeper timeout, *to = NULL;
 +      struct restart_block *restart;
 +      struct futex_hash_bucket *hb;
 +      struct futex_q q;
 +      int ret;
  
 -                      /*
 -                       * the timer could have already expired, in which
 -                       * case current would be flagged for rescheduling.
 -                       * Don't bother calling schedule.
 -                       */
 -                      if (likely(t.task))
 -                              schedule();
 +      if (!bitset)
 +              return -EINVAL;
  
 -                      hrtimer_cancel(&t.timer);
 +      q.pi_state = NULL;
 +      q.bitset = bitset;
 +      q.rt_waiter = NULL;
  
 -                      /* Flag if a timeout occured */
 -                      rem = (t.task == NULL);
 +      if (abs_time) {
 +              to = &timeout;
  
 -                      destroy_hrtimer_on_stack(&t.timer);
 -              }
 +              hrtimer_init_on_stack(&to->timer, clockrt ? CLOCK_REALTIME :
 +                                    CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
 +              hrtimer_init_sleeper(to, current);
 +              hrtimer_set_expires_range_ns(&to->timer, *abs_time,
 +                                           current->timer_slack_ns);
        }
 -      __set_current_state(TASK_RUNNING);
  
 -      /*
 -       * NOTE: we don't remove ourselves from the waitqueue because
 -       * we are the only user of it.
 -       */
 +      /* Prepare to wait on uaddr. */
 +      ret = futex_wait_setup(uaddr, val, fshared, &q, &hb);
 +      if (ret)
 +              goto out;
 +
 +      /* queue_me and wait for wakeup, timeout, or a signal. */
 +      futex_wait_queue_me(hb, &q, to);
  
        /* If we were woken (and unqueued), we succeeded, whatever. */
        ret = 0;
        if (!unqueue_me(&q))
                goto out_put_key;
        ret = -ETIMEDOUT;
 -      if (rem)
 +      if (to && !to->task)
                goto out_put_key;
  
        /*
        restart->futex.val = val;
        restart->futex.time = abs_time->tv64;
        restart->futex.bitset = bitset;
 -      restart->futex.flags = 0;
 +      restart->futex.flags = FLAGS_HAS_TIMEOUT;
  
        if (fshared)
                restart->futex.flags |= FLAGS_SHARED;
  out_put_key:
        put_futex_key(fshared, &q.key);
  out:
 +      if (to) {
 +              hrtimer_cancel(&to->timer);
 +              destroy_hrtimer_on_stack(&to->timer);
 +      }
        return ret;
  }
  
@@@ -1774,16 -1290,13 +1782,16 @@@ static long futex_wait_restart(struct r
  {
        u32 __user *uaddr = (u32 __user *)restart->futex.uaddr;
        int fshared = 0;
 -      ktime_t t;
 +      ktime_t t, *tp = NULL;
  
 -      t.tv64 = restart->futex.time;
 +      if (restart->futex.flags & FLAGS_HAS_TIMEOUT) {
 +              t.tv64 = restart->futex.time;
 +              tp = &t;
 +      }
        restart->fn = do_no_restart_syscall;
        if (restart->futex.flags & FLAGS_SHARED)
                fshared = 1;
 -      return (long)futex_wait(uaddr, fshared, restart->futex.val, &t,
 +      return (long)futex_wait(uaddr, fshared, restart->futex.val, tp,
                                restart->futex.bitset,
                                restart->futex.flags & FLAGS_CLOCKRT);
  }
@@@ -1799,10 -1312,11 +1807,10 @@@ static int futex_lock_pi(u32 __user *ua
                         int detect, ktime_t *time, int trylock)
  {
        struct hrtimer_sleeper timeout, *to = NULL;
 -      struct task_struct *curr = current;
        struct futex_hash_bucket *hb;
 -      u32 uval, newval, curval;
 +      u32 uval;
        struct futex_q q;
 -      int ret, lock_taken, ownerdied = 0;
 +      int res, ret;
  
        if (refill_pi_state_cache())
                return -ENOMEM;
        }
  
        q.pi_state = NULL;
 +      q.rt_waiter = NULL;
  retry:
        q.key = FUTEX_KEY_INIT;
-       ret = get_futex_key(uaddr, fshared, &q.key);
+       ret = get_futex_key(uaddr, fshared, &q.key, VERIFY_WRITE);
        if (unlikely(ret != 0))
                goto out;
  
  retry_private:
        hb = queue_lock(&q);
  
 -retry_locked:
 -      ret = lock_taken = 0;
 -
 -      /*
 -       * To avoid races, we attempt to take the lock here again
 -       * (by doing a 0 -> TID atomic cmpxchg), while holding all
 -       * the locks. It will most likely not succeed.
 -       */
 -      newval = task_pid_vnr(current);
 -
 -      curval = cmpxchg_futex_value_locked(uaddr, 0, newval);
 -
 -      if (unlikely(curval == -EFAULT))
 -              goto uaddr_faulted;
 -
 -      /*
 -       * Detect deadlocks. In case of REQUEUE_PI this is a valid
 -       * situation and we return success to user space.
 -       */
 -      if (unlikely((curval & FUTEX_TID_MASK) == task_pid_vnr(current))) {
 -              ret = -EDEADLK;
 -              goto out_unlock_put_key;
 -      }
 -
 -      /*
 -       * Surprise - we got the lock. Just return to userspace:
 -       */
 -      if (unlikely(!curval))
 -              goto out_unlock_put_key;
 -
 -      uval = curval;
 -
 -      /*
 -       * Set the WAITERS flag, so the owner will know it has someone
 -       * to wake at next unlock
 -       */
 -      newval = curval | FUTEX_WAITERS;
 -
 -      /*
 -       * There are two cases, where a futex might have no owner (the
 -       * owner TID is 0): OWNER_DIED. We take over the futex in this
 -       * case. We also do an unconditional take over, when the owner
 -       * of the futex died.
 -       *
 -       * This is safe as we are protected by the hash bucket lock !
 -       */
 -      if (unlikely(ownerdied || !(curval & FUTEX_TID_MASK))) {
 -              /* Keep the OWNER_DIED bit */
 -              newval = (curval & ~FUTEX_TID_MASK) | task_pid_vnr(current);
 -              ownerdied = 0;
 -              lock_taken = 1;
 -      }
 -
 -      curval = cmpxchg_futex_value_locked(uaddr, uval, newval);
 -
 -      if (unlikely(curval == -EFAULT))
 -              goto uaddr_faulted;
 -      if (unlikely(curval != uval))
 -              goto retry_locked;
 -
 -      /*
 -       * We took the lock due to owner died take over.
 -       */
 -      if (unlikely(lock_taken))
 -              goto out_unlock_put_key;
 -
 -      /*
 -       * We dont have the lock. Look up the PI state (or create it if
 -       * we are the first waiter):
 -       */
 -      ret = lookup_pi_state(uval, hb, &q.key, &q.pi_state);
 -
 +      ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, 0);
        if (unlikely(ret)) {
                switch (ret) {
 -
 +              case 1:
 +                      /* We got the lock. */
 +                      ret = 0;
 +                      goto out_unlock_put_key;
 +              case -EFAULT:
 +                      goto uaddr_faulted;
                case -EAGAIN:
                        /*
                         * Task is exiting and we just wait for the
                        put_futex_key(fshared, &q.key);
                        cond_resched();
                        goto retry;
 -
 -              case -ESRCH:
 -                      /*
 -                       * No owner found for this futex. Check if the
 -                       * OWNER_DIED bit is set to figure out whether
 -                       * this is a robust futex or not.
 -                       */
 -                      if (get_futex_value_locked(&curval, uaddr))
 -                              goto uaddr_faulted;
 -
 -                      /*
 -                       * We simply start over in case of a robust
 -                       * futex. The code above will take the futex
 -                       * and return happy.
 -                       */
 -                      if (curval & FUTEX_OWNER_DIED) {
 -                              ownerdied = 1;
 -                              goto retry_locked;
 -                      }
                default:
                        goto out_unlock_put_key;
                }
        }
  
        spin_lock(q.lock_ptr);
 -
 -      if (!ret) {
 -              /*
 -               * Got the lock. We might not be the anticipated owner
 -               * if we did a lock-steal - fix up the PI-state in
 -               * that case:
 -               */
 -              if (q.pi_state->owner != curr)
 -                      ret = fixup_pi_state_owner(uaddr, &q, curr, fshared);
 -      } else {
 -              /*
 -               * Catch the rare case, where the lock was released
 -               * when we were on the way back before we locked the
 -               * hash bucket.
 -               */
 -              if (q.pi_state->owner == curr) {
 -                      /*
 -                       * Try to get the rt_mutex now. This might
 -                       * fail as some other task acquired the
 -                       * rt_mutex after we removed ourself from the
 -                       * rt_mutex waiters list.
 -                       */
 -                      if (rt_mutex_trylock(&q.pi_state->pi_mutex))
 -                              ret = 0;
 -                      else {
 -                              /*
 -                               * pi_state is incorrect, some other
 -                               * task did a lock steal and we
 -                               * returned due to timeout or signal
 -                               * without taking the rt_mutex. Too
 -                               * late. We can access the
 -                               * rt_mutex_owner without locking, as
 -                               * the other task is now blocked on
 -                               * the hash bucket lock. Fix the state
 -                               * up.
 -                               */
 -                              struct task_struct *owner;
 -                              int res;
 -
 -                              owner = rt_mutex_owner(&q.pi_state->pi_mutex);
 -                              res = fixup_pi_state_owner(uaddr, &q, owner,
 -                                                         fshared);
 -
 -                              /* propagate -EFAULT, if the fixup failed */
 -                              if (res)
 -                                      ret = res;
 -                      }
 -              } else {
 -                      /*
 -                       * Paranoia check. If we did not take the lock
 -                       * in the trylock above, then we should not be
 -                       * the owner of the rtmutex, neither the real
 -                       * nor the pending one:
 -                       */
 -                      if (rt_mutex_owner(&q.pi_state->pi_mutex) == curr)
 -                              printk(KERN_ERR "futex_lock_pi: ret = %d "
 -                                     "pi-mutex: %p pi-state %p\n", ret,
 -                                     q.pi_state->pi_mutex.owner,
 -                                     q.pi_state->owner);
 -              }
 -      }
 +      /*
 +       * Fixup the pi_state owner and possibly acquire the lock if we
 +       * haven't already.
 +       */
 +      res = fixup_owner(uaddr, fshared, &q, !ret);
 +      /*
 +       * If fixup_owner() returned an error, propagate that.  If it acquired
 +       * the lock, clear our -ETIMEDOUT or -EINTR.
 +       */
 +      if (res)
 +              ret = (res < 0) ? res : 0;
  
        /*
 -       * If fixup_pi_state_owner() faulted and was unable to handle the
 -       * fault, unlock it and return the fault to userspace.
 +       * If fixup_owner() faulted and was unable to handle the fault, unlock
 +       * it and return the fault to userspace.
         */
        if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current))
                rt_mutex_unlock(&q.pi_state->pi_mutex);
        /* Unqueue and drop the lock */
        unqueue_me_pi(&q);
  
 -      if (to)
 -              destroy_hrtimer_on_stack(&to->timer);
 -      return ret != -EINTR ? ret : -ERESTARTNOINTR;
 +      goto out;
  
  out_unlock_put_key:
        queue_unlock(&q, hb);
@@@ -1899,7 -1549,7 +1907,7 @@@ out_put_key
  out:
        if (to)
                destroy_hrtimer_on_stack(&to->timer);
 -      return ret;
 +      return ret != -EINTR ? ret : -ERESTARTNOINTR;
  
  uaddr_faulted:
        /*
        goto retry;
  }
  
 +static long futex_lock_pi_restart(struct restart_block *restart)
 +{
 +      u32 __user *uaddr = (u32 __user *)restart->futex.uaddr;
 +      ktime_t t, *tp = NULL;
 +      int fshared = restart->futex.flags & FLAGS_SHARED;
 +
 +      if (restart->futex.flags & FLAGS_HAS_TIMEOUT) {
 +              t.tv64 = restart->futex.time;
 +              tp = &t;
 +      }
 +      restart->fn = do_no_restart_syscall;
 +
 +      return (long)futex_lock_pi(uaddr, fshared, restart->futex.val, tp, 0);
 +}
  
  /*
   * Userspace attempted a TID -> 0 atomic transition, and failed.
@@@ -1960,7 -1596,7 +1968,7 @@@ retry
        if ((uval & FUTEX_TID_MASK) != task_pid_vnr(current))
                return -EPERM;
  
-       ret = get_futex_key(uaddr, fshared, &key);
+       ret = get_futex_key(uaddr, fshared, &key, VERIFY_WRITE);
        if (unlikely(ret != 0))
                goto out;
  
@@@ -2038,252 -1674,6 +2046,252 @@@ pi_faulted
        return ret;
  }
  
-       ret = get_futex_key(uaddr2, fshared, &key2);
 +/**
 + * handle_early_requeue_pi_wakeup() - Detect early wakeup on the initial futex
 + * @hb:               the hash_bucket the futex_q was originally enqueued on
 + * @q:                the futex_q woken while waiting to be requeued
 + * @key2:     the futex_key of the requeue target futex
 + * @timeout:  the timeout associated with the wait (NULL if none)
 + *
 + * Detect if the task was woken on the initial futex as opposed to the requeue
 + * target futex.  If so, determine if it was a timeout or a signal that caused
 + * the wakeup and return the appropriate error code to the caller.  Must be
 + * called with the hb lock held.
 + *
 + * Returns
 + *  0 - no early wakeup detected
 + * <0 - -ETIMEDOUT or -ERESTARTSYS (FIXME: or ERESTARTNOINTR?)
 + */
 +static inline
 +int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
 +                                 struct futex_q *q, union futex_key *key2,
 +                                 struct hrtimer_sleeper *timeout)
 +{
 +      int ret = 0;
 +
 +      /*
 +       * With the hb lock held, we avoid races while we process the wakeup.
 +       * We only need to hold hb (and not hb2) to ensure atomicity as the
 +       * wakeup code can't change q.key from uaddr to uaddr2 if we hold hb.
 +       * It can't be requeued from uaddr2 to something else since we don't
 +       * support a PI aware source futex for requeue.
 +       */
 +      if (!match_futex(&q->key, key2)) {
 +              WARN_ON(q->lock_ptr && (&hb->lock != q->lock_ptr));
 +              /*
 +               * We were woken prior to requeue by a timeout or a signal.
 +               * Unqueue the futex_q and determine which it was.
 +               */
 +              plist_del(&q->list, &q->list.plist);
 +              drop_futex_key_refs(&q->key);
 +
 +              if (timeout && !timeout->task)
 +                      ret = -ETIMEDOUT;
 +              else {
 +                      /*
 +                       * We expect signal_pending(current), but another
 +                       * thread may have handled it for us already.
 +                       */
 +                      /* FIXME: ERESTARTSYS or ERESTARTNOINTR?  Do we care if
 +                       * the user specified SA_RESTART or not? */
 +                      ret = -ERESTARTSYS;
 +              }
 +      }
 +      return ret;
 +}
 +
 +/**
 + * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2
 + * @uaddr:    the futex we initially wait on (non-pi)
 + * @fshared:  whether the futexes are shared (1) or not (0).  They must be
 + *            the same type, no requeueing from private to shared, etc.
 + * @val:      the expected value of uaddr
 + * @abs_time: absolute timeout
 + * @bitset:   32 bit wakeup bitset set by userspace, defaults to all.
 + * @clockrt:  whether to use CLOCK_REALTIME (1) or CLOCK_MONOTONIC (0)
 + * @uaddr2:   the pi futex we will take prior to returning to user-space
 + *
 + * The caller will wait on uaddr and will be requeued by futex_requeue() to
 + * uaddr2 which must be PI aware.  Normal wakeup will wake on uaddr2 and
 + * complete the acquisition of the rt_mutex prior to returning to userspace.
 + * This ensures the rt_mutex maintains an owner when it has waiters; without
 + * one, the pi logic wouldn't know which task to boost/deboost should the
 + * need arise.
 + *
 + * We call schedule() in futex_wait_queue_me() when we enqueue and return there
 + * via the following:
 + * 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue()
 + * 2) wakeup on uaddr2 after a requeue and subsequent unlock
 + * 3) signal (before or after requeue)
 + * 4) timeout (before or after requeue)
 + *
 + * If 3, we setup a restart_block with futex_wait_requeue_pi() as the function.
 + *
 + * If 2, we may then block on trying to take the rt_mutex and return via:
 + * 5) successful lock
 + * 6) signal
 + * 7) timeout
 + * 8) other lock acquisition failure
 + *
 + * If 6, we setup a restart_block with futex_lock_pi() as the function.
 + *
 + * If 4 or 7, we cleanup and return with -ETIMEDOUT.
 + *
 + * Returns:
 + *  0 - On success
 + * <0 - On error
 + */
 +static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared,
 +                               u32 val, ktime_t *abs_time, u32 bitset,
 +                               int clockrt, u32 __user *uaddr2)
 +{
 +      struct hrtimer_sleeper timeout, *to = NULL;
 +      struct rt_mutex_waiter rt_waiter;
 +      struct rt_mutex *pi_mutex = NULL;
 +      struct restart_block *restart;
 +      struct futex_hash_bucket *hb;
 +      union futex_key key2;
 +      struct futex_q q;
 +      int res, ret;
 +      u32 uval;
 +
 +      if (!bitset)
 +              return -EINVAL;
 +
 +      if (abs_time) {
 +              to = &timeout;
 +              hrtimer_init_on_stack(&to->timer, clockrt ? CLOCK_REALTIME :
 +                                    CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
 +              hrtimer_init_sleeper(to, current);
 +              hrtimer_set_expires_range_ns(&to->timer, *abs_time,
 +                                           current->timer_slack_ns);
 +      }
 +
 +      /*
 +       * The waiter is allocated on our stack, manipulated by the requeue
 +       * code while we sleep on uaddr.
 +       */
 +      debug_rt_mutex_init_waiter(&rt_waiter);
 +      rt_waiter.task = NULL;
 +
 +      q.pi_state = NULL;
 +      q.bitset = bitset;
 +      q.rt_waiter = &rt_waiter;
 +
 +      key2 = FUTEX_KEY_INIT;
++      ret = get_futex_key(uaddr2, fshared, &key2, VERIFY_WRITE);
 +      if (unlikely(ret != 0))
 +              goto out;
 +
 +      /* Prepare to wait on uaddr. */
 +      ret = futex_wait_setup(uaddr, val, fshared, &q, &hb);
 +      if (ret) {
 +              put_futex_key(fshared, &key2);
 +              goto out;
 +      }
 +
 +      /* Queue the futex_q, drop the hb lock, wait for wakeup. */
 +      futex_wait_queue_me(hb, &q, to);
 +
 +      spin_lock(&hb->lock);
 +      ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
 +      spin_unlock(&hb->lock);
 +      if (ret)
 +              goto out_put_keys;
 +
 +      /*
 +       * In order for us to be here, we know our q.key == key2, and since
 +       * we took the hb->lock above, we also know that futex_requeue() has
 +       * completed and we no longer have to concern ourselves with a wakeup
 +       * race with the atomic proxy lock acquisition by the requeue code.
 +       */
 +
 +      /* Check if the requeue code acquired the second futex for us. */
 +      if (!q.rt_waiter) {
 +              /*
 +               * Got the lock. We might not be the anticipated owner if we
 +               * did a lock-steal - fix up the PI-state in that case.
 +               */
 +              if (q.pi_state && (q.pi_state->owner != current)) {
 +                      spin_lock(q.lock_ptr);
 +                      ret = fixup_pi_state_owner(uaddr2, &q, current,
 +                                                 fshared);
 +                      spin_unlock(q.lock_ptr);
 +              }
 +      } else {
 +              /*
 +               * We have been woken up by futex_unlock_pi(), a timeout, or a
 +               * signal.  futex_unlock_pi() will not destroy the lock_ptr nor
 +               * the pi_state.
 +               */
 +              WARN_ON(!q.pi_state);
 +              pi_mutex = &q.pi_state->pi_mutex;
 +              ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1);
 +              debug_rt_mutex_free_waiter(&rt_waiter);
 +
 +              spin_lock(q.lock_ptr);
 +              /*
 +               * Fixup the pi_state owner and possibly acquire the lock if we
 +               * haven't already.
 +               */
 +              res = fixup_owner(uaddr2, fshared, &q, !ret);
 +              /*
 +               * If fixup_owner() returned an error, proprogate that.  If it
 +               * acquired the lock, clear our -ETIMEDOUT or -EINTR.
 +               */
 +              if (res)
 +                      ret = (res < 0) ? res : 0;
 +
 +              /* Unqueue and drop the lock. */
 +              unqueue_me_pi(&q);
 +      }
 +
 +      /*
 +       * If fixup_pi_state_owner() faulted and was unable to handle the
 +       * fault, unlock the rt_mutex and return the fault to userspace.
 +       */
 +      if (ret == -EFAULT) {
 +              if (rt_mutex_owner(pi_mutex) == current)
 +                      rt_mutex_unlock(pi_mutex);
 +      } else if (ret == -EINTR) {
 +              ret = -EFAULT;
 +              if (get_user(uval, uaddr2))
 +                      goto out_put_keys;
 +
 +              /*
 +               * We've already been requeued, so restart by calling
 +               * futex_lock_pi() directly, rather than returning to this
 +               * function.
 +               */
 +              ret = -ERESTART_RESTARTBLOCK;
 +              restart = &current_thread_info()->restart_block;
 +              restart->fn = futex_lock_pi_restart;
 +              restart->futex.uaddr = (u32 *)uaddr2;
 +              restart->futex.val = uval;
 +              restart->futex.flags = 0;
 +              if (abs_time) {
 +                      restart->futex.flags |= FLAGS_HAS_TIMEOUT;
 +                      restart->futex.time = abs_time->tv64;
 +              }
 +
 +              if (fshared)
 +                      restart->futex.flags |= FLAGS_SHARED;
 +              if (clockrt)
 +                      restart->futex.flags |= FLAGS_CLOCKRT;
 +      }
 +
 +out_put_keys:
 +      put_futex_key(fshared, &q.key);
 +      put_futex_key(fshared, &key2);
 +
 +out:
 +      if (to) {
 +              hrtimer_cancel(&to->timer);
 +              destroy_hrtimer_on_stack(&to->timer);
 +      }
 +      return ret;
 +}
 +
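
Since do_futex() below now accepts FUTEX_CLOCK_REALTIME for the new wait op,
and the timer above is armed with HRTIMER_MODE_ABS, a timed wait takes an
absolute timespec. A hypothetical wrapper, with the flag and opcode values
assumed from this series' header additions:

#include <stdint.h>
#include <time.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef FUTEX_WAIT_REQUEUE_PI		/* assumed values, as above */
#define FUTEX_WAIT_REQUEUE_PI	11
#endif
#ifndef FUTEX_CLOCK_REALTIME
#define FUTEX_CLOCK_REALTIME	256
#endif

/* Returns -1/ETIMEDOUT if @abs_timeout (CLOCK_REALTIME) fires before the
 * requeue target wakes us and the PI lock is acquired. */
static long cond_timedwait_requeue_pi(uint32_t *cond, uint32_t seen,
				      uint32_t *mutex,
				      const struct timespec *abs_timeout)
{
	return syscall(SYS_futex, cond,
		       FUTEX_WAIT_REQUEUE_PI | FUTEX_CLOCK_REALTIME,
		       seen, abs_timeout, mutex, 0);
}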
  /*
   * Support for robust futexes: the kernel cleans up held futexes at
   * thread exit time.
@@@ -2506,7 -1896,7 +2514,7 @@@ long do_futex(u32 __user *uaddr, int op
                fshared = 1;
  
        clockrt = op & FUTEX_CLOCK_REALTIME;
 -      if (clockrt && cmd != FUTEX_WAIT_BITSET)
 +      if (clockrt && cmd != FUTEX_WAIT_BITSET && cmd != FUTEX_WAIT_REQUEUE_PI)
                return -ENOSYS;
  
        switch (cmd) {
                ret = futex_wake(uaddr, fshared, val, val3);
                break;
        case FUTEX_REQUEUE:
 -              ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, NULL);
 +              ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, NULL, 0);
                break;
        case FUTEX_CMP_REQUEUE:
 -              ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, &val3);
 +              ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, &val3,
 +                                  0);
                break;
        case FUTEX_WAKE_OP:
                ret = futex_wake_op(uaddr, fshared, uaddr2, val, val2, val3);
                if (futex_cmpxchg_enabled)
                        ret = futex_lock_pi(uaddr, fshared, 0, timeout, 1);
                break;
 +      case FUTEX_WAIT_REQUEUE_PI:
 +              val3 = FUTEX_BITSET_MATCH_ANY;
 +              ret = futex_wait_requeue_pi(uaddr, fshared, val, timeout, val3,
 +                                          clockrt, uaddr2);
 +              break;
 +      case FUTEX_CMP_REQUEUE_PI:
 +              ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, &val3,
 +                                  1);
 +              break;
        default:
                ret = -ENOSYS;
        }
@@@ -2568,8 -1948,7 +2576,8 @@@ SYSCALL_DEFINE6(futex, u32 __user *, ua
        int cmd = op & FUTEX_CMD_MASK;
  
        if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
 -                    cmd == FUTEX_WAIT_BITSET)) {
 +                    cmd == FUTEX_WAIT_BITSET ||
 +                    cmd == FUTEX_WAIT_REQUEUE_PI)) {
                if (copy_from_user(&ts, utime, sizeof(ts)) != 0)
                        return -EFAULT;
                if (!timespec_valid(&ts))
                tp = &t;
        }
        /*
 -       * requeue parameter in 'utime' if cmd == FUTEX_REQUEUE.
 +       * requeue parameter in 'utime' if cmd == FUTEX_*_REQUEUE_*.
         * number of waiters to wake in 'utime' if cmd == FUTEX_WAKE_OP.
         */
        if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
 -          cmd == FUTEX_WAKE_OP)
 +          cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
                val2 = (u32) (unsigned long) utime;
  
        return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
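
For reference, the argument mapping the decoding above implies for the two
new commands (a summary sketch, not uapi documentation):

/*
 * futex(uaddr, FUTEX_WAIT_REQUEUE_PI, val, utime, uaddr2, val3)
 *   val    - expected value of *uaddr
 *   utime  - absolute timeout (validated via the timespec path above)
 *   uaddr2 - the PI futex to be acquired before returning to userspace
 *   val3   - ignored; do_futex() forces FUTEX_BITSET_MATCH_ANY
 *
 * futex(uaddr, FUTEX_CMP_REQUEUE_PI, val, utime, uaddr2, val3)
 *   val    - nr_wake, which must be 1
 *   utime  - reinterpreted as val2 = nr_requeue (see the cast above)
 *   uaddr2 - the PI futex the remaining waiters are requeued onto
 *   val3   - expected value of *uaddr (the CMP part of the operation)
 */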