workqueue: rename wq_mutex to wq_pool_mutex
[pandora-kernel.git] / kernel / workqueue.c
index fbc6576..064157e 100644
 #include <linux/debug_locks.h>
 #include <linux/lockdep.h>
 #include <linux/idr.h>
+#include <linux/jhash.h>
+#include <linux/hashtable.h>
+#include <linux/rculist.h>
 
-#include "workqueue_sched.h"
+#include "workqueue_internal.h"
 
 enum {
        /*
-        * global_cwq flags
+        * worker_pool flags
         *
-        * A bound gcwq is either associated or disassociated with its CPU.
+        * A bound pool is either associated or disassociated with its CPU.
         * While associated (!DISASSOCIATED), all workers are bound to the
         * CPU and none has %WORKER_UNBOUND set and concurrency management
         * is in effect.
         *
         * While DISASSOCIATED, the cpu may be offline and all workers have
         * %WORKER_UNBOUND set and concurrency management disabled, and may
-        * be executing on any CPU.  The gcwq behaves as an unbound one.
+        * be executing on any CPU.  The pool behaves as an unbound one.
         *
-        * Note that DISASSOCIATED can be flipped only while holding
-        * assoc_mutex of all pools on the gcwq to avoid changing binding
-        * state while create_worker() is in progress.
+        * Note that DISASSOCIATED should be flipped only while holding
+        * manager_mutex to avoid changing binding state while
+        * create_worker() is in progress.
         */
-       GCWQ_DISASSOCIATED      = 1 << 0,       /* cpu can't serve workers */
-       GCWQ_FREEZING           = 1 << 1,       /* freeze in progress */
-
-       /* pool flags */
        POOL_MANAGE_WORKERS     = 1 << 0,       /* need to manage workers */
-       POOL_MANAGING_WORKERS   = 1 << 1,       /* managing workers */
+       POOL_DISASSOCIATED      = 1 << 2,       /* cpu can't serve workers */
+       POOL_FREEZING           = 1 << 3,       /* freeze in progress */
 
        /* worker flags */
        WORKER_STARTED          = 1 << 0,       /* started */
@@ -75,15 +75,15 @@ enum {
        WORKER_PREP             = 1 << 3,       /* preparing to run works */
        WORKER_CPU_INTENSIVE    = 1 << 6,       /* cpu intensive */
        WORKER_UNBOUND          = 1 << 7,       /* worker is unbound */
+       WORKER_REBOUND          = 1 << 8,       /* worker was rebound */
 
-       WORKER_NOT_RUNNING      = WORKER_PREP | WORKER_UNBOUND |
-                                 WORKER_CPU_INTENSIVE,
+       WORKER_NOT_RUNNING      = WORKER_PREP | WORKER_CPU_INTENSIVE |
+                                 WORKER_UNBOUND | WORKER_REBOUND,
 
-       NR_WORKER_POOLS         = 2,            /* # worker pools per gcwq */
+       NR_STD_WORKER_POOLS     = 2,            /* # standard pools per cpu */
 
+       UNBOUND_POOL_HASH_ORDER = 6,            /* hashed by pool->attrs */
        BUSY_WORKER_HASH_ORDER  = 6,            /* 64 pointers */
-       BUSY_WORKER_HASH_SIZE   = 1 << BUSY_WORKER_HASH_ORDER,
-       BUSY_WORKER_HASH_MASK   = BUSY_WORKER_HASH_SIZE - 1,
 
        MAX_IDLE_WORKERS_RATIO  = 4,            /* 1/4 of busy can be idle */
        IDLE_WORKER_TIMEOUT     = 300 * HZ,     /* keep idle ones for 5 mins */
@@ -111,48 +111,36 @@ enum {
  * P: Preemption protected.  Disabling preemption is enough and should
  *    only be modified and accessed from the local cpu.
  *
- * L: gcwq->lock protected.  Access with gcwq->lock held.
+ * L: pool->lock protected.  Access with pool->lock held.
  *
- * X: During normal operation, modification requires gcwq->lock and
- *    should be done only from local cpu.  Either disabling preemption
- *    on local cpu or grabbing gcwq->lock is enough for read access.
- *    If GCWQ_DISASSOCIATED is set, it's identical to L.
+ * X: During normal operation, modification requires pool->lock and should
+ *    be done only from local cpu.  Either disabling preemption on local
+ *    cpu or grabbing pool->lock is enough for read access.  If
+ *    POOL_DISASSOCIATED is set, it's identical to L.
  *
  * F: wq->flush_mutex protected.
  *
- * W: workqueue_lock protected.
+ * MG: pool->manager_mutex and pool->lock protected.  Writes require both
+ *     locks.  Reads can happen under either lock.
+ *
+ * PL: wq_pool_mutex protected.
+ *
+ * PR: wq_pool_mutex protected for writes.  Sched-RCU protected for reads.
+ *
+ * PW: pwq_lock protected.
+ *
+ * FR: wq->flush_mutex and pwq_lock protected for writes.  Sched-RCU
+ *     protected for reads.
+ *
+ * MD: wq_mayday_lock protected.
  */
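
A minimal sketch, not part of the diff, of what the "PL:" annotation above implies in
practice.  It assumes the wq_pool_mutex and workqueues declarations introduced later
in this patch; example_wq_is_registered() is a hypothetical helper used only for
illustration:

	static bool example_wq_is_registered(struct workqueue_struct *target)
	{
		struct workqueue_struct *wq;
		bool found = false;

		mutex_lock(&wq_pool_mutex);	/* "PL:" fields are stable here */
		list_for_each_entry(wq, &workqueues, list)
			if (wq == target)
				found = true;
		mutex_unlock(&wq_pool_mutex);

		return found;
	}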
 
-struct global_cwq;
-struct worker_pool;
-
-/*
- * The poor guys doing the actual heavy lifting.  All on-duty workers
- * are either serving the manager role, on idle list or on busy hash.
- */
-struct worker {
-       /* on idle list while idle, on busy hash table while busy */
-       union {
-               struct list_head        entry;  /* L: while idle */
-               struct hlist_node       hentry; /* L: while busy */
-       };
-
-       struct work_struct      *current_work;  /* L: work being processed */
-       struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */
-       struct list_head        scheduled;      /* L: scheduled works */
-       struct task_struct      *task;          /* I: worker task */
-       struct worker_pool      *pool;          /* I: the associated pool */
-       /* 64 bytes boundary on 64bit, 32 on 32bit */
-       unsigned long           last_active;    /* L: last active timestamp */
-       unsigned int            flags;          /* X: flags */
-       int                     id;             /* I: worker id */
-
-       /* for rebinding worker to CPU */
-       struct work_struct      rebind_work;    /* L: for busy worker */
-};
+/* struct worker is defined in workqueue_internal.h */
 
 struct worker_pool {
-       struct global_cwq       *gcwq;          /* I: the owning gcwq */
+       spinlock_t              lock;           /* the pool lock */
+       int                     cpu;            /* I: the associated cpu */
+       int                     id;             /* I: pool ID */
        unsigned int            flags;          /* X: flags */
 
        struct list_head        worklist;       /* L: list of pending works */
@@ -165,44 +153,62 @@ struct worker_pool {
        struct timer_list       idle_timer;     /* L: worker idle timeout */
        struct timer_list       mayday_timer;   /* L: SOS timer for workers */
 
-       struct mutex            assoc_mutex;    /* protect GCWQ_DISASSOCIATED */
-       struct ida              worker_ida;     /* L: for worker IDs */
-};
+       /* a worker is either on busy_hash or idle_list, or the manager */
+       DECLARE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER);
+                                               /* L: hash of busy workers */
 
-/*
- * Global per-cpu workqueue.  There's one and only one for each cpu
- * and all works are queued and processed here regardless of their
- * target workqueues.
- */
-struct global_cwq {
-       spinlock_t              lock;           /* the gcwq lock */
-       unsigned int            cpu;            /* I: the associated cpu */
-       unsigned int            flags;          /* L: GCWQ_* flags */
+       /* see manage_workers() for details on the two manager mutexes */
+       struct mutex            manager_arb;    /* manager arbitration */
+       struct mutex            manager_mutex;  /* manager exclusion */
+       struct idr              worker_idr;     /* MG: worker IDs and iteration */
 
-       /* workers are chained either in busy_hash or pool idle_list */
-       struct hlist_head       busy_hash[BUSY_WORKER_HASH_SIZE];
-                                               /* L: hash of busy workers */
+       struct workqueue_attrs  *attrs;         /* I: worker attributes */
+       struct hlist_node       hash_node;      /* PL: unbound_pool_hash node */
+       int                     refcnt;         /* PL: refcnt for unbound pools */
+
+       /*
+        * The current concurrency level.  As it's likely to be accessed
+        * from other CPUs during try_to_wake_up(), put it in a separate
+        * cacheline.
+        */
+       atomic_t                nr_running ____cacheline_aligned_in_smp;
 
-       struct worker_pool      pools[NR_WORKER_POOLS];
-                                               /* normal and highpri pools */
+       /*
+        * Destruction of pool is sched-RCU protected to allow dereferences
+        * from get_work_pool().
+        */
+       struct rcu_head         rcu;
 } ____cacheline_aligned_in_smp;
 
 /*
- * The per-CPU workqueue.  The lower WORK_STRUCT_FLAG_BITS of
- * work_struct->data are used for flags and thus cwqs need to be
- * aligned at two's power of the number of flag bits.
+ * The per-pool workqueue.  While queued, the lower WORK_STRUCT_FLAG_BITS
+ * of work_struct->data are used for flags and the remaining high bits
+ * point to the pwq; thus, pwqs need to be aligned on a boundary of
+ * (1 << WORK_STRUCT_FLAG_BITS) bytes.
  */
-struct cpu_workqueue_struct {
+struct pool_workqueue {
        struct worker_pool      *pool;          /* I: the associated pool */
        struct workqueue_struct *wq;            /* I: the owning workqueue */
        int                     work_color;     /* L: current color */
        int                     flush_color;    /* L: flushing color */
+       int                     refcnt;         /* L: reference count */
        int                     nr_in_flight[WORK_NR_COLORS];
                                                /* L: nr of in_flight works */
        int                     nr_active;      /* L: nr of active works */
        int                     max_active;     /* L: max active works */
        struct list_head        delayed_works;  /* L: delayed works */
-};
+       struct list_head        pwqs_node;      /* FR: node on wq->pwqs */
+       struct list_head        mayday_node;    /* MD: node on wq->maydays */
+
+       /*
+        * Release of unbound pwq is punted to system_wq.  See put_pwq()
+        * and pwq_unbound_release_workfn() for details.  pool_workqueue
+        * itself is also sched-RCU protected so that the first pwq can be
+        * determined without grabbing pwq_lock.
+        */
+       struct work_struct      unbound_release_work;
+       struct rcu_head         rcu;
+} __aligned(1 << WORK_STRUCT_FLAG_BITS);
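
The __aligned() above is what makes the packing described in the comment work: the
low WORK_STRUCT_FLAG_BITS of a pwq address are guaranteed to be zero, so flags and
pointer can share one word of work->data.  An illustrative sketch, not taken from
the patch; example_pack()/example_unpack() are hypothetical helpers:

	static unsigned long example_pack(struct pool_workqueue *pwq)
	{
		/* low flag bits of the aligned pointer are known to be zero */
		return (unsigned long)pwq | WORK_STRUCT_PENDING | WORK_STRUCT_PWQ;
	}

	static struct pool_workqueue *example_unpack(unsigned long data)
	{
		return (struct pool_workqueue *)(data & WORK_STRUCT_WQ_DATA_MASK);
	}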
 
 /*
  * Structure used to wait for workqueue flush.
@@ -213,59 +219,62 @@ struct wq_flusher {
        struct completion       done;           /* flush completion */
 };
 
-/*
- * All cpumasks are assumed to be always set on UP and thus can't be
- * used to determine whether there's something to be done.
- */
-#ifdef CONFIG_SMP
-typedef cpumask_var_t mayday_mask_t;
-#define mayday_test_and_set_cpu(cpu, mask)     \
-       cpumask_test_and_set_cpu((cpu), (mask))
-#define mayday_clear_cpu(cpu, mask)            cpumask_clear_cpu((cpu), (mask))
-#define for_each_mayday_cpu(cpu, mask)         for_each_cpu((cpu), (mask))
-#define alloc_mayday_mask(maskp, gfp)          zalloc_cpumask_var((maskp), (gfp))
-#define free_mayday_mask(mask)                 free_cpumask_var((mask))
-#else
-typedef unsigned long mayday_mask_t;
-#define mayday_test_and_set_cpu(cpu, mask)     test_and_set_bit(0, &(mask))
-#define mayday_clear_cpu(cpu, mask)            clear_bit(0, &(mask))
-#define for_each_mayday_cpu(cpu, mask)         if ((cpu) = 0, (mask))
-#define alloc_mayday_mask(maskp, gfp)          true
-#define free_mayday_mask(mask)                 do { } while (0)
-#endif
+struct wq_device;
 
 /*
- * The externally visible workqueue abstraction is an array of
- * per-CPU workqueues:
+ * The externally visible workqueue.  It relays the issued work items to
+ * the appropriate worker_pool through its pool_workqueues.
  */
 struct workqueue_struct {
-       unsigned int            flags;          /* W: WQ_* flags */
-       union {
-               struct cpu_workqueue_struct __percpu    *pcpu;
-               struct cpu_workqueue_struct             *single;
-               unsigned long                           v;
-       } cpu_wq;                               /* I: cwq's */
-       struct list_head        list;           /* W: list of all workqueues */
+       unsigned int            flags;          /* PL: WQ_* flags */
+       struct pool_workqueue __percpu *cpu_pwqs; /* I: per-cpu pwq's */
+       struct list_head        pwqs;           /* FR: all pwqs of this wq */
+       struct list_head        list;           /* PL: list of all workqueues */
 
        struct mutex            flush_mutex;    /* protects wq flushing */
        int                     work_color;     /* F: current work color */
        int                     flush_color;    /* F: current flush color */
-       atomic_t                nr_cwqs_to_flush; /* flush in progress */
+       atomic_t                nr_pwqs_to_flush; /* flush in progress */
        struct wq_flusher       *first_flusher; /* F: first flusher */
        struct list_head        flusher_queue;  /* F: flush waiters */
        struct list_head        flusher_overflow; /* F: flush overflow list */
 
-       mayday_mask_t           mayday_mask;    /* cpus requesting rescue */
+       struct list_head        maydays;        /* MD: pwqs requesting rescue */
        struct worker           *rescuer;       /* I: rescue worker */
 
-       int                     nr_drainers;    /* W: drain in progress */
-       int                     saved_max_active; /* W: saved cwq max_active */
+       int                     nr_drainers;    /* PL: drain in progress */
+       int                     saved_max_active; /* PW: saved pwq max_active */
+
+#ifdef CONFIG_SYSFS
+       struct wq_device        *wq_dev;        /* I: for sysfs interface */
+#endif
 #ifdef CONFIG_LOCKDEP
        struct lockdep_map      lockdep_map;
 #endif
        char                    name[];         /* I: workqueue name */
 };
 
+static struct kmem_cache *pwq_cache;
+
+static DEFINE_MUTEX(wq_pool_mutex);    /* protects pools and workqueues list */
+static DEFINE_SPINLOCK(pwq_lock);      /* protects pool_workqueues */
+static DEFINE_SPINLOCK(wq_mayday_lock);        /* protects wq->maydays list */
+
+static LIST_HEAD(workqueues);          /* PL: list of all workqueues */
+static bool workqueue_freezing;                /* PL: have wqs started freezing? */
+
+/* the per-cpu worker pools */
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS],
+                                    cpu_worker_pools);
+
+static DEFINE_IDR(worker_pool_idr);    /* PR: idr of all pools */
+
+/* PL: hash of all unbound pools keyed by pool->attrs */
+static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);
+
+/* I: attributes used when instantiating standard unbound pools on demand */
+static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];
+
 struct workqueue_struct *system_wq __read_mostly;
 EXPORT_SYMBOL_GPL(system_wq);
 struct workqueue_struct *system_highpri_wq __read_mostly;
@@ -277,65 +286,87 @@ EXPORT_SYMBOL_GPL(system_unbound_wq);
 struct workqueue_struct *system_freezable_wq __read_mostly;
 EXPORT_SYMBOL_GPL(system_freezable_wq);
 
+static int worker_thread(void *__worker);
+static void copy_workqueue_attrs(struct workqueue_attrs *to,
+                                const struct workqueue_attrs *from);
+
 #define CREATE_TRACE_POINTS
 #include <trace/events/workqueue.h>
 
-#define for_each_worker_pool(pool, gcwq)                               \
-       for ((pool) = &(gcwq)->pools[0];                                \
-            (pool) < &(gcwq)->pools[NR_WORKER_POOLS]; (pool)++)
+#define assert_rcu_or_pool_mutex()                                     \
+       rcu_lockdep_assert(rcu_read_lock_sched_held() ||                \
+                          lockdep_is_held(&wq_pool_mutex),             \
+                          "sched RCU or wq_pool_mutex should be held")
 
-#define for_each_busy_worker(worker, i, pos, gcwq)                     \
-       for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)                     \
-               hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry)
+#define assert_rcu_or_pwq_lock()                                       \
+       rcu_lockdep_assert(rcu_read_lock_sched_held() ||                \
+                          lockdep_is_held(&pwq_lock),                  \
+                          "sched RCU or pwq_lock should be held")
 
-static inline int __next_gcwq_cpu(int cpu, const struct cpumask *mask,
-                                 unsigned int sw)
-{
-       if (cpu < nr_cpu_ids) {
-               if (sw & 1) {
-                       cpu = cpumask_next(cpu, mask);
-                       if (cpu < nr_cpu_ids)
-                               return cpu;
-               }
-               if (sw & 2)
-                       return WORK_CPU_UNBOUND;
-       }
-       return WORK_CPU_NONE;
-}
+#ifdef CONFIG_LOCKDEP
+#define assert_manager_or_pool_lock(pool)                              \
+       WARN_ONCE(debug_locks &&                                        \
+                 !lockdep_is_held(&(pool)->manager_mutex) &&           \
+                 !lockdep_is_held(&(pool)->lock),                      \
+                 "pool->manager_mutex or ->lock should be held")
+#else
+#define assert_manager_or_pool_lock(pool)      do { } while (0)
+#endif
 
-static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
-                               struct workqueue_struct *wq)
-{
-       return __next_gcwq_cpu(cpu, mask, !(wq->flags & WQ_UNBOUND) ? 1 : 2);
-}
+#define for_each_cpu_worker_pool(pool, cpu)                            \
+       for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0];               \
+            (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
+            (pool)++)
 
-/*
- * CPU iterators
+/**
+ * for_each_pool - iterate through all worker_pools in the system
+ * @pool: iteration cursor
+ * @pi: integer used for iteration
  *
- * An extra gcwq is defined for an invalid cpu number
- * (WORK_CPU_UNBOUND) to host workqueues which are not bound to any
- * specific CPU.  The following iterators are similar to
- * for_each_*_cpu() iterators but also considers the unbound gcwq.
+ * This must be called either with wq_pool_mutex held or sched RCU read
+ * locked.  If the pool needs to be used beyond the locking in effect, the
+ * caller is responsible for guaranteeing that the pool stays online.
  *
- * for_each_gcwq_cpu()         : possible CPUs + WORK_CPU_UNBOUND
- * for_each_online_gcwq_cpu()  : online CPUs + WORK_CPU_UNBOUND
- * for_each_cwq_cpu()          : possible CPUs for bound workqueues,
- *                               WORK_CPU_UNBOUND for unbound workqueues
+ * The if/else clause exists only for the lockdep assertion and can be
+ * ignored.
  */
-#define for_each_gcwq_cpu(cpu)                                         \
-       for ((cpu) = __next_gcwq_cpu(-1, cpu_possible_mask, 3);         \
-            (cpu) < WORK_CPU_NONE;                                     \
-            (cpu) = __next_gcwq_cpu((cpu), cpu_possible_mask, 3))
+#define for_each_pool(pool, pi)                                                \
+       idr_for_each_entry(&worker_pool_idr, pool, pi)                  \
+               if (({ assert_rcu_or_pool_mutex(); false; })) { }       \
+               else
 
-#define for_each_online_gcwq_cpu(cpu)                                  \
-       for ((cpu) = __next_gcwq_cpu(-1, cpu_online_mask, 3);           \
-            (cpu) < WORK_CPU_NONE;                                     \
-            (cpu) = __next_gcwq_cpu((cpu), cpu_online_mask, 3))
+/**
+ * for_each_pool_worker - iterate through all workers of a worker_pool
+ * @worker: iteration cursor
+ * @wi: integer used for iteration
+ * @pool: worker_pool to iterate workers of
+ *
+ * This must be called with either @pool->manager_mutex or ->lock held.
+ *
+ * The if/else clause exists only for the lockdep assertion and can be
+ * ignored.
+ */
+#define for_each_pool_worker(worker, wi, pool)                         \
+       idr_for_each_entry(&(pool)->worker_idr, (worker), (wi))         \
+               if (({ assert_manager_or_pool_lock((pool)); false; })) { } \
+               else
 
-#define for_each_cwq_cpu(cpu, wq)                                      \
-       for ((cpu) = __next_wq_cpu(-1, cpu_possible_mask, (wq));        \
-            (cpu) < WORK_CPU_NONE;                                     \
-            (cpu) = __next_wq_cpu((cpu), cpu_possible_mask, (wq)))
+/**
+ * for_each_pwq - iterate through all pool_workqueues of the specified workqueue
+ * @pwq: iteration cursor
+ * @wq: the target workqueue
+ *
+ * This must be called either with pwq_lock held or sched RCU read locked.
+ * If the pwq needs to be used beyond the locking in effect, the caller is
+ * responsible for guaranteeing that the pwq stays online.
+ *
+ * The if/else clause exists only for the lockdep assertion and can be
+ * ignored.
+ */
+#define for_each_pwq(pwq, wq)                                          \
+       list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node)          \
+               if (({ assert_rcu_or_pwq_lock(); false; })) { }         \
+               else
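
A minimal usage sketch of the iterator above, not part of the diff.  The sched-RCU
read lock satisfies assert_rcu_or_pwq_lock(); example_count_pwqs() is a hypothetical
helper:

	static int example_count_pwqs(struct workqueue_struct *wq)
	{
		struct pool_workqueue *pwq;
		int n = 0;

		rcu_read_lock_sched();
		for_each_pwq(pwq, wq)
			n++;
		rcu_read_unlock_sched();

		return n;
	}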
 
 #ifdef CONFIG_DEBUG_OBJECTS_WORK
 
@@ -453,64 +484,35 @@ static inline void debug_work_activate(struct work_struct *work) { }
 static inline void debug_work_deactivate(struct work_struct *work) { }
 #endif
 
-/* Serializes the accesses to the list of workqueues. */
-static DEFINE_SPINLOCK(workqueue_lock);
-static LIST_HEAD(workqueues);
-static bool workqueue_freezing;                /* W: have wqs started freezing? */
-
-/*
- * The almighty global cpu workqueues.  nr_running is the only field
- * which is expected to be used frequently by other cpus via
- * try_to_wake_up().  Put it in a separate cacheline.
- */
-static DEFINE_PER_CPU(struct global_cwq, global_cwq);
-static DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, pool_nr_running[NR_WORKER_POOLS]);
-
-/*
- * Global cpu workqueue and nr_running counter for unbound gcwq.  The
- * gcwq is always online, has GCWQ_DISASSOCIATED set, and all its
- * workers have WORKER_UNBOUND set.
- */
-static struct global_cwq unbound_global_cwq;
-static atomic_t unbound_pool_nr_running[NR_WORKER_POOLS] = {
-       [0 ... NR_WORKER_POOLS - 1]     = ATOMIC_INIT(0),       /* always 0 */
-};
-
-static int worker_thread(void *__worker);
-
-static int worker_pool_pri(struct worker_pool *pool)
+/* allocate ID and assign it to @pool */
+static int worker_pool_assign_id(struct worker_pool *pool)
 {
-       return pool - pool->gcwq->pools;
-}
+       int ret;
 
-static struct global_cwq *get_gcwq(unsigned int cpu)
-{
-       if (cpu != WORK_CPU_UNBOUND)
-               return &per_cpu(global_cwq, cpu);
-       else
-               return &unbound_global_cwq;
-}
+       lockdep_assert_held(&wq_pool_mutex);
 
-static atomic_t *get_pool_nr_running(struct worker_pool *pool)
-{
-       int cpu = pool->gcwq->cpu;
-       int idx = worker_pool_pri(pool);
+       do {
+               if (!idr_pre_get(&worker_pool_idr, GFP_KERNEL))
+                       return -ENOMEM;
+               ret = idr_get_new(&worker_pool_idr, pool, &pool->id);
+       } while (ret == -EAGAIN);
 
-       if (cpu != WORK_CPU_UNBOUND)
-               return &per_cpu(pool_nr_running, cpu)[idx];
-       else
-               return &unbound_pool_nr_running[idx];
+       return ret;
 }
 
-static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
-                                           struct workqueue_struct *wq)
+/**
+ * first_pwq - return the first pool_workqueue of the specified workqueue
+ * @wq: the target workqueue
+ *
+ * This must be called either with pwq_lock held or sched RCU read locked.
+ * If the pwq needs to be used beyond the locking in effect, the caller is
+ * responsible for guaranteeing that the pwq stays online.
+ */
+static struct pool_workqueue *first_pwq(struct workqueue_struct *wq)
 {
-       if (!(wq->flags & WQ_UNBOUND)) {
-               if (likely(cpu < nr_cpu_ids))
-                       return per_cpu_ptr(wq->cpu_wq.pcpu, cpu);
-       } else if (likely(cpu == WORK_CPU_UNBOUND))
-               return wq->cpu_wq.single;
-       return NULL;
+       assert_rcu_or_pwq_lock();
+       return list_first_or_null_rcu(&wq->pwqs, struct pool_workqueue,
+                                     pwqs_node);
 }
 
 static unsigned int work_color_to_flags(int color)
@@ -530,19 +532,19 @@ static int work_next_color(int color)
 }
 
 /*
- * While queued, %WORK_STRUCT_CWQ is set and non flag bits of a work's data
- * contain the pointer to the queued cwq.  Once execution starts, the flag
- * is cleared and the high bits contain OFFQ flags and CPU number.
+ * While queued, %WORK_STRUCT_PWQ is set and the non-flag bits of a work's data
+ * contain the pointer to the queued pwq.  Once execution starts, the flag
+ * is cleared and the high bits contain OFFQ flags and pool ID.
  *
- * set_work_cwq(), set_work_cpu_and_clear_pending(), mark_work_canceling()
- * and clear_work_data() can be used to set the cwq, cpu or clear
+ * set_work_pwq(), set_work_pool_and_clear_pending(), mark_work_canceling()
+ * and clear_work_data() can be used to set the pwq, pool or clear
  * work->data.  These functions should only be called while the work is
  * owned - ie. while the PENDING bit is set.
  *
- * get_work_[g]cwq() can be used to obtain the gcwq or cwq corresponding to
- * a work.  gcwq is available once the work has been queued anywhere after
- * initialization until it is sync canceled.  cwq is available only while
- * the work item is queued.
+ * get_work_pool() and get_work_pwq() can be used to obtain the pool or pwq
+ * corresponding to a work.  Pool is available once the work has been
+ * queued anywhere after initialization until it is sync canceled.  pwq is
+ * available only while the work item is queued.
  *
  * %WORK_OFFQ_CANCELING is used to mark a work item which is being
  * canceled.  While being canceled, a work item may have its PENDING set
@@ -552,20 +554,26 @@ static int work_next_color(int color)
 static inline void set_work_data(struct work_struct *work, unsigned long data,
                                 unsigned long flags)
 {
-       BUG_ON(!work_pending(work));
+       WARN_ON_ONCE(!work_pending(work));
        atomic_long_set(&work->data, data | flags | work_static(work));
 }
 
-static void set_work_cwq(struct work_struct *work,
-                        struct cpu_workqueue_struct *cwq,
+static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq,
                         unsigned long extra_flags)
 {
-       set_work_data(work, (unsigned long)cwq,
-                     WORK_STRUCT_PENDING | WORK_STRUCT_CWQ | extra_flags);
+       set_work_data(work, (unsigned long)pwq,
+                     WORK_STRUCT_PENDING | WORK_STRUCT_PWQ | extra_flags);
+}
+
+static void set_work_pool_and_keep_pending(struct work_struct *work,
+                                          int pool_id)
+{
+       set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT,
+                     WORK_STRUCT_PENDING);
 }
 
-static void set_work_cpu_and_clear_pending(struct work_struct *work,
-                                          unsigned int cpu)
+static void set_work_pool_and_clear_pending(struct work_struct *work,
+                                           int pool_id)
 {
        /*
         * The following wmb is paired with the implied mb in
@@ -574,67 +582,100 @@ static void set_work_cpu_and_clear_pending(struct work_struct *work,
         * owner.
         */
        smp_wmb();
-       set_work_data(work, (unsigned long)cpu << WORK_OFFQ_CPU_SHIFT, 0);
+       set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
 }
 
 static void clear_work_data(struct work_struct *work)
 {
-       smp_wmb();      /* see set_work_cpu_and_clear_pending() */
-       set_work_data(work, WORK_STRUCT_NO_CPU, 0);
+       smp_wmb();      /* see set_work_pool_and_clear_pending() */
+       set_work_data(work, WORK_STRUCT_NO_POOL, 0);
 }
 
-static struct cpu_workqueue_struct *get_work_cwq(struct work_struct *work)
+static struct pool_workqueue *get_work_pwq(struct work_struct *work)
 {
        unsigned long data = atomic_long_read(&work->data);
 
-       if (data & WORK_STRUCT_CWQ)
+       if (data & WORK_STRUCT_PWQ)
                return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
        else
                return NULL;
 }
 
-static struct global_cwq *get_work_gcwq(struct work_struct *work)
+/**
+ * get_work_pool - return the worker_pool a given work was associated with
+ * @work: the work item of interest
+ *
+ * Return the worker_pool @work was last associated with.  %NULL if none.
+ *
+ * Pools are created and destroyed under wq_pool_mutex, and read access is
+ * allowed under the sched-RCU read lock.  As such, this function should be
+ * called under wq_pool_mutex or with preemption disabled.
+ *
+ * All fields of the returned pool are accessible as long as the above
+ * mentioned locking is in effect.  If the returned pool needs to be used
+ * beyond the critical section, the caller is responsible for ensuring the
+ * returned pool is and stays online.
+ */
+static struct worker_pool *get_work_pool(struct work_struct *work)
 {
        unsigned long data = atomic_long_read(&work->data);
-       unsigned int cpu;
+       int pool_id;
+
+       assert_rcu_or_pool_mutex();
 
-       if (data & WORK_STRUCT_CWQ)
-               return ((struct cpu_workqueue_struct *)
-                       (data & WORK_STRUCT_WQ_DATA_MASK))->pool->gcwq;
+       if (data & WORK_STRUCT_PWQ)
+               return ((struct pool_workqueue *)
+                       (data & WORK_STRUCT_WQ_DATA_MASK))->pool;
 
-       cpu = data >> WORK_OFFQ_CPU_SHIFT;
-       if (cpu == WORK_CPU_NONE)
+       pool_id = data >> WORK_OFFQ_POOL_SHIFT;
+       if (pool_id == WORK_OFFQ_POOL_NONE)
                return NULL;
 
-       BUG_ON(cpu >= nr_cpu_ids && cpu != WORK_CPU_UNBOUND);
-       return get_gcwq(cpu);
+       return idr_find(&worker_pool_idr, pool_id);
+}
+
+/**
+ * get_work_pool_id - return the worker pool ID a given work is associated with
+ * @work: the work item of interest
+ *
+ * Return the worker_pool ID @work was last associated with.
+ * %WORK_OFFQ_POOL_NONE if none.
+ */
+static int get_work_pool_id(struct work_struct *work)
+{
+       unsigned long data = atomic_long_read(&work->data);
+
+       if (data & WORK_STRUCT_PWQ)
+               return ((struct pool_workqueue *)
+                       (data & WORK_STRUCT_WQ_DATA_MASK))->pool->id;
+
+       return data >> WORK_OFFQ_POOL_SHIFT;
 }
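
A small sketch, not from the patch, of the off-queue encoding round trip that
get_work_pool_id() relies on.  It assumes the caller owns the PENDING bit on @work;
example_offq_roundtrip() is hypothetical:

	static void example_offq_roundtrip(struct work_struct *work, int pool_id)
	{
		set_work_pool_and_keep_pending(work, pool_id);

		/* off queue: PWQ bit is clear, high bits carry the pool ID */
		WARN_ON_ONCE(get_work_pwq(work));
		WARN_ON_ONCE(get_work_pool_id(work) != pool_id);
	}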
 
 static void mark_work_canceling(struct work_struct *work)
 {
-       struct global_cwq *gcwq = get_work_gcwq(work);
-       unsigned long cpu = gcwq ? gcwq->cpu : WORK_CPU_NONE;
+       unsigned long pool_id = get_work_pool_id(work);
 
-       set_work_data(work, (cpu << WORK_OFFQ_CPU_SHIFT) | WORK_OFFQ_CANCELING,
-                     WORK_STRUCT_PENDING);
+       pool_id <<= WORK_OFFQ_POOL_SHIFT;
+       set_work_data(work, pool_id | WORK_OFFQ_CANCELING, WORK_STRUCT_PENDING);
 }
 
 static bool work_is_canceling(struct work_struct *work)
 {
        unsigned long data = atomic_long_read(&work->data);
 
-       return !(data & WORK_STRUCT_CWQ) && (data & WORK_OFFQ_CANCELING);
+       return !(data & WORK_STRUCT_PWQ) && (data & WORK_OFFQ_CANCELING);
 }
 
 /*
  * Policy functions.  These define the policies on how the global worker
  * pools are managed.  Unless noted otherwise, these functions assume that
- * they're being called with gcwq->lock held.
+ * they're being called with pool->lock held.
  */
 
 static bool __need_more_worker(struct worker_pool *pool)
 {
-       return !atomic_read(get_pool_nr_running(pool));
+       return !atomic_read(&pool->nr_running);
 }
 
 /*
@@ -642,7 +683,7 @@ static bool __need_more_worker(struct worker_pool *pool)
  * running workers.
  *
  * Note that, because unbound workers never contribute to nr_running, this
- * function will always return %true for unbound gcwq as long as the
+ * function will always return %true for unbound pools as long as the
  * worklist isn't empty.
  */
 static bool need_more_worker(struct worker_pool *pool)
@@ -659,9 +700,8 @@ static bool may_start_working(struct worker_pool *pool)
 /* Do I need to keep working?  Called from currently running workers. */
 static bool keep_working(struct worker_pool *pool)
 {
-       atomic_t *nr_running = get_pool_nr_running(pool);
-
-       return !list_empty(&pool->worklist) && atomic_read(nr_running) <= 1;
+       return !list_empty(&pool->worklist) &&
+               atomic_read(&pool->nr_running) <= 1;
 }
 
 /* Do we need a new worker?  Called from manager. */
@@ -680,7 +720,7 @@ static bool need_to_manage_workers(struct worker_pool *pool)
 /* Do we have too many workers and should some go away? */
 static bool too_many_workers(struct worker_pool *pool)
 {
-       bool managing = pool->flags & POOL_MANAGING_WORKERS;
+       bool managing = mutex_is_locked(&pool->manager_arb);
        int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
        int nr_busy = pool->nr_workers - nr_idle;
 
@@ -714,7 +754,7 @@ static struct worker *first_worker(struct worker_pool *pool)
  * Wake up the first idle worker of @pool.
  *
  * CONTEXT:
- * spin_lock_irq(gcwq->lock).
+ * spin_lock_irq(pool->lock).
  */
 static void wake_up_worker(struct worker_pool *pool)
 {
@@ -735,13 +775,13 @@ static void wake_up_worker(struct worker_pool *pool)
  * CONTEXT:
  * spin_lock_irq(rq->lock)
  */
-void wq_worker_waking_up(struct task_struct *task, unsigned int cpu)
+void wq_worker_waking_up(struct task_struct *task, int cpu)
 {
        struct worker *worker = kthread_data(task);
 
        if (!(worker->flags & WORKER_NOT_RUNNING)) {
-               WARN_ON_ONCE(worker->pool->gcwq->cpu != cpu);
-               atomic_inc(get_pool_nr_running(worker->pool));
+               WARN_ON_ONCE(worker->pool->cpu != cpu);
+               atomic_inc(&worker->pool->nr_running);
        }
 }
 
@@ -760,18 +800,24 @@ void wq_worker_waking_up(struct task_struct *task, unsigned int cpu)
  * RETURNS:
  * Worker task on @cpu to wake up, %NULL if none.
  */
-struct task_struct *wq_worker_sleeping(struct task_struct *task,
-                                      unsigned int cpu)
+struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu)
 {
        struct worker *worker = kthread_data(task), *to_wakeup = NULL;
-       struct worker_pool *pool = worker->pool;
-       atomic_t *nr_running = get_pool_nr_running(pool);
+       struct worker_pool *pool;
 
+       /*
+        * Rescuers, which may not have all the fields set up like normal
+        * workers, also reach here; don't access anything before checking
+        * NOT_RUNNING.
+        */
        if (worker->flags & WORKER_NOT_RUNNING)
                return NULL;
 
+       pool = worker->pool;
+
        /* this can only happen on the local cpu */
-       BUG_ON(cpu != raw_smp_processor_id());
+       if (WARN_ON_ONCE(cpu != raw_smp_processor_id()))
+               return NULL;
 
        /*
         * The counterpart of the following dec_and_test, implied mb,
@@ -781,10 +827,11 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task,
         * NOT_RUNNING is clear.  This means that we're bound to and
         * running on the local cpu w/ rq lock held and preemption
         * disabled, which in turn means that none else could be
-        * manipulating idle_list, so dereferencing idle_list without gcwq
+        * manipulating idle_list, so dereferencing idle_list without pool
         * lock is safe.
         */
-       if (atomic_dec_and_test(nr_running) && !list_empty(&pool->worklist))
+       if (atomic_dec_and_test(&pool->nr_running) &&
+           !list_empty(&pool->worklist))
                to_wakeup = first_worker(pool);
        return to_wakeup ? to_wakeup->task : NULL;
 }
@@ -800,7 +847,7 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task,
  * woken up.
  *
  * CONTEXT:
- * spin_lock_irq(gcwq->lock)
+ * spin_lock_irq(pool->lock)
  */
 static inline void worker_set_flags(struct worker *worker, unsigned int flags,
                                    bool wakeup)
@@ -816,14 +863,12 @@ static inline void worker_set_flags(struct worker *worker, unsigned int flags,
         */
        if ((flags & WORKER_NOT_RUNNING) &&
            !(worker->flags & WORKER_NOT_RUNNING)) {
-               atomic_t *nr_running = get_pool_nr_running(pool);
-
                if (wakeup) {
-                       if (atomic_dec_and_test(nr_running) &&
+                       if (atomic_dec_and_test(&pool->nr_running) &&
                            !list_empty(&pool->worklist))
                                wake_up_worker(pool);
                } else
-                       atomic_dec(nr_running);
+                       atomic_dec(&pool->nr_running);
        }
 
        worker->flags |= flags;
@@ -837,7 +882,7 @@ static inline void worker_set_flags(struct worker *worker, unsigned int flags,
  * Clear @flags in @worker->flags and adjust nr_running accordingly.
  *
  * CONTEXT:
- * spin_lock_irq(gcwq->lock)
+ * spin_lock_irq(pool->lock)
  */
 static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
 {
@@ -855,87 +900,54 @@ static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
         */
        if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
                if (!(worker->flags & WORKER_NOT_RUNNING))
-                       atomic_inc(get_pool_nr_running(pool));
-}
-
-/**
- * busy_worker_head - return the busy hash head for a work
- * @gcwq: gcwq of interest
- * @work: work to be hashed
- *
- * Return hash head of @gcwq for @work.
- *
- * CONTEXT:
- * spin_lock_irq(gcwq->lock).
- *
- * RETURNS:
- * Pointer to the hash head.
- */
-static struct hlist_head *busy_worker_head(struct global_cwq *gcwq,
-                                          struct work_struct *work)
-{
-       const int base_shift = ilog2(sizeof(struct work_struct));
-       unsigned long v = (unsigned long)work;
-
-       /* simple shift and fold hash, do we need something better? */
-       v >>= base_shift;
-       v += v >> BUSY_WORKER_HASH_ORDER;
-       v &= BUSY_WORKER_HASH_MASK;
-
-       return &gcwq->busy_hash[v];
+                       atomic_inc(&pool->nr_running);
 }
 
 /**
- * __find_worker_executing_work - find worker which is executing a work
- * @gcwq: gcwq of interest
- * @bwh: hash head as returned by busy_worker_head()
+ * find_worker_executing_work - find worker which is executing a work
+ * @pool: pool of interest
  * @work: work to find worker for
  *
- * Find a worker which is executing @work on @gcwq.  @bwh should be
- * the hash head obtained by calling busy_worker_head() with the same
- * work.
+ * Find a worker which is executing @work on @pool by searching
+ * @pool->busy_hash which is keyed by the address of @work.  For a worker
+ * to match, its current execution should match the address of @work and
+ * its work function.  This is to avoid unwanted dependency between
+ * unrelated work executions through a work item being recycled while still
+ * being executed.
+ *
+ * This is a bit tricky.  A work item may be freed once its execution
+ * starts and nothing prevents the freed area from being recycled for
+ * another work item.  If the same work item address ends up being reused
+ * before the original execution finishes, workqueue will identify the
+ * recycled work item as currently executing and make it wait until the
+ * current execution finishes, introducing an unwanted dependency.
+ *
+ * This function checks the work item address and work function to avoid
+ * false positives.  Note that this isn't complete as one may construct a
+ * work function which can introduce dependency onto itself through a
+ * recycled work item.  Well, if somebody wants to shoot oneself in the
+ * foot that badly, there's only so much we can do, and if such deadlock
+ * actually occurs, it should be easy to locate the culprit work function.
  *
  * CONTEXT:
- * spin_lock_irq(gcwq->lock).
+ * spin_lock_irq(pool->lock).
  *
  * RETURNS:
  * Pointer to worker which is executing @work if found, NULL
  * otherwise.
  */
-static struct worker *__find_worker_executing_work(struct global_cwq *gcwq,
-                                                  struct hlist_head *bwh,
-                                                  struct work_struct *work)
+static struct worker *find_worker_executing_work(struct worker_pool *pool,
+                                                struct work_struct *work)
 {
        struct worker *worker;
-       struct hlist_node *tmp;
 
-       hlist_for_each_entry(worker, tmp, bwh, hentry)
-               if (worker->current_work == work)
+       hash_for_each_possible(pool->busy_hash, worker, hentry,
+                              (unsigned long)work)
+               if (worker->current_work == work &&
+                   worker->current_func == work->func)
                        return worker;
-       return NULL;
-}
 
-/**
- * find_worker_executing_work - find worker which is executing a work
- * @gcwq: gcwq of interest
- * @work: work to find worker for
- *
- * Find a worker which is executing @work on @gcwq.  This function is
- * identical to __find_worker_executing_work() except that this
- * function calculates @bwh itself.
- *
- * CONTEXT:
- * spin_lock_irq(gcwq->lock).
- *
- * RETURNS:
- * Pointer to worker which is executing @work if found, NULL
- * otherwise.
- */
-static struct worker *find_worker_executing_work(struct global_cwq *gcwq,
-                                                struct work_struct *work)
-{
-       return __find_worker_executing_work(gcwq, busy_worker_head(gcwq, work),
-                                           work);
+       return NULL;
 }
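
A minimal caller sketch, not part of the diff, similar in spirit to what work_busy()
does; example_work_is_running() is a hypothetical helper and its answer is only a
snapshot once pool->lock is dropped:

	static bool example_work_is_running(struct worker_pool *pool,
					    struct work_struct *work)
	{
		bool running;

		spin_lock_irq(&pool->lock);
		running = find_worker_executing_work(pool, work) != NULL;
		spin_unlock_irq(&pool->lock);

		return running;
	}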
 
 /**
@@ -953,7 +965,7 @@ static struct worker *find_worker_executing_work(struct global_cwq *gcwq,
  * nested inside outer list_for_each_entry_safe().
  *
  * CONTEXT:
- * spin_lock_irq(gcwq->lock).
+ * spin_lock_irq(pool->lock).
  */
 static void move_linked_works(struct work_struct *work, struct list_head *head,
                              struct work_struct **nextp)
@@ -979,67 +991,108 @@ static void move_linked_works(struct work_struct *work, struct list_head *head,
                *nextp = n;
 }
 
-static void cwq_activate_delayed_work(struct work_struct *work)
+/**
+ * get_pwq - get an extra reference on the specified pool_workqueue
+ * @pwq: pool_workqueue to get
+ *
+ * Obtain an extra reference on @pwq.  The caller should guarantee that
+ * @pwq has positive refcnt and be holding the matching pool->lock.
+ */
+static void get_pwq(struct pool_workqueue *pwq)
+{
+       lockdep_assert_held(&pwq->pool->lock);
+       WARN_ON_ONCE(pwq->refcnt <= 0);
+       pwq->refcnt++;
+}
+
+/**
+ * put_pwq - put a pool_workqueue reference
+ * @pwq: pool_workqueue to put
+ *
+ * Drop a reference of @pwq.  If its refcnt reaches zero, schedule its
+ * destruction.  The caller should be holding the matching pool->lock.
+ */
+static void put_pwq(struct pool_workqueue *pwq)
+{
+       lockdep_assert_held(&pwq->pool->lock);
+       if (likely(--pwq->refcnt))
+               return;
+       if (WARN_ON_ONCE(!(pwq->wq->flags & WQ_UNBOUND)))
+               return;
+       /*
+        * @pwq can't be released under pool->lock, bounce to
+        * pwq_unbound_release_workfn().  This never recurses on the same
+        * pool->lock as this path is taken only for unbound workqueues and
+        * the release work item is scheduled on a per-cpu workqueue.  To
+        * avoid lockdep warning, unbound pool->locks are given lockdep
+        * subclass of 1 in get_unbound_pool().
+        */
+       schedule_work(&pwq->unbound_release_work);
+}
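
A short sketch of how the refcount is used by the queueing path in this patch:
insert_work() takes a reference with get_pwq() when a work item goes onto a pwq, and
pwq_dec_nr_in_flight() drops it with put_pwq() when the item retires.  Illustration
only; example_pwq_ref_cycle() is hypothetical:

	static void example_pwq_ref_cycle(struct pool_workqueue *pwq)
	{
		spin_lock_irq(&pwq->pool->lock);
		get_pwq(pwq);		/* as insert_work() does on queueing */
		put_pwq(pwq);		/* as pwq_dec_nr_in_flight() does later */
		spin_unlock_irq(&pwq->pool->lock);
	}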
+
+static void pwq_activate_delayed_work(struct work_struct *work)
 {
-       struct cpu_workqueue_struct *cwq = get_work_cwq(work);
+       struct pool_workqueue *pwq = get_work_pwq(work);
 
        trace_workqueue_activate_work(work);
-       move_linked_works(work, &cwq->pool->worklist, NULL);
+       move_linked_works(work, &pwq->pool->worklist, NULL);
        __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
-       cwq->nr_active++;
+       pwq->nr_active++;
 }
 
-static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
+static void pwq_activate_first_delayed(struct pool_workqueue *pwq)
 {
-       struct work_struct *work = list_first_entry(&cwq->delayed_works,
+       struct work_struct *work = list_first_entry(&pwq->delayed_works,
                                                    struct work_struct, entry);
 
-       cwq_activate_delayed_work(work);
+       pwq_activate_delayed_work(work);
 }
 
 /**
- * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
- * @cwq: cwq of interest
+ * pwq_dec_nr_in_flight - decrement pwq's nr_in_flight
+ * @pwq: pwq of interest
  * @color: color of work which left the queue
  *
  * A work either has completed or is removed from pending queue,
- * decrement nr_in_flight of its cwq and handle workqueue flushing.
+ * decrement nr_in_flight of its pwq and handle workqueue flushing.
  *
  * CONTEXT:
- * spin_lock_irq(gcwq->lock).
+ * spin_lock_irq(pool->lock).
  */
-static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
+static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, int color)
 {
-       /* ignore uncolored works */
+       /* uncolored work items don't participate in flushing or nr_active */
        if (color == WORK_NO_COLOR)
-               return;
+               goto out_put;
 
-       cwq->nr_in_flight[color]--;
+       pwq->nr_in_flight[color]--;
 
-       cwq->nr_active--;
-       if (!list_empty(&cwq->delayed_works)) {
+       pwq->nr_active--;
+       if (!list_empty(&pwq->delayed_works)) {
                /* one down, submit a delayed one */
-               if (cwq->nr_active < cwq->max_active)
-                       cwq_activate_first_delayed(cwq);
+               if (pwq->nr_active < pwq->max_active)
+                       pwq_activate_first_delayed(pwq);
        }
 
        /* is flush in progress and are we at the flushing tip? */
-       if (likely(cwq->flush_color != color))
-               return;
+       if (likely(pwq->flush_color != color))
+               goto out_put;
 
        /* are there still in-flight works? */
-       if (cwq->nr_in_flight[color])
-               return;
+       if (pwq->nr_in_flight[color])
+               goto out_put;
 
-       /* this cwq is done, clear flush_color */
-       cwq->flush_color = -1;
+       /* this pwq is done, clear flush_color */
+       pwq->flush_color = -1;
 
        /*
-        * If this was the last cwq, wake up the first flusher.  It
+        * If this was the last pwq, wake up the first flusher.  It
         * will handle the rest.
         */
-       if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush))
-               complete(&cwq->wq->first_flusher->done);
+       if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush))
+               complete(&pwq->wq->first_flusher->done);
+out_put:
+       put_pwq(pwq);
 }
 
 /**
@@ -1070,7 +1123,8 @@ static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
 static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
                               unsigned long *flags)
 {
-       struct global_cwq *gcwq;
+       struct worker_pool *pool;
+       struct pool_workqueue *pwq;
 
        local_irq_save(*flags);
 
@@ -1095,41 +1149,43 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
         * The queueing is in progress, or it is already queued. Try to
         * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
         */
-       gcwq = get_work_gcwq(work);
-       if (!gcwq)
+       pool = get_work_pool(work);
+       if (!pool)
                goto fail;
 
-       spin_lock(&gcwq->lock);
-       if (!list_empty(&work->entry)) {
+       spin_lock(&pool->lock);
+       /*
+        * work->data is guaranteed to point to pwq only while the work
+        * item is queued on pwq->wq, and both updating work->data to point
+        * to pwq on queueing and to pool on dequeueing are done under
+        * pwq->pool->lock.  This in turn guarantees that, if work->data
+        * points to pwq which is associated with a locked pool, the work
+        * item is currently queued on that pool.
+        */
+       pwq = get_work_pwq(work);
+       if (pwq && pwq->pool == pool) {
+               debug_work_deactivate(work);
+
                /*
-                * This work is queued, but perhaps we locked the wrong gcwq.
-                * In that case we must see the new value after rmb(), see
-                * insert_work()->wmb().
+                * A delayed work item cannot be grabbed directly because
+                * it might have linked NO_COLOR work items which, if left
+                * on the delayed_list, will confuse pwq->nr_active
+                * management later on and cause stall.  Make sure the work
+                * item is activated before grabbing.
                 */
-               smp_rmb();
-               if (gcwq == get_work_gcwq(work)) {
-                       debug_work_deactivate(work);
+               if (*work_data_bits(work) & WORK_STRUCT_DELAYED)
+                       pwq_activate_delayed_work(work);
 
-                       /*
-                        * A delayed work item cannot be grabbed directly
-                        * because it might have linked NO_COLOR work items
-                        * which, if left on the delayed_list, will confuse
-                        * cwq->nr_active management later on and cause
-                        * stall.  Make sure the work item is activated
-                        * before grabbing.
-                        */
-                       if (*work_data_bits(work) & WORK_STRUCT_DELAYED)
-                               cwq_activate_delayed_work(work);
+               list_del_init(&work->entry);
+               pwq_dec_nr_in_flight(get_work_pwq(work), get_work_color(work));
 
-                       list_del_init(&work->entry);
-                       cwq_dec_nr_in_flight(get_work_cwq(work),
-                               get_work_color(work));
+               /* work->data points to pwq iff queued, point to pool */
+               set_work_pool_and_keep_pending(work, pool->id);
 
-                       spin_unlock(&gcwq->lock);
-                       return 1;
-               }
+               spin_unlock(&pool->lock);
+               return 1;
        }
-       spin_unlock(&gcwq->lock);
+       spin_unlock(&pool->lock);
 fail:
        local_irq_restore(*flags);
        if (work_is_canceling(work))
@@ -1139,39 +1195,32 @@ fail:
 }
 
 /**
- * insert_work - insert a work into gcwq
- * @cwq: cwq @work belongs to
+ * insert_work - insert a work into a pool
+ * @pwq: pwq @work belongs to
  * @work: work to insert
  * @head: insertion point
  * @extra_flags: extra WORK_STRUCT_* flags to set
  *
- * Insert @work which belongs to @cwq into @gcwq after @head.
- * @extra_flags is or'd to work_struct flags.
+ * Insert @work which belongs to @pwq after @head.  @extra_flags is or'd to
+ * work_struct flags.
  *
  * CONTEXT:
- * spin_lock_irq(gcwq->lock).
+ * spin_lock_irq(pool->lock).
  */
-static void insert_work(struct cpu_workqueue_struct *cwq,
-                       struct work_struct *work, struct list_head *head,
-                       unsigned int extra_flags)
+static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
+                       struct list_head *head, unsigned int extra_flags)
 {
-       struct worker_pool *pool = cwq->pool;
+       struct worker_pool *pool = pwq->pool;
 
        /* we own @work, set data and link */
-       set_work_cwq(work, cwq, extra_flags);
-
-       /*
-        * Ensure that we get the right work->data if we see the
-        * result of list_add() below, see try_to_grab_pending().
-        */
-       smp_wmb();
-
+       set_work_pwq(work, pwq, extra_flags);
        list_add_tail(&work->entry, head);
+       get_pwq(pwq);
 
        /*
-        * Ensure either worker_sched_deactivated() sees the above
-        * list_add_tail() or we see zero nr_running to avoid workers
-        * lying around lazily while there are works to be processed.
+        * Ensure either wq_worker_sleeping() sees the above
+        * list_add_tail() or we see zero nr_running to avoid workers lying
+        * around lazily while there are works to be processed.
         */
        smp_mb();
 
@@ -1181,41 +1230,25 @@ static void insert_work(struct cpu_workqueue_struct *cwq,
 
 /*
  * Test whether @work is being queued from another work executing on the
- * same workqueue.  This is rather expensive and should only be used from
- * cold paths.
+ * same workqueue.
  */
 static bool is_chained_work(struct workqueue_struct *wq)
 {
-       unsigned long flags;
-       unsigned int cpu;
-
-       for_each_gcwq_cpu(cpu) {
-               struct global_cwq *gcwq = get_gcwq(cpu);
-               struct worker *worker;
-               struct hlist_node *pos;
-               int i;
+       struct worker *worker;
 
-               spin_lock_irqsave(&gcwq->lock, flags);
-               for_each_busy_worker(worker, i, pos, gcwq) {
-                       if (worker->task != current)
-                               continue;
-                       spin_unlock_irqrestore(&gcwq->lock, flags);
-                       /*
-                        * I'm @worker, no locking necessary.  See if @work
-                        * is headed to the same workqueue.
-                        */
-                       return worker->current_cwq->wq == wq;
-               }
-               spin_unlock_irqrestore(&gcwq->lock, flags);
-       }
-       return false;
+       worker = current_wq_worker();
+       /*
+        * Return %true iff I'm a worker executing a work item on @wq.  If
+        * I'm @worker, it's safe to dereference it without locking.
+        */
+       return worker && worker->current_pwq->wq == wq;
 }
 
-static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
+static void __queue_work(int cpu, struct workqueue_struct *wq,
                         struct work_struct *work)
 {
-       struct global_cwq *gcwq;
-       struct cpu_workqueue_struct *cwq;
+       struct pool_workqueue *pwq;
+       struct worker_pool *last_pool;
        struct list_head *worklist;
        unsigned int work_flags;
        unsigned int req_cpu = cpu;
@@ -1231,72 +1264,85 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
        debug_work_activate(work);
 
        /* if dying, only works from the same workqueue are allowed */
-       if (unlikely(wq->flags & WQ_DRAINING) &&
+       if (unlikely(wq->flags & __WQ_DRAINING) &&
            WARN_ON_ONCE(!is_chained_work(wq)))
                return;
-
-       /* determine gcwq to use */
+retry:
+       /* pwq which will be used unless @work is executing elsewhere */
        if (!(wq->flags & WQ_UNBOUND)) {
-               struct global_cwq *last_gcwq;
-
                if (cpu == WORK_CPU_UNBOUND)
                        cpu = raw_smp_processor_id();
+               pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
+       } else {
+               pwq = first_pwq(wq);
+       }
 
-               /*
-                * It's multi cpu.  If @work was previously on a different
-                * cpu, it might still be running there, in which case the
-                * work needs to be queued on that cpu to guarantee
-                * non-reentrancy.
-                */
-               gcwq = get_gcwq(cpu);
-               last_gcwq = get_work_gcwq(work);
-
-               if (last_gcwq && last_gcwq != gcwq) {
-                       struct worker *worker;
+       /*
+        * If @work was previously on a different pool, it might still be
+        * running there, in which case the work needs to be queued on that
+        * pool to guarantee non-reentrancy.
+        */
+       last_pool = get_work_pool(work);
+       if (last_pool && last_pool != pwq->pool) {
+               struct worker *worker;
 
-                       spin_lock(&last_gcwq->lock);
+               spin_lock(&last_pool->lock);
 
-                       worker = find_worker_executing_work(last_gcwq, work);
+               worker = find_worker_executing_work(last_pool, work);
 
-                       if (worker && worker->current_cwq->wq == wq)
-                               gcwq = last_gcwq;
-                       else {
-                               /* meh... not running there, queue here */
-                               spin_unlock(&last_gcwq->lock);
-                               spin_lock(&gcwq->lock);
-                       }
+               if (worker && worker->current_pwq->wq == wq) {
+                       pwq = worker->current_pwq;
                } else {
-                       spin_lock(&gcwq->lock);
+                       /* meh... not running there, queue here */
+                       spin_unlock(&last_pool->lock);
+                       spin_lock(&pwq->pool->lock);
                }
        } else {
-               gcwq = get_gcwq(WORK_CPU_UNBOUND);
-               spin_lock(&gcwq->lock);
+               spin_lock(&pwq->pool->lock);
+       }
+
+       /*
+        * pwq is determined and locked.  For unbound pools, we could have
+        * raced with pwq release and it could already be dead.  If its
+        * refcnt is zero, repeat pwq selection.  Note that pwqs never die
+        * without another pwq replacing it as the first pwq or while a
+        * work item is executing on it, so the retrying is guaranteed to
+        * make forward-progress.
+        */
+       if (unlikely(!pwq->refcnt)) {
+               if (wq->flags & WQ_UNBOUND) {
+                       spin_unlock(&pwq->pool->lock);
+                       cpu_relax();
+                       goto retry;
+               }
+               /* oops */
+               WARN_ONCE(true, "workqueue: per-cpu pwq for %s on cpu%d has 0 refcnt",
+                         wq->name, cpu);
        }
 
-       /* gcwq determined, get cwq and queue */
-       cwq = get_cwq(gcwq->cpu, wq);
-       trace_workqueue_queue_work(req_cpu, cwq, work);
+       /* pwq determined, queue */
+       trace_workqueue_queue_work(req_cpu, pwq, work);
 
        if (WARN_ON(!list_empty(&work->entry))) {
-               spin_unlock(&gcwq->lock);
+               spin_unlock(&pwq->pool->lock);
                return;
        }
 
-       cwq->nr_in_flight[cwq->work_color]++;
-       work_flags = work_color_to_flags(cwq->work_color);
+       pwq->nr_in_flight[pwq->work_color]++;
+       work_flags = work_color_to_flags(pwq->work_color);
 
-       if (likely(cwq->nr_active < cwq->max_active)) {
+       if (likely(pwq->nr_active < pwq->max_active)) {
                trace_workqueue_activate_work(work);
-               cwq->nr_active++;
-               worklist = &cwq->pool->worklist;
+               pwq->nr_active++;
+               worklist = &pwq->pool->worklist;
        } else {
                work_flags |= WORK_STRUCT_DELAYED;
-               worklist = &cwq->delayed_works;
+               worklist = &pwq->delayed_works;
        }
 
-       insert_work(cwq, work, worklist, work_flags);
+       insert_work(pwq, work, worklist, work_flags);
 
-       spin_unlock(&gcwq->lock);
+       spin_unlock(&pwq->pool->lock);
 }
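
Viewed in isolation, the pwq selection above is a lock/validate/retry loop.  A simplified, hypothetical restatement (lock_pwq_for_queueing() is not a real helper; tracing and the non-reentrancy hand-off to last_pool are omitted):

static struct pool_workqueue *lock_pwq_for_queueing(struct workqueue_struct *wq,
                                                    int cpu)
{
        struct pool_workqueue *pwq;
retry:
        if (!(wq->flags & WQ_UNBOUND))
                pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
        else
                pwq = first_pwq(wq);

        spin_lock(&pwq->pool->lock);

        /* an unbound pwq may have been released since first_pwq() */
        if ((wq->flags & WQ_UNBOUND) && unlikely(!pwq->refcnt)) {
                spin_unlock(&pwq->pool->lock);
                cpu_relax();
                goto retry;
        }
        return pwq;
}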
 
 /**
@@ -1328,38 +1374,20 @@ bool queue_work_on(int cpu, struct workqueue_struct *wq,
 }
 EXPORT_SYMBOL_GPL(queue_work_on);
 
-/**
- * queue_work - queue work on a workqueue
- * @wq: workqueue to use
- * @work: work to queue
- *
- * Returns %false if @work was already on a queue, %true otherwise.
- *
- * We queue the work to the CPU on which it was submitted, but if the CPU dies
- * it can be processed by another CPU.
- */
-bool queue_work(struct workqueue_struct *wq, struct work_struct *work)
-{
-       return queue_work_on(WORK_CPU_UNBOUND, wq, work);
-}
-EXPORT_SYMBOL_GPL(queue_work);
-
 void delayed_work_timer_fn(unsigned long __data)
 {
        struct delayed_work *dwork = (struct delayed_work *)__data;
-       struct cpu_workqueue_struct *cwq = get_work_cwq(&dwork->work);
 
        /* should have been called from irqsafe timer with irq already off */
-       __queue_work(dwork->cpu, cwq->wq, &dwork->work);
+       __queue_work(dwork->cpu, dwork->wq, &dwork->work);
 }
-EXPORT_SYMBOL_GPL(delayed_work_timer_fn);
+EXPORT_SYMBOL(delayed_work_timer_fn);
 
 static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
                                struct delayed_work *dwork, unsigned long delay)
 {
        struct timer_list *timer = &dwork->timer;
        struct work_struct *work = &dwork->work;
-       unsigned int lcpu;
 
        WARN_ON_ONCE(timer->function != delayed_work_timer_fn ||
                     timer->data != (unsigned long)dwork);
@@ -1379,30 +1407,7 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
 
        timer_stats_timer_set_start_info(&dwork->timer);
 
-       /*
-        * This stores cwq for the moment, for the timer_fn.  Note that the
-        * work's gcwq is preserved to allow reentrance detection for
-        * delayed works.
-        */
-       if (!(wq->flags & WQ_UNBOUND)) {
-               struct global_cwq *gcwq = get_work_gcwq(work);
-
-               /*
-                * If we cannot get the last gcwq from @work directly,
-                * select the last CPU such that it avoids unnecessarily
-                * triggering non-reentrancy check in __queue_work().
-                */
-               lcpu = cpu;
-               if (gcwq)
-                       lcpu = gcwq->cpu;
-               if (lcpu == WORK_CPU_UNBOUND)
-                       lcpu = raw_smp_processor_id();
-       } else {
-               lcpu = WORK_CPU_UNBOUND;
-       }
-
-       set_work_cwq(work, get_cwq(lcpu, wq), 0);
-
+       dwork->wq = wq;
        dwork->cpu = cpu;
        timer->expires = jiffies + delay;
 
@@ -1443,21 +1448,6 @@ bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
 }
 EXPORT_SYMBOL_GPL(queue_delayed_work_on);
 
-/**
- * queue_delayed_work - queue work on a workqueue after delay
- * @wq: workqueue to use
- * @dwork: delayable work to queue
- * @delay: number of jiffies to wait before queueing
- *
- * Equivalent to queue_delayed_work_on() but tries to use the local CPU.
- */
-bool queue_delayed_work(struct workqueue_struct *wq,
-                       struct delayed_work *dwork, unsigned long delay)
-{
-       return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
-}
-EXPORT_SYMBOL_GPL(queue_delayed_work);
-
 /**
  * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU
  * @cpu: CPU number to execute work on
@@ -1496,21 +1486,6 @@ bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
 }
 EXPORT_SYMBOL_GPL(mod_delayed_work_on);
 
-/**
- * mod_delayed_work - modify delay of or queue a delayed work
- * @wq: workqueue to use
- * @dwork: work to queue
- * @delay: number of jiffies to wait before queueing
- *
- * mod_delayed_work_on() on local CPU.
- */
-bool mod_delayed_work(struct workqueue_struct *wq, struct delayed_work *dwork,
-                     unsigned long delay)
-{
-       return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
-}
-EXPORT_SYMBOL_GPL(mod_delayed_work);
-
 /**
  * worker_enter_idle - enter idle state
  * @worker: worker which is entering idle state
@@ -1519,16 +1494,16 @@ EXPORT_SYMBOL_GPL(mod_delayed_work);
  * necessary.
  *
  * LOCKING:
- * spin_lock_irq(gcwq->lock).
+ * spin_lock_irq(pool->lock).
  */
 static void worker_enter_idle(struct worker *worker)
 {
        struct worker_pool *pool = worker->pool;
-       struct global_cwq *gcwq = pool->gcwq;
 
-       BUG_ON(worker->flags & WORKER_IDLE);
-       BUG_ON(!list_empty(&worker->entry) &&
-              (worker->hentry.next || worker->hentry.pprev));
+       if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) ||
+           WARN_ON_ONCE(!list_empty(&worker->entry) &&
+                        (worker->hentry.next || worker->hentry.pprev)))
+               return;
 
        /* can't use worker_set_flags(), also called from start_worker() */
        worker->flags |= WORKER_IDLE;
@@ -1542,14 +1517,14 @@ static void worker_enter_idle(struct worker *worker)
                mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
 
        /*
-        * Sanity check nr_running.  Because gcwq_unbind_fn() releases
-        * gcwq->lock between setting %WORKER_UNBOUND and zapping
+        * Sanity check nr_running.  Because wq_unbind_fn() releases
+        * pool->lock between setting %WORKER_UNBOUND and zapping
         * nr_running, the warning may trigger spuriously.  Check iff
         * unbind is not in progress.
         */
-       WARN_ON_ONCE(!(gcwq->flags & GCWQ_DISASSOCIATED) &&
+       WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
                     pool->nr_workers == pool->nr_idle &&
-                    atomic_read(get_pool_nr_running(pool)));
+                    atomic_read(&pool->nr_running));
 }
 
 /**
@@ -1559,72 +1534,71 @@ static void worker_enter_idle(struct worker *worker)
  * @worker is leaving idle state.  Update stats.
  *
  * LOCKING:
- * spin_lock_irq(gcwq->lock).
+ * spin_lock_irq(pool->lock).
  */
 static void worker_leave_idle(struct worker *worker)
 {
        struct worker_pool *pool = worker->pool;
 
-       BUG_ON(!(worker->flags & WORKER_IDLE));
+       if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE)))
+               return;
        worker_clr_flags(worker, WORKER_IDLE);
        pool->nr_idle--;
        list_del_init(&worker->entry);
 }
 
 /**
- * worker_maybe_bind_and_lock - bind worker to its cpu if possible and lock gcwq
- * @worker: self
+ * worker_maybe_bind_and_lock - try to bind %current to worker_pool and lock it
+ * @pool: target worker_pool
+ *
+ * Bind %current to the cpu of @pool if it is associated and lock @pool.
  *
  * Works which are scheduled while the cpu is online must at least be
  * scheduled to a worker which is bound to the cpu so that if they are
  * flushed from cpu callbacks while cpu is going down, they are
  * guaranteed to execute on the cpu.
  *
- * This function is to be used by rogue workers and rescuers to bind
+ * This function is to be used by unbound workers and rescuers to bind
  * themselves to the target cpu and may race with cpu going down or
  * coming online.  kthread_bind() can't be used because it may put the
  * worker to already dead cpu and set_cpus_allowed_ptr() can't be used
- * verbatim as it's best effort and blocking and gcwq may be
+ * verbatim as it's best effort and blocking and pool may be
  * [dis]associated in the meantime.
  *
- * This function tries set_cpus_allowed() and locks gcwq and verifies the
- * binding against %GCWQ_DISASSOCIATED which is set during
+ * This function tries set_cpus_allowed() and locks pool and verifies the
+ * binding against %POOL_DISASSOCIATED which is set during
  * %CPU_DOWN_PREPARE and cleared during %CPU_ONLINE, so if the worker
  * enters idle state or fetches works without dropping lock, it can
  * guarantee the scheduling requirement described in the first paragraph.
  *
  * CONTEXT:
- * Might sleep.  Called without any lock but returns with gcwq->lock
+ * Might sleep.  Called without any lock but returns with pool->lock
  * held.
  *
  * RETURNS:
- * %true if the associated gcwq is online (@worker is successfully
+ * %true if the associated pool is online (@worker is successfully
  * bound), %false if offline.
  */
-static bool worker_maybe_bind_and_lock(struct worker *worker)
-__acquires(&gcwq->lock)
+static bool worker_maybe_bind_and_lock(struct worker_pool *pool)
+__acquires(&pool->lock)
 {
-       struct global_cwq *gcwq = worker->pool->gcwq;
-       struct task_struct *task = worker->task;
-
        while (true) {
                /*
                 * The following call may fail, succeed or succeed
                 * without actually migrating the task to the cpu if
                 * it races with cpu hotunplug operation.  Verify
-                * against GCWQ_DISASSOCIATED.
+                * against POOL_DISASSOCIATED.
                 */
-               if (!(gcwq->flags & GCWQ_DISASSOCIATED))
-                       set_cpus_allowed_ptr(task, get_cpu_mask(gcwq->cpu));
+               if (!(pool->flags & POOL_DISASSOCIATED))
+                       set_cpus_allowed_ptr(current, pool->attrs->cpumask);
 
-               spin_lock_irq(&gcwq->lock);
-               if (gcwq->flags & GCWQ_DISASSOCIATED)
+               spin_lock_irq(&pool->lock);
+               if (pool->flags & POOL_DISASSOCIATED)
                        return false;
-               if (task_cpu(task) == gcwq->cpu &&
-                   cpumask_equal(&current->cpus_allowed,
-                                 get_cpu_mask(gcwq->cpu)))
+               if (task_cpu(current) == pool->cpu &&
+                   cpumask_equal(&current->cpus_allowed, pool->attrs->cpumask))
                        return true;
-               spin_unlock_irq(&gcwq->lock);
+               spin_unlock_irq(&pool->lock);
 
                /*
                 * We've raced with CPU hot[un]plug.  Give it a breather
@@ -1637,127 +1611,14 @@ __acquires(&gcwq->lock)
        }
 }
 
-/*
- * Rebind an idle @worker to its CPU.  worker_thread() will test
- * list_empty(@worker->entry) before leaving idle and call this function.
- */
-static void idle_worker_rebind(struct worker *worker)
+static struct worker *alloc_worker(void)
 {
-       struct global_cwq *gcwq = worker->pool->gcwq;
-
-       /* CPU may go down again inbetween, clear UNBOUND only on success */
-       if (worker_maybe_bind_and_lock(worker))
-               worker_clr_flags(worker, WORKER_UNBOUND);
-
-       /* rebind complete, become available again */
-       list_add(&worker->entry, &worker->pool->idle_list);
-       spin_unlock_irq(&gcwq->lock);
-}
-
-/*
- * Function for @worker->rebind.work used to rebind unbound busy workers to
- * the associated cpu which is coming back online.  This is scheduled by
- * cpu up but can race with other cpu hotplug operations and may be
- * executed twice without intervening cpu down.
- */
-static void busy_worker_rebind_fn(struct work_struct *work)
-{
-       struct worker *worker = container_of(work, struct worker, rebind_work);
-       struct global_cwq *gcwq = worker->pool->gcwq;
-
-       if (worker_maybe_bind_and_lock(worker))
-               worker_clr_flags(worker, WORKER_UNBOUND);
-
-       spin_unlock_irq(&gcwq->lock);
-}
-
-/**
- * rebind_workers - rebind all workers of a gcwq to the associated CPU
- * @gcwq: gcwq of interest
- *
- * @gcwq->cpu is coming online.  Rebind all workers to the CPU.  Rebinding
- * is different for idle and busy ones.
- *
- * Idle ones will be removed from the idle_list and woken up.  They will
- * add themselves back after completing rebind.  This ensures that the
- * idle_list doesn't contain any unbound workers when re-bound busy workers
- * try to perform local wake-ups for concurrency management.
- *
- * Busy workers can rebind after they finish their current work items.
- * Queueing the rebind work item at the head of the scheduled list is
- * enough.  Note that nr_running will be properly bumped as busy workers
- * rebind.
- *
- * On return, all non-manager workers are scheduled for rebind - see
- * manage_workers() for the manager special case.  Any idle worker
- * including the manager will not appear on @idle_list until rebind is
- * complete, making local wake-ups safe.
- */
-static void rebind_workers(struct global_cwq *gcwq)
-{
-       struct worker_pool *pool;
-       struct worker *worker, *n;
-       struct hlist_node *pos;
-       int i;
-
-       lockdep_assert_held(&gcwq->lock);
-
-       for_each_worker_pool(pool, gcwq)
-               lockdep_assert_held(&pool->assoc_mutex);
-
-       /* dequeue and kick idle ones */
-       for_each_worker_pool(pool, gcwq) {
-               list_for_each_entry_safe(worker, n, &pool->idle_list, entry) {
-                       /*
-                        * idle workers should be off @pool->idle_list
-                        * until rebind is complete to avoid receiving
-                        * premature local wake-ups.
-                        */
-                       list_del_init(&worker->entry);
-
-                       /*
-                        * worker_thread() will see the above dequeuing
-                        * and call idle_worker_rebind().
-                        */
-                       wake_up_process(worker->task);
-               }
-       }
-
-       /* rebind busy workers */
-       for_each_busy_worker(worker, i, pos, gcwq) {
-               struct work_struct *rebind_work = &worker->rebind_work;
-               struct workqueue_struct *wq;
-
-               if (test_and_set_bit(WORK_STRUCT_PENDING_BIT,
-                                    work_data_bits(rebind_work)))
-                       continue;
-
-               debug_work_activate(rebind_work);
-
-               /*
-                * wq doesn't really matter but let's keep @worker->pool
-                * and @cwq->pool consistent for sanity.
-                */
-               if (worker_pool_pri(worker->pool))
-                       wq = system_highpri_wq;
-               else
-                       wq = system_wq;
-
-               insert_work(get_cwq(gcwq->cpu, wq), rebind_work,
-                       worker->scheduled.next,
-                       work_color_to_flags(WORK_NO_COLOR));
-       }
-}
-
-static struct worker *alloc_worker(void)
-{
-       struct worker *worker;
+       struct worker *worker;
 
        worker = kzalloc(sizeof(*worker), GFP_KERNEL);
        if (worker) {
                INIT_LIST_HEAD(&worker->entry);
                INIT_LIST_HEAD(&worker->scheduled);
-               INIT_WORK(&worker->rebind_work, busy_worker_rebind_fn);
                /* on creation a worker is in !idle && prep state */
                worker->flags = WORKER_PREP;
        }
@@ -1780,19 +1641,25 @@ static struct worker *alloc_worker(void)
  */
 static struct worker *create_worker(struct worker_pool *pool)
 {
-       struct global_cwq *gcwq = pool->gcwq;
-       const char *pri = worker_pool_pri(pool) ? "H" : "";
+       const char *pri = pool->attrs->nice < 0  ? "H" : "";
        struct worker *worker = NULL;
        int id = -1;
 
-       spin_lock_irq(&gcwq->lock);
-       while (ida_get_new(&pool->worker_ida, &id)) {
-               spin_unlock_irq(&gcwq->lock);
-               if (!ida_pre_get(&pool->worker_ida, GFP_KERNEL))
-                       goto fail;
-               spin_lock_irq(&gcwq->lock);
-       }
-       spin_unlock_irq(&gcwq->lock);
+       lockdep_assert_held(&pool->manager_mutex);
+
+       /*
+        * ID is needed to determine kthread name.  Allocate ID first
+        * without installing the pointer.
+        */
+       idr_preload(GFP_KERNEL);
+       spin_lock_irq(&pool->lock);
+
+       id = idr_alloc(&pool->worker_idr, NULL, 0, 0, GFP_NOWAIT);
+
+       spin_unlock_irq(&pool->lock);
+       idr_preload_end();
+       if (id < 0)
+               goto fail;
 
        worker = alloc_worker();
        if (!worker)
@@ -1801,41 +1668,47 @@ static struct worker *create_worker(struct worker_pool *pool)
        worker->pool = pool;
        worker->id = id;
 
-       if (gcwq->cpu != WORK_CPU_UNBOUND)
+       if (pool->cpu >= 0)
                worker->task = kthread_create_on_node(worker_thread,
-                                       worker, cpu_to_node(gcwq->cpu),
-                                       "kworker/%u:%d%s", gcwq->cpu, id, pri);
+                                       worker, cpu_to_node(pool->cpu),
+                                       "kworker/%d:%d%s", pool->cpu, id, pri);
        else
                worker->task = kthread_create(worker_thread, worker,
-                                             "kworker/u:%d%s", id, pri);
+                                             "kworker/u%d:%d%s",
+                                             pool->id, id, pri);
        if (IS_ERR(worker->task))
                goto fail;
 
-       if (worker_pool_pri(pool))
-               set_user_nice(worker->task, HIGHPRI_NICE_LEVEL);
+       /*
+        * set_cpus_allowed_ptr() will fail if the cpumask doesn't have any
+        * online CPUs.  It'll be re-applied when any of the CPUs come up.
+        */
+       set_user_nice(worker->task, pool->attrs->nice);
+       set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
+
+       /* prevent userland from meddling with cpumask of workqueue workers */
+       worker->task->flags |= PF_NO_SETAFFINITY;
 
        /*
-        * Determine CPU binding of the new worker depending on
-        * %GCWQ_DISASSOCIATED.  The caller is responsible for ensuring the
-        * flag remains stable across this function.  See the comments
-        * above the flag definition for details.
-        *
-        * As an unbound worker may later become a regular one if CPU comes
-        * online, make sure every worker has %PF_THREAD_BOUND set.
+        * The caller is responsible for ensuring %POOL_DISASSOCIATED
+        * remains stable across this function.  See the comments above the
+        * flag definition for details.
         */
-       if (!(gcwq->flags & GCWQ_DISASSOCIATED)) {
-               kthread_bind(worker->task, gcwq->cpu);
-       } else {
-               worker->task->flags |= PF_THREAD_BOUND;
+       if (pool->flags & POOL_DISASSOCIATED)
                worker->flags |= WORKER_UNBOUND;
-       }
+
+       /* successful, commit the pointer to idr */
+       spin_lock_irq(&pool->lock);
+       idr_replace(&pool->worker_idr, worker, worker->id);
+       spin_unlock_irq(&pool->lock);
 
        return worker;
+
 fail:
        if (id >= 0) {
-               spin_lock_irq(&gcwq->lock);
-               ida_remove(&pool->worker_ida, id);
-               spin_unlock_irq(&gcwq->lock);
+               spin_lock_irq(&pool->lock);
+               idr_remove(&pool->worker_idr, id);
+               spin_unlock_irq(&pool->lock);
        }
        kfree(worker);
        return NULL;
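
The ID handling above is the generic reserve-then-publish IDR idiom: allocate the ID with a NULL pointer while preloaded, finish constructing the object, then idr_replace() it in.  A standalone sketch of the same idiom (publish_object(), my_idr and my_lock are placeholders):

static int publish_object(struct idr *my_idr, spinlock_t *my_lock, void *obj)
{
        int id;

        idr_preload(GFP_KERNEL);                /* may sleep, preloads nodes */
        spin_lock_irq(my_lock);
        id = idr_alloc(my_idr, NULL, 0, 0, GFP_NOWAIT); /* reserve ID only */
        spin_unlock_irq(my_lock);
        idr_preload_end();
        if (id < 0)
                return id;

        /* ... construct @obj using @id; on failure, idr_remove(my_idr, id) ... */

        spin_lock_irq(my_lock);
        idr_replace(my_idr, obj, id);           /* publish the pointer */
        spin_unlock_irq(my_lock);
        return id;
}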
@@ -1845,10 +1718,10 @@ fail:
  * start_worker - start a newly created worker
  * @worker: worker to start
  *
- * Make the gcwq aware of @worker and start it.
+ * Make the pool aware of @worker and start it.
  *
  * CONTEXT:
- * spin_lock_irq(gcwq->lock).
+ * spin_lock_irq(pool->lock).
  */
 static void start_worker(struct worker *worker)
 {
@@ -1858,24 +1731,50 @@ static void start_worker(struct worker *worker)
        wake_up_process(worker->task);
 }
 
+/**
+ * create_and_start_worker - create and start a worker for a pool
+ * @pool: the target pool
+ *
+ * Grab the managership of @pool and create and start a new worker for it.
+ */
+static int create_and_start_worker(struct worker_pool *pool)
+{
+       struct worker *worker;
+
+       mutex_lock(&pool->manager_mutex);
+
+       worker = create_worker(pool);
+       if (worker) {
+               spin_lock_irq(&pool->lock);
+               start_worker(worker);
+               spin_unlock_irq(&pool->lock);
+       }
+
+       mutex_unlock(&pool->manager_mutex);
+
+       return worker ? 0 : -ENOMEM;
+}
+
 /**
  * destroy_worker - destroy a workqueue worker
  * @worker: worker to be destroyed
  *
- * Destroy @worker and adjust @gcwq stats accordingly.
+ * Destroy @worker and adjust @pool stats accordingly.
  *
  * CONTEXT:
- * spin_lock_irq(gcwq->lock) which is released and regrabbed.
+ * spin_lock_irq(pool->lock) which is released and regrabbed.
  */
 static void destroy_worker(struct worker *worker)
 {
        struct worker_pool *pool = worker->pool;
-       struct global_cwq *gcwq = pool->gcwq;
-       int id = worker->id;
+
+       lockdep_assert_held(&pool->manager_mutex);
+       lockdep_assert_held(&pool->lock);
 
        /* sanity check frenzy */
-       BUG_ON(worker->current_work);
-       BUG_ON(!list_empty(&worker->scheduled));
+       if (WARN_ON(worker->current_work) ||
+           WARN_ON(!list_empty(&worker->scheduled)))
+               return;
 
        if (worker->flags & WORKER_STARTED)
                pool->nr_workers--;
@@ -1885,21 +1784,21 @@ static void destroy_worker(struct worker *worker)
        list_del_init(&worker->entry);
        worker->flags |= WORKER_DIE;
 
-       spin_unlock_irq(&gcwq->lock);
+       idr_remove(&pool->worker_idr, worker->id);
+
+       spin_unlock_irq(&pool->lock);
 
        kthread_stop(worker->task);
        kfree(worker);
 
-       spin_lock_irq(&gcwq->lock);
-       ida_remove(&pool->worker_ida, id);
+       spin_lock_irq(&pool->lock);
 }
 
 static void idle_worker_timeout(unsigned long __pool)
 {
        struct worker_pool *pool = (void *)__pool;
-       struct global_cwq *gcwq = pool->gcwq;
 
-       spin_lock_irq(&gcwq->lock);
+       spin_lock_irq(&pool->lock);
 
        if (too_many_workers(pool)) {
                struct worker *worker;
@@ -1918,35 +1817,33 @@ static void idle_worker_timeout(unsigned long __pool)
                }
        }
 
-       spin_unlock_irq(&gcwq->lock);
+       spin_unlock_irq(&pool->lock);
 }
 
-static bool send_mayday(struct work_struct *work)
+static void send_mayday(struct work_struct *work)
 {
-       struct cpu_workqueue_struct *cwq = get_work_cwq(work);
-       struct workqueue_struct *wq = cwq->wq;
-       unsigned int cpu;
+       struct pool_workqueue *pwq = get_work_pwq(work);
+       struct workqueue_struct *wq = pwq->wq;
 
-       if (!(wq->flags & WQ_RESCUER))
-               return false;
+       lockdep_assert_held(&wq_mayday_lock);
+
+       if (!wq->rescuer)
+               return;
 
        /* mayday mayday mayday */
-       cpu = cwq->pool->gcwq->cpu;
-       /* WORK_CPU_UNBOUND can't be set in cpumask, use cpu 0 instead */
-       if (cpu == WORK_CPU_UNBOUND)
-               cpu = 0;
-       if (!mayday_test_and_set_cpu(cpu, wq->mayday_mask))
+       if (list_empty(&pwq->mayday_node)) {
+               list_add_tail(&pwq->mayday_node, &wq->maydays);
                wake_up_process(wq->rescuer->task);
-       return true;
+       }
 }
 
-static void gcwq_mayday_timeout(unsigned long __pool)
+static void pool_mayday_timeout(unsigned long __pool)
 {
        struct worker_pool *pool = (void *)__pool;
-       struct global_cwq *gcwq = pool->gcwq;
        struct work_struct *work;
 
-       spin_lock_irq(&gcwq->lock);
+       spin_lock_irq(&wq_mayday_lock);         /* for wq->maydays */
+       spin_lock(&pool->lock);
 
        if (need_to_create_worker(pool)) {
                /*
@@ -1959,7 +1856,8 @@ static void gcwq_mayday_timeout(unsigned long __pool)
                        send_mayday(work);
        }
 
-       spin_unlock_irq(&gcwq->lock);
+       spin_unlock(&pool->lock);
+       spin_unlock_irq(&wq_mayday_lock);
 
        mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
 }
@@ -1974,28 +1872,26 @@ static void gcwq_mayday_timeout(unsigned long __pool)
  * sent to all rescuers with works scheduled on @pool to resolve
  * possible allocation deadlock.
  *
- * On return, need_to_create_worker() is guaranteed to be false and
- * may_start_working() true.
+ * On return, need_to_create_worker() is guaranteed to be %false and
+ * may_start_working() %true.
  *
  * LOCKING:
- * spin_lock_irq(gcwq->lock) which may be released and regrabbed
+ * spin_lock_irq(pool->lock) which may be released and regrabbed
  * multiple times.  Does GFP_KERNEL allocations.  Called only from
  * manager.
  *
  * RETURNS:
- * false if no action was taken and gcwq->lock stayed locked, true
+ * %false if no action was taken and pool->lock stayed locked, %true
  * otherwise.
  */
 static bool maybe_create_worker(struct worker_pool *pool)
-__releases(&gcwq->lock)
-__acquires(&gcwq->lock)
+__releases(&pool->lock)
+__acquires(&pool->lock)
 {
-       struct global_cwq *gcwq = pool->gcwq;
-
        if (!need_to_create_worker(pool))
                return false;
 restart:
-       spin_unlock_irq(&gcwq->lock);
+       spin_unlock_irq(&pool->lock);
 
        /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
        mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
@@ -2006,9 +1902,10 @@ restart:
                worker = create_worker(pool);
                if (worker) {
                        del_timer_sync(&pool->mayday_timer);
-                       spin_lock_irq(&gcwq->lock);
+                       spin_lock_irq(&pool->lock);
                        start_worker(worker);
-                       BUG_ON(need_to_create_worker(pool));
+                       if (WARN_ON_ONCE(need_to_create_worker(pool)))
+                               goto restart;
                        return true;
                }
 
@@ -2023,7 +1920,7 @@ restart:
        }
 
        del_timer_sync(&pool->mayday_timer);
-       spin_lock_irq(&gcwq->lock);
+       spin_lock_irq(&pool->lock);
        if (need_to_create_worker(pool))
                goto restart;
        return true;
@@ -2037,11 +1934,11 @@ restart:
  * IDLE_WORKER_TIMEOUT.
  *
  * LOCKING:
- * spin_lock_irq(gcwq->lock) which may be released and regrabbed
+ * spin_lock_irq(pool->lock) which may be released and regrabbed
  * multiple times.  Called only from manager.
  *
  * RETURNS:
- * false if no action was taken and gcwq->lock stayed locked, true
+ * %false if no action was taken and pool->lock stayed locked, %true
  * otherwise.
  */
 static bool maybe_destroy_workers(struct worker_pool *pool)
@@ -2071,63 +1968,58 @@ static bool maybe_destroy_workers(struct worker_pool *pool)
  * manage_workers - manage worker pool
  * @worker: self
  *
- * Assume the manager role and manage gcwq worker pool @worker belongs
+ * Assume the manager role and manage the worker pool @worker belongs
  * to.  At any given time, there can be only zero or one manager per
- * gcwq.  The exclusion is handled automatically by this function.
+ * pool.  The exclusion is handled automatically by this function.
  *
  * The caller can safely start processing works on false return.  On
  * true return, it's guaranteed that need_to_create_worker() is false
  * and may_start_working() is true.
  *
  * CONTEXT:
- * spin_lock_irq(gcwq->lock) which may be released and regrabbed
+ * spin_lock_irq(pool->lock) which may be released and regrabbed
  * multiple times.  Does GFP_KERNEL allocations.
  *
  * RETURNS:
- * false if no action was taken and gcwq->lock stayed locked, true if
- * some action was taken.
+ * %false if no action was taken and pool->lock stayed locked, %true if
+ * some action was taken.
  */
 static bool manage_workers(struct worker *worker)
 {
        struct worker_pool *pool = worker->pool;
        bool ret = false;
 
-       if (pool->flags & POOL_MANAGING_WORKERS)
-               return ret;
-
-       pool->flags |= POOL_MANAGING_WORKERS;
-
        /*
-        * To simplify both worker management and CPU hotplug, hold off
-        * management while hotplug is in progress.  CPU hotplug path can't
-        * grab %POOL_MANAGING_WORKERS to achieve this because that can
-        * lead to idle worker depletion (all become busy thinking someone
-        * else is managing) which in turn can result in deadlock under
-        * extreme circumstances.  Use @pool->assoc_mutex to synchronize
-        * manager against CPU hotplug.
+        * Managership is governed by two mutexes - manager_arb and
+        * manager_mutex.  manager_arb handles arbitration of manager role.
+        * Anyone who successfully grabs manager_arb wins the arbitration
+        * and becomes the manager.  A failed mutex_trylock() on
+        * pool->manager_arb while holding pool->lock reliably indicates
+        * that someone else is managing the pool and the worker which
+        * failed the trylock can proceed to execute work items.  This
+        * means that anyone
+        * grabbing manager_arb is responsible for actually performing
+        * manager duties.  If manager_arb is grabbed and released without
+        * actual management, the pool may stall indefinitely.
         *
-        * assoc_mutex would always be free unless CPU hotplug is in
-        * progress.  trylock first without dropping @gcwq->lock.
+        * manager_mutex is used for exclusion of actual management
+        * operations.  The holder of manager_mutex can be sure that no
+        * management operations, including creation and destruction of
+        * workers, will take place until the mutex is released.  Because
+        * manager_mutex doesn't interfere with manager role arbitration,
+        * it is guaranteed that the pool's management, while it may be
+        * delayed, won't be disturbed by someone else grabbing
+        * manager_mutex.
         */
-       if (unlikely(!mutex_trylock(&pool->assoc_mutex))) {
-               spin_unlock_irq(&pool->gcwq->lock);
-               mutex_lock(&pool->assoc_mutex);
-               /*
-                * CPU hotplug could have happened while we were waiting
-                * for assoc_mutex.  Hotplug itself can't handle us
-                * because manager isn't either on idle or busy list, and
-                * @gcwq's state and ours could have deviated.
-                *
-                * As hotplug is now excluded via assoc_mutex, we can
-                * simply try to bind.  It will succeed or fail depending
-                * on @gcwq's current state.  Try it and adjust
-                * %WORKER_UNBOUND accordingly.
-                */
-               if (worker_maybe_bind_and_lock(worker))
-                       worker->flags &= ~WORKER_UNBOUND;
-               else
-                       worker->flags |= WORKER_UNBOUND;
+       if (!mutex_trylock(&pool->manager_arb))
+               return ret;
 
+       /*
+        * With manager arbitration won, manager_mutex would be free in
+        * most cases.  trylock first without dropping @pool->lock.
+        */
+       if (unlikely(!mutex_trylock(&pool->manager_mutex))) {
+               spin_unlock_irq(&pool->lock);
+               mutex_lock(&pool->manager_mutex);
                ret = true;
        }
 
@@ -2140,8 +2032,8 @@ static bool manage_workers(struct worker *worker)
        ret |= maybe_destroy_workers(pool);
        ret |= maybe_create_worker(pool);
 
-       pool->flags &= ~POOL_MANAGING_WORKERS;
-       mutex_unlock(&pool->assoc_mutex);
+       mutex_unlock(&pool->manager_mutex);
+       mutex_unlock(&pool->manager_arb);
        return ret;
 }
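
Stripped of the pool->lock juggling above, the two-mutex scheme is arbitration plus exclusion.  A hypothetical sketch (try_to_manage() and do_management() are placeholders; the real code must drop pool->lock before it can block on manager_mutex):

static bool try_to_manage(struct worker_pool *pool)
{
        if (!mutex_trylock(&pool->manager_arb))
                return false;                   /* someone else is the manager */

        mutex_lock(&pool->manager_mutex);       /* exclude management operations */
        do_management(pool);
        mutex_unlock(&pool->manager_mutex);

        mutex_unlock(&pool->manager_arb);
        return true;
}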
 
@@ -2157,18 +2049,15 @@ static bool manage_workers(struct worker *worker)
  * call this function to process a work.
  *
  * CONTEXT:
- * spin_lock_irq(gcwq->lock) which is released and regrabbed.
+ * spin_lock_irq(pool->lock) which is released and regrabbed.
  */
 static void process_one_work(struct worker *worker, struct work_struct *work)
-__releases(&gcwq->lock)
-__acquires(&gcwq->lock)
+__releases(&pool->lock)
+__acquires(&pool->lock)
 {
-       struct cpu_workqueue_struct *cwq = get_work_cwq(work);
+       struct pool_workqueue *pwq = get_work_pwq(work);
        struct worker_pool *pool = worker->pool;
-       struct global_cwq *gcwq = pool->gcwq;
-       struct hlist_head *bwh = busy_worker_head(gcwq, work);
-       bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE;
-       work_func_t f = work->func;
+       bool cpu_intensive = pwq->wq->flags & WQ_CPU_INTENSIVE;
        int work_color;
        struct worker *collision;
 #ifdef CONFIG_LOCKDEP
@@ -2186,11 +2075,11 @@ __acquires(&gcwq->lock)
        /*
         * Ensure we're on the correct CPU.  DISASSOCIATED test is
         * necessary to avoid spurious warnings from rescuers servicing the
-        * unbound or a disassociated gcwq.
+        * unbound or a disassociated pool.
         */
        WARN_ON_ONCE(!(worker->flags & WORKER_UNBOUND) &&
-                    !(gcwq->flags & GCWQ_DISASSOCIATED) &&
-                    raw_smp_processor_id() != gcwq->cpu);
+                    !(pool->flags & POOL_DISASSOCIATED) &&
+                    raw_smp_processor_id() != pool->cpu);
 
        /*
         * A single work shouldn't be executed concurrently by
@@ -2198,7 +2087,7 @@ __acquires(&gcwq->lock)
         * already processing the work.  If so, defer the work to the
         * currently executing one.
         */
-       collision = __find_worker_executing_work(gcwq, bwh, work);
+       collision = find_worker_executing_work(pool, work);
        if (unlikely(collision)) {
                move_linked_works(work, &collision->scheduled, NULL);
                return;
@@ -2206,9 +2095,10 @@ __acquires(&gcwq->lock)
 
        /* claim and dequeue */
        debug_work_deactivate(work);
-       hlist_add_head(&worker->hentry, bwh);
+       hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work);
        worker->current_work = work;
-       worker->current_cwq = cwq;
+       worker->current_func = work->func;
+       worker->current_pwq = pwq;
        work_color = get_work_color(work);
 
        list_del_init(&work->entry);
@@ -2221,53 +2111,55 @@ __acquires(&gcwq->lock)
                worker_set_flags(worker, WORKER_CPU_INTENSIVE, true);
 
        /*
-        * Unbound gcwq isn't concurrency managed and work items should be
+        * Unbound pool isn't concurrency managed and work items should be
         * executed ASAP.  Wake up another worker if necessary.
         */
        if ((worker->flags & WORKER_UNBOUND) && need_more_worker(pool))
                wake_up_worker(pool);
 
        /*
-        * Record the last CPU and clear PENDING which should be the last
-        * update to @work.  Also, do this inside @gcwq->lock so that
+        * Record the last pool and clear PENDING which should be the last
+        * update to @work.  Also, do this inside @pool->lock so that
         * PENDING and queued state changes happen together while IRQ is
         * disabled.
         */
-       set_work_cpu_and_clear_pending(work, gcwq->cpu);
+       set_work_pool_and_clear_pending(work, pool->id);
 
-       spin_unlock_irq(&gcwq->lock);
+       spin_unlock_irq(&pool->lock);
 
-       lock_map_acquire_read(&cwq->wq->lockdep_map);
+       lock_map_acquire_read(&pwq->wq->lockdep_map);
        lock_map_acquire(&lockdep_map);
        trace_workqueue_execute_start(work);
-       f(work);
+       worker->current_func(work);
        /*
         * While we must be careful to not use "work" after this, the trace
         * point will only record its address.
         */
        trace_workqueue_execute_end(work);
        lock_map_release(&lockdep_map);
-       lock_map_release(&cwq->wq->lockdep_map);
+       lock_map_release(&pwq->wq->lockdep_map);
 
        if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
                pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n"
                       "     last function: %pf\n",
-                      current->comm, preempt_count(), task_pid_nr(current), f);
+                      current->comm, preempt_count(), task_pid_nr(current),
+                      worker->current_func);
                debug_show_held_locks(current);
                dump_stack();
        }
 
-       spin_lock_irq(&gcwq->lock);
+       spin_lock_irq(&pool->lock);
 
        /* clear cpu intensive status */
        if (unlikely(cpu_intensive))
                worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
 
        /* we're done with it, release */
-       hlist_del_init(&worker->hentry);
+       hash_del(&worker->hentry);
        worker->current_work = NULL;
-       worker->current_cwq = NULL;
-       cwq_dec_nr_in_flight(cwq, work_color);
+       worker->current_func = NULL;
+       worker->current_pwq = NULL;
+       pwq_dec_nr_in_flight(pwq, work_color);
 }
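
Busy workers are now found through a pool-wide hashtable keyed by the work item's address (the hash_add() above).  A generic sketch of the <linux/hashtable.h> idiom it relies on (struct item and the 64-bucket size are illustrative):

DEFINE_HASHTABLE(busy, 6);                      /* 2^6 = 64 buckets */

struct item {
        struct hlist_node hentry;
        void *key;
};

static void track(struct item *it, void *key)
{
        it->key = key;
        hash_add(busy, &it->hentry, (unsigned long)key);
}

static struct item *lookup(void *key)
{
        struct item *it;

        hash_for_each_possible(busy, it, hentry, (unsigned long)key)
                if (it->key == key)
                        return it;
        return NULL;
}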
 
 /**
@@ -2279,7 +2171,7 @@ __acquires(&gcwq->lock)
  * fetches a work from the top and executes it.
  *
  * CONTEXT:
- * spin_lock_irq(gcwq->lock) which may be released and regrabbed
+ * spin_lock_irq(pool->lock) which may be released and regrabbed
  * multiple times.
  */
 static void process_scheduled_works(struct worker *worker)
@@ -2295,36 +2187,28 @@ static void process_scheduled_works(struct worker *worker)
  * worker_thread - the worker thread function
  * @__worker: self
  *
- * The gcwq worker thread function.  There's a single dynamic pool of
- * these per each cpu.  These workers process all works regardless of
- * their specific target workqueue.  The only exception is works which
- * belong to workqueues with a rescuer which will be explained in
- * rescuer_thread().
+ * The worker thread function.  All workers belong to a worker_pool -
+ * either a per-cpu one or dynamic unbound one.  These workers process all
+ * work items regardless of their specific target workqueue.  The only
+ * exception is work items which belong to workqueues with a rescuer which
+ * will be explained in rescuer_thread().
  */
 static int worker_thread(void *__worker)
 {
        struct worker *worker = __worker;
        struct worker_pool *pool = worker->pool;
-       struct global_cwq *gcwq = pool->gcwq;
 
        /* tell the scheduler that this is a workqueue worker */
        worker->task->flags |= PF_WQ_WORKER;
 woke_up:
-       spin_lock_irq(&gcwq->lock);
-
-       /* we are off idle list if destruction or rebind is requested */
-       if (unlikely(list_empty(&worker->entry))) {
-               spin_unlock_irq(&gcwq->lock);
-
-               /* if DIE is set, destruction is requested */
-               if (worker->flags & WORKER_DIE) {
-                       worker->task->flags &= ~PF_WQ_WORKER;
-                       return 0;
-               }
+       spin_lock_irq(&pool->lock);
 
-               /* otherwise, rebind */
-               idle_worker_rebind(worker);
-               goto woke_up;
+       /* am I supposed to die? */
+       if (unlikely(worker->flags & WORKER_DIE)) {
+               spin_unlock_irq(&pool->lock);
+               WARN_ON_ONCE(!list_empty(&worker->entry));
+               worker->task->flags &= ~PF_WQ_WORKER;
+               return 0;
        }
 
        worker_leave_idle(worker);
@@ -2342,14 +2226,16 @@ recheck:
         * preparing to process a work or actually processing it.
         * Make sure nobody diddled with it while I was sleeping.
         */
-       BUG_ON(!list_empty(&worker->scheduled));
+       WARN_ON_ONCE(!list_empty(&worker->scheduled));
 
        /*
-        * When control reaches this point, we're guaranteed to have
-        * at least one idle worker or that someone else has already
-        * assumed the manager role.
+        * Finish PREP stage.  We're guaranteed to have at least one idle
+        * worker or that someone else has already assumed the manager
+        * role.  This is where @worker starts participating in concurrency
+        * management if applicable and concurrency management is restored
+        * after being rebound.  See rebind_workers() for details.
         */
-       worker_clr_flags(worker, WORKER_PREP);
+       worker_clr_flags(worker, WORKER_PREP | WORKER_REBOUND);
 
        do {
                struct work_struct *work =
@@ -2373,95 +2259,106 @@ sleep:
                goto recheck;
 
        /*
-        * gcwq->lock is held and there's no work to process and no
-        * need to manage, sleep.  Workers are woken up only while
-        * holding gcwq->lock or from local cpu, so setting the
-        * current state before releasing gcwq->lock is enough to
-        * prevent losing any event.
+        * pool->lock is held and there's no work to process and no need to
+        * manage, sleep.  Workers are woken up only while holding
+        * pool->lock or from local cpu, so setting the current state
+        * before releasing pool->lock is enough to prevent losing any
+        * event.
         */
        worker_enter_idle(worker);
        __set_current_state(TASK_INTERRUPTIBLE);
-       spin_unlock_irq(&gcwq->lock);
+       spin_unlock_irq(&pool->lock);
        schedule();
        goto woke_up;
 }
 
 /**
  * rescuer_thread - the rescuer thread function
- * @__wq: the associated workqueue
+ * @__rescuer: self
  *
  * Workqueue rescuer thread function.  There's one rescuer for each
- * workqueue which has WQ_RESCUER set.
+ * workqueue which has WQ_MEM_RECLAIM set.
  *
- * Regular work processing on a gcwq may block trying to create a new
+ * Regular work processing on a pool may block trying to create a new
  * worker which uses GFP_KERNEL allocation which has slight chance of
  * developing into deadlock if some works currently on the same queue
  * need to be processed to satisfy the GFP_KERNEL allocation.  This is
  * the problem rescuer solves.
  *
- * When such condition is possible, the gcwq summons rescuers of all
- * workqueues which have works queued on the gcwq and let them process
+ * When such condition is possible, the pool summons rescuers of all
+ * workqueues which have works queued on the pool and let them process
  * those works so that forward progress can be guaranteed.
  *
  * This should happen rarely.
  */
-static int rescuer_thread(void *__wq)
+static int rescuer_thread(void *__rescuer)
 {
-       struct workqueue_struct *wq = __wq;
-       struct worker *rescuer = wq->rescuer;
+       struct worker *rescuer = __rescuer;
+       struct workqueue_struct *wq = rescuer->rescue_wq;
        struct list_head *scheduled = &rescuer->scheduled;
-       bool is_unbound = wq->flags & WQ_UNBOUND;
-       unsigned int cpu;
 
        set_user_nice(current, RESCUER_NICE_LEVEL);
+
+       /*
+        * Mark rescuer as worker too.  As WORKER_PREP is never cleared, it
+        * doesn't participate in concurrency management.
+        */
+       rescuer->task->flags |= PF_WQ_WORKER;
 repeat:
        set_current_state(TASK_INTERRUPTIBLE);
 
        if (kthread_should_stop()) {
                __set_current_state(TASK_RUNNING);
+               rescuer->task->flags &= ~PF_WQ_WORKER;
                return 0;
        }
 
-       /*
-        * See whether any cpu is asking for help.  Unbounded
-        * workqueues use cpu 0 in mayday_mask for CPU_UNBOUND.
-        */
-       for_each_mayday_cpu(cpu, wq->mayday_mask) {
-               unsigned int tcpu = is_unbound ? WORK_CPU_UNBOUND : cpu;
-               struct cpu_workqueue_struct *cwq = get_cwq(tcpu, wq);
-               struct worker_pool *pool = cwq->pool;
-               struct global_cwq *gcwq = pool->gcwq;
+       /* see whether any pwq is asking for help */
+       spin_lock_irq(&wq_mayday_lock);
+
+       while (!list_empty(&wq->maydays)) {
+               struct pool_workqueue *pwq = list_first_entry(&wq->maydays,
+                                       struct pool_workqueue, mayday_node);
+               struct worker_pool *pool = pwq->pool;
                struct work_struct *work, *n;
 
                __set_current_state(TASK_RUNNING);
-               mayday_clear_cpu(cpu, wq->mayday_mask);
+               list_del_init(&pwq->mayday_node);
+
+               spin_unlock_irq(&wq_mayday_lock);
 
                /* migrate to the target cpu if possible */
+               worker_maybe_bind_and_lock(pool);
                rescuer->pool = pool;
-               worker_maybe_bind_and_lock(rescuer);
 
                /*
                 * Slurp in all works issued via this workqueue and
                 * process'em.
                 */
-               BUG_ON(!list_empty(&rescuer->scheduled));
+               WARN_ON_ONCE(!list_empty(&rescuer->scheduled));
                list_for_each_entry_safe(work, n, &pool->worklist, entry)
-                       if (get_work_cwq(work) == cwq)
+                       if (get_work_pwq(work) == pwq)
                                move_linked_works(work, scheduled, &n);
 
                process_scheduled_works(rescuer);
 
                /*
-                * Leave this gcwq.  If keep_working() is %true, notify a
+                * Leave this pool.  If keep_working() is %true, notify a
                 * regular worker; otherwise, we end up with 0 concurrency
                 * and stalling the execution.
                 */
                if (keep_working(pool))
                        wake_up_worker(pool);
 
-               spin_unlock_irq(&gcwq->lock);
+               rescuer->pool = NULL;
+               spin_unlock(&pool->lock);
+               spin_lock(&wq_mayday_lock);
        }
 
+       spin_unlock_irq(&wq_mayday_lock);
+
+       /* rescuers should never participate in concurrency management */
+       WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING));
        schedule();
        goto repeat;
 }
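
Only workqueues created with WQ_MEM_RECLAIM get a rescuer at all.  A minimal, hedged usage sketch (the "my_reclaim_wq" name is illustrative):

static struct workqueue_struct *my_wq;

static int __init my_init(void)
{
        /* WQ_MEM_RECLAIM: a dedicated rescuer guarantees forward progress */
        my_wq = alloc_workqueue("my_reclaim_wq", WQ_MEM_RECLAIM, 0);
        if (!my_wq)
                return -ENOMEM;
        return 0;
}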
@@ -2479,7 +2376,7 @@ static void wq_barrier_func(struct work_struct *work)
 
 /**
  * insert_wq_barrier - insert a barrier work
- * @cwq: cwq to insert barrier into
+ * @pwq: pwq to insert barrier into
  * @barr: wq_barrier to insert
  * @target: target work to attach @barr to
  * @worker: worker currently executing @target, NULL if @target is not executing
@@ -2496,12 +2393,12 @@ static void wq_barrier_func(struct work_struct *work)
  * after a work with LINKED flag set.
  *
  * Note that when @worker is non-NULL, @target may be modified
- * underneath us, so we can't reliably determine cwq from @target.
+ * underneath us, so we can't reliably determine pwq from @target.
  *
  * CONTEXT:
- * spin_lock_irq(gcwq->lock).
+ * spin_lock_irq(pool->lock).
  */
-static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
+static void insert_wq_barrier(struct pool_workqueue *pwq,
                              struct wq_barrier *barr,
                              struct work_struct *target, struct worker *worker)
 {
@@ -2509,7 +2406,7 @@ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
        unsigned int linked = 0;
 
        /*
-        * debugobject calls are safe here even with gcwq->lock locked
+        * debugobject calls are safe here even with pool->lock locked
         * as we know for sure that this will not trigger any of the
         * checks and call back into the fixup functions where we
         * might deadlock.
@@ -2534,23 +2431,23 @@ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
        }
 
        debug_work_activate(&barr->work);
-       insert_work(cwq, &barr->work, head,
+       insert_work(pwq, &barr->work, head,
                    work_color_to_flags(WORK_NO_COLOR) | linked);
 }
 
 /**
- * flush_workqueue_prep_cwqs - prepare cwqs for workqueue flushing
+ * flush_workqueue_prep_pwqs - prepare pwqs for workqueue flushing
  * @wq: workqueue being flushed
  * @flush_color: new flush color, < 0 for no-op
  * @work_color: new work color, < 0 for no-op
  *
- * Prepare cwqs for workqueue flushing.
+ * Prepare pwqs for workqueue flushing.
  *
- * If @flush_color is non-negative, flush_color on all cwqs should be
- * -1.  If no cwq has in-flight commands at the specified color, all
- * cwq->flush_color's stay at -1 and %false is returned.  If any cwq
- * has in flight commands, its cwq->flush_color is set to
- * @flush_color, @wq->nr_cwqs_to_flush is updated accordingly, cwq
+ * If @flush_color is non-negative, flush_color on all pwqs should be
+ * -1.  If no pwq has in-flight commands at the specified color, all
+ * pwq->flush_color's stay at -1 and %false is returned.  If any pwq
+ * has in flight commands, its pwq->flush_color is set to
+ * @flush_color, @wq->nr_pwqs_to_flush is updated accordingly, pwq
  * wakeup logic is armed and %true is returned.
  *
  * The caller should have initialized @wq->first_flusher prior to
@@ -2558,7 +2455,7 @@ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
  * @flush_color is negative, no flush color update is done and %false
  * is returned.
  *
- * If @work_color is non-negative, all cwqs should have the same
+ * If @work_color is non-negative, all pwqs should have the same
  * work_color which is previous to @work_color and all will be
  * advanced to @work_color.
  *
@@ -2569,42 +2466,45 @@ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
  * %true if @flush_color >= 0 and there's something to flush.  %false
  * otherwise.
  */
-static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq,
+static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
                                      int flush_color, int work_color)
 {
        bool wait = false;
-       unsigned int cpu;
+       struct pool_workqueue *pwq;
 
        if (flush_color >= 0) {
-               BUG_ON(atomic_read(&wq->nr_cwqs_to_flush));
-               atomic_set(&wq->nr_cwqs_to_flush, 1);
+               WARN_ON_ONCE(atomic_read(&wq->nr_pwqs_to_flush));
+               atomic_set(&wq->nr_pwqs_to_flush, 1);
        }
 
-       for_each_cwq_cpu(cpu, wq) {
-               struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
-               struct global_cwq *gcwq = cwq->pool->gcwq;
+       local_irq_disable();
+
+       for_each_pwq(pwq, wq) {
+               struct worker_pool *pool = pwq->pool;
 
-               spin_lock_irq(&gcwq->lock);
+               spin_lock(&pool->lock);
 
                if (flush_color >= 0) {
-                       BUG_ON(cwq->flush_color != -1);
+                       WARN_ON_ONCE(pwq->flush_color != -1);
 
-                       if (cwq->nr_in_flight[flush_color]) {
-                               cwq->flush_color = flush_color;
-                               atomic_inc(&wq->nr_cwqs_to_flush);
+                       if (pwq->nr_in_flight[flush_color]) {
+                               pwq->flush_color = flush_color;
+                               atomic_inc(&wq->nr_pwqs_to_flush);
                                wait = true;
                        }
                }
 
                if (work_color >= 0) {
-                       BUG_ON(work_color != work_next_color(cwq->work_color));
-                       cwq->work_color = work_color;
+                       WARN_ON_ONCE(work_color != work_next_color(pwq->work_color));
+                       pwq->work_color = work_color;
                }
 
-               spin_unlock_irq(&gcwq->lock);
+               spin_unlock(&pool->lock);
        }
 
-       if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_cwqs_to_flush))
+       local_irq_enable();
+
+       if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush))
                complete(&wq->first_flusher->done);
 
        return wait;
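
For reference, the flush and work colors consumed here cycle modulo WORK_NR_COLORS; work_next_color(), not visible in this hunk, is presumably just:

static int work_next_color(int color)
{
        return (color + 1) % WORK_NR_COLORS;
}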
@@ -2614,11 +2514,8 @@ static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq,
  * flush_workqueue - ensure that any scheduled work has run to completion.
  * @wq: workqueue to flush
  *
- * Forces execution of the workqueue and blocks until its completion.
- * This is typically used in driver shutdown handlers.
- *
- * We sleep until all works which were queued on entry have been handled,
- * but we are not livelocked by new incoming ones.
+ * This function sleeps until all work items which were queued on entry
+ * have finished execution, but it is not livelocked by new incoming ones.
  */
 void flush_workqueue(struct workqueue_struct *wq)
 {
@@ -2645,17 +2542,17 @@ void flush_workqueue(struct workqueue_struct *wq)
                 * becomes our flush_color and work_color is advanced
                 * by one.
                 */
-               BUG_ON(!list_empty(&wq->flusher_overflow));
+               WARN_ON_ONCE(!list_empty(&wq->flusher_overflow));
                this_flusher.flush_color = wq->work_color;
                wq->work_color = next_color;
 
                if (!wq->first_flusher) {
                        /* no flush in progress, become the first flusher */
-                       BUG_ON(wq->flush_color != this_flusher.flush_color);
+                       WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
 
                        wq->first_flusher = &this_flusher;
 
-                       if (!flush_workqueue_prep_cwqs(wq, wq->flush_color,
+                       if (!flush_workqueue_prep_pwqs(wq, wq->flush_color,
                                                       wq->work_color)) {
                                /* nothing to flush, done */
                                wq->flush_color = next_color;
@@ -2664,9 +2561,9 @@ void flush_workqueue(struct workqueue_struct *wq)
                        }
                } else {
                        /* wait in queue */
-                       BUG_ON(wq->flush_color == this_flusher.flush_color);
+                       WARN_ON_ONCE(wq->flush_color == this_flusher.flush_color);
                        list_add_tail(&this_flusher.list, &wq->flusher_queue);
-                       flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
+                       flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
                }
        } else {
                /*
@@ -2698,8 +2595,8 @@ void flush_workqueue(struct workqueue_struct *wq)
 
        wq->first_flusher = NULL;
 
-       BUG_ON(!list_empty(&this_flusher.list));
-       BUG_ON(wq->flush_color != this_flusher.flush_color);
+       WARN_ON_ONCE(!list_empty(&this_flusher.list));
+       WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
 
        while (true) {
                struct wq_flusher *next, *tmp;
@@ -2712,8 +2609,8 @@ void flush_workqueue(struct workqueue_struct *wq)
                        complete(&next->done);
                }
 
-               BUG_ON(!list_empty(&wq->flusher_overflow) &&
-                      wq->flush_color != work_next_color(wq->work_color));
+               WARN_ON_ONCE(!list_empty(&wq->flusher_overflow) &&
+                            wq->flush_color != work_next_color(wq->work_color));
 
                /* this flush_color is finished, advance by one */
                wq->flush_color = work_next_color(wq->flush_color);
@@ -2733,25 +2630,25 @@ void flush_workqueue(struct workqueue_struct *wq)
 
                        list_splice_tail_init(&wq->flusher_overflow,
                                              &wq->flusher_queue);
-                       flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
+                       flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
                }
 
                if (list_empty(&wq->flusher_queue)) {
-                       BUG_ON(wq->flush_color != wq->work_color);
+                       WARN_ON_ONCE(wq->flush_color != wq->work_color);
                        break;
                }
 
                /*
                 * Need to flush more colors.  Make the next flusher
-                * the new first flusher and arm cwqs.
+                * the new first flusher and arm pwqs.
                 */
-               BUG_ON(wq->flush_color == wq->work_color);
-               BUG_ON(wq->flush_color != next->flush_color);
+               WARN_ON_ONCE(wq->flush_color == wq->work_color);
+               WARN_ON_ONCE(wq->flush_color != next->flush_color);
 
                list_del_init(&next->list);
                wq->first_flusher = next;
 
-               if (flush_workqueue_prep_cwqs(wq, wq->flush_color, -1))
+               if (flush_workqueue_prep_pwqs(wq, wq->flush_color, -1))
                        break;
 
                /*
@@ -2780,76 +2677,80 @@ EXPORT_SYMBOL_GPL(flush_workqueue);
 void drain_workqueue(struct workqueue_struct *wq)
 {
        unsigned int flush_cnt = 0;
-       unsigned int cpu;
+       struct pool_workqueue *pwq;
 
        /*
         * __queue_work() needs to test whether there are drainers, is much
         * hotter than drain_workqueue() and already looks at @wq->flags.
-        * Use WQ_DRAINING so that queue doesn't have to check nr_drainers.
+        * Use __WQ_DRAINING so that queue doesn't have to check nr_drainers.
         */
-       spin_lock(&workqueue_lock);
+       mutex_lock(&wq_pool_mutex);
        if (!wq->nr_drainers++)
-               wq->flags |= WQ_DRAINING;
-       spin_unlock(&workqueue_lock);
+               wq->flags |= __WQ_DRAINING;
+       mutex_unlock(&wq_pool_mutex);
 reflush:
        flush_workqueue(wq);
 
-       for_each_cwq_cpu(cpu, wq) {
-               struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
+       local_irq_disable();
+
+       for_each_pwq(pwq, wq) {
                bool drained;
 
-               spin_lock_irq(&cwq->pool->gcwq->lock);
-               drained = !cwq->nr_active && list_empty(&cwq->delayed_works);
-               spin_unlock_irq(&cwq->pool->gcwq->lock);
+               spin_lock(&pwq->pool->lock);
+               drained = !pwq->nr_active && list_empty(&pwq->delayed_works);
+               spin_unlock(&pwq->pool->lock);
 
                if (drained)
                        continue;
 
                if (++flush_cnt == 10 ||
                    (flush_cnt % 100 == 0 && flush_cnt <= 1000))
-                       pr_warn("workqueue %s: flush on destruction isn't complete after %u tries\n",
+                       pr_warn("workqueue %s: drain_workqueue() isn't complete after %u tries\n",
                                wq->name, flush_cnt);
+
+               local_irq_enable();
                goto reflush;
        }
 
-       spin_lock(&workqueue_lock);
+       local_irq_enable();
+
+       mutex_lock(&wq_pool_mutex);
        if (!--wq->nr_drainers)
-               wq->flags &= ~WQ_DRAINING;
-       spin_unlock(&workqueue_lock);
+               wq->flags &= ~__WQ_DRAINING;
+       mutex_unlock(&wq_pool_mutex);
 }
 EXPORT_SYMBOL_GPL(drain_workqueue);
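To illustrate the flush/drain distinction described in the comments above, a hedged teardown sketch follows; the workqueue name and helper functions are hypothetical and not part of this patch.

#include <linux/workqueue.h>

static struct workqueue_struct *ex_wq;	/* hypothetical example workqueue */

static void ex_quiesce(void)
{
	/* wait for every work item queued up to this point; new ones may still arrive */
	flush_workqueue(ex_wq);
}

static void ex_teardown(void)
{
	/*
	 * destroy_workqueue() drains first (see drain_workqueue() above),
	 * so only self-requeueing work can delay it and an explicit
	 * drain_workqueue() call here would be redundant.
	 */
	destroy_workqueue(ex_wq);
}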
 
 static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
 {
        struct worker *worker = NULL;
-       struct global_cwq *gcwq;
-       struct cpu_workqueue_struct *cwq;
+       struct worker_pool *pool;
+       struct pool_workqueue *pwq;
 
        might_sleep();
-       gcwq = get_work_gcwq(work);
-       if (!gcwq)
+
+       local_irq_disable();
+       pool = get_work_pool(work);
+       if (!pool) {
+               local_irq_enable();
                return false;
+       }
 
-       spin_lock_irq(&gcwq->lock);
-       if (!list_empty(&work->entry)) {
-               /*
-                * See the comment near try_to_grab_pending()->smp_rmb().
-                * If it was re-queued to a different gcwq under us, we
-                * are not going to wait.
-                */
-               smp_rmb();
-               cwq = get_work_cwq(work);
-               if (unlikely(!cwq || gcwq != cwq->pool->gcwq))
+       spin_lock(&pool->lock);
+       /* see the comment in try_to_grab_pending() with the same code */
+       pwq = get_work_pwq(work);
+       if (pwq) {
+               if (unlikely(pwq->pool != pool))
                        goto already_gone;
        } else {
-               worker = find_worker_executing_work(gcwq, work);
+               worker = find_worker_executing_work(pool, work);
                if (!worker)
                        goto already_gone;
-               cwq = worker->current_cwq;
+               pwq = worker->current_pwq;
        }
 
-       insert_wq_barrier(cwq, barr, work, worker);
-       spin_unlock_irq(&gcwq->lock);
+       insert_wq_barrier(pwq, barr, work, worker);
+       spin_unlock_irq(&pool->lock);
 
        /*
         * If @max_active is 1 or rescuer is in use, flushing another work
@@ -2857,15 +2758,15 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
         * flusher is not running on the same workqueue by verifying write
         * access.
         */
-       if (cwq->wq->saved_max_active == 1 || cwq->wq->flags & WQ_RESCUER)
-               lock_map_acquire(&cwq->wq->lockdep_map);
+       if (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer)
+               lock_map_acquire(&pwq->wq->lockdep_map);
        else
-               lock_map_acquire_read(&cwq->wq->lockdep_map);
-       lock_map_release(&cwq->wq->lockdep_map);
+               lock_map_acquire_read(&pwq->wq->lockdep_map);
+       lock_map_release(&pwq->wq->lockdep_map);
 
        return true;
 already_gone:
-       spin_unlock_irq(&gcwq->lock);
+       spin_unlock_irq(&pool->lock);
        return false;
 }
 
@@ -2961,8 +2862,7 @@ bool flush_delayed_work(struct delayed_work *dwork)
 {
        local_irq_disable();
        if (del_timer_sync(&dwork->timer))
-               __queue_work(dwork->cpu,
-                            get_work_cwq(&dwork->work)->wq, &dwork->work);
+               __queue_work(dwork->cpu, dwork->wq, &dwork->work);
        local_irq_enable();
        return flush_work(&dwork->work);
 }
@@ -2992,7 +2892,8 @@ bool cancel_delayed_work(struct delayed_work *dwork)
        if (unlikely(ret < 0))
                return false;
 
-       set_work_cpu_and_clear_pending(&dwork->work, work_cpu(&dwork->work));
+       set_work_pool_and_clear_pending(&dwork->work,
+                                       get_work_pool_id(&dwork->work));
        local_irq_restore(flags);
        return ret;
 }
@@ -3013,66 +2914,6 @@ bool cancel_delayed_work_sync(struct delayed_work *dwork)
 }
 EXPORT_SYMBOL(cancel_delayed_work_sync);
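A minimal sketch of the delayed-work helpers documented above, with hypothetical names; it queues with a delay, forces immediate execution via flush_delayed_work(), and cancels on teardown.

#include <linux/workqueue.h>
#include <linux/jiffies.h>

static void ex_dwork_fn(struct work_struct *work) { /* ... */ }
static DECLARE_DELAYED_WORK(ex_dwork, ex_dwork_fn);

static void ex_delayed_usage(void)
{
	/* run ex_dwork_fn() roughly one second from now */
	queue_delayed_work(system_wq, &ex_dwork, HZ);

	/* queue immediately if the timer is still pending, then wait for completion */
	flush_delayed_work(&ex_dwork);

	/* teardown: kill the timer and wait out a running instance */
	cancel_delayed_work_sync(&ex_dwork);
}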
 
-/**
- * schedule_work_on - put work task on a specific cpu
- * @cpu: cpu to put the work task on
- * @work: job to be done
- *
- * This puts a job on a specific cpu
- */
-bool schedule_work_on(int cpu, struct work_struct *work)
-{
-       return queue_work_on(cpu, system_wq, work);
-}
-EXPORT_SYMBOL(schedule_work_on);
-
-/**
- * schedule_work - put work task in global workqueue
- * @work: job to be done
- *
- * Returns %false if @work was already on the kernel-global workqueue and
- * %true otherwise.
- *
- * This puts a job in the kernel-global workqueue if it was not already
- * queued and leaves it in the same position on the kernel-global
- * workqueue otherwise.
- */
-bool schedule_work(struct work_struct *work)
-{
-       return queue_work(system_wq, work);
-}
-EXPORT_SYMBOL(schedule_work);
-
-/**
- * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
- * @cpu: cpu to use
- * @dwork: job to be done
- * @delay: number of jiffies to wait
- *
- * After waiting for a given time this puts a job in the kernel-global
- * workqueue on the specified CPU.
- */
-bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
-                             unsigned long delay)
-{
-       return queue_delayed_work_on(cpu, system_wq, dwork, delay);
-}
-EXPORT_SYMBOL(schedule_delayed_work_on);
-
-/**
- * schedule_delayed_work - put work task in global workqueue after delay
- * @dwork: job to be done
- * @delay: number of jiffies to wait or 0 for immediate execution
- *
- * After waiting for a given time this puts a job in the kernel-global
- * workqueue.
- */
-bool schedule_delayed_work(struct delayed_work *dwork, unsigned long delay)
-{
-       return queue_delayed_work(system_wq, dwork, delay);
-}
-EXPORT_SYMBOL(schedule_delayed_work);
-
 /**
  * schedule_on_each_cpu - execute a function synchronously on each online CPU
  * @func: the function to call
@@ -3166,236 +3007,902 @@ int execute_in_process_context(work_func_t fn, struct execute_work *ew)
 }
 EXPORT_SYMBOL_GPL(execute_in_process_context);
 
-int keventd_up(void)
+#ifdef CONFIG_SYSFS
+/*
+ * Workqueues with the WQ_SYSFS flag set are visible to userland via
+ * /sys/bus/workqueue/devices/WQ_NAME.  All visible workqueues have the
+ * following attributes.
+ *
+ *  per_cpu    RO bool : whether the workqueue is per-cpu or unbound
+ *  max_active RW int  : maximum number of in-flight work items
+ *
+ * Unbound workqueues have the following extra attributes.
+ *
+ *  id         RO int  : the associated pool ID
+ *  nice       RW int  : nice value of the workers
+ *  cpumask    RW mask : bitmask of allowed CPUs for the workers
+ */
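For illustration only, a userland reader of these attributes might look like the sketch below; the "writeback" device name is just an example of a WQ_SYSFS workqueue and the unbound-only attributes are absent for per-cpu workqueues.

/* userland sketch, not kernel code */
#include <stdio.h>

int main(void)
{
	char buf[64];
	FILE *f = fopen("/sys/bus/workqueue/devices/writeback/max_active", "r");

	if (f) {
		if (fgets(buf, sizeof(buf), f))
			printf("max_active: %s", buf);
		fclose(f);
	}
	return 0;
}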
+struct wq_device {
+       struct workqueue_struct         *wq;
+       struct device                   dev;
+};
+
+static struct workqueue_struct *dev_to_wq(struct device *dev)
 {
-       return system_wq != NULL;
+       struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);
+
+       return wq_dev->wq;
 }
 
-static int alloc_cwqs(struct workqueue_struct *wq)
+static ssize_t wq_per_cpu_show(struct device *dev,
+                              struct device_attribute *attr, char *buf)
 {
-       /*
-        * cwqs are forced aligned according to WORK_STRUCT_FLAG_BITS.
-        * Make sure that the alignment isn't lower than that of
-        * unsigned long long.
-        */
-       const size_t size = sizeof(struct cpu_workqueue_struct);
-       const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS,
-                                  __alignof__(unsigned long long));
+       struct workqueue_struct *wq = dev_to_wq(dev);
 
-       if (!(wq->flags & WQ_UNBOUND))
-               wq->cpu_wq.pcpu = __alloc_percpu(size, align);
-       else {
-               void *ptr;
+       return scnprintf(buf, PAGE_SIZE, "%d\n", (bool)!(wq->flags & WQ_UNBOUND));
+}
 
-               /*
-                * Allocate enough room to align cwq and put an extra
-                * pointer at the end pointing back to the originally
-                * allocated pointer which will be used for free.
-                */
-               ptr = kzalloc(size + align + sizeof(void *), GFP_KERNEL);
-               if (ptr) {
-                       wq->cpu_wq.single = PTR_ALIGN(ptr, align);
-                       *(void **)(wq->cpu_wq.single + 1) = ptr;
-               }
-       }
+static ssize_t wq_max_active_show(struct device *dev,
+                                 struct device_attribute *attr, char *buf)
+{
+       struct workqueue_struct *wq = dev_to_wq(dev);
 
-       /* just in case, make sure it's actually aligned */
-       BUG_ON(!IS_ALIGNED(wq->cpu_wq.v, align));
-       return wq->cpu_wq.v ? 0 : -ENOMEM;
+       return scnprintf(buf, PAGE_SIZE, "%d\n", wq->saved_max_active);
 }
 
-static void free_cwqs(struct workqueue_struct *wq)
+static ssize_t wq_max_active_store(struct device *dev,
+                                  struct device_attribute *attr,
+                                  const char *buf, size_t count)
 {
-       if (!(wq->flags & WQ_UNBOUND))
-               free_percpu(wq->cpu_wq.pcpu);
-       else if (wq->cpu_wq.single) {
-               /* the pointer to free is stored right after the cwq */
-               kfree(*(void **)(wq->cpu_wq.single + 1));
-       }
+       struct workqueue_struct *wq = dev_to_wq(dev);
+       int val;
+
+       if (sscanf(buf, "%d", &val) != 1 || val <= 0)
+               return -EINVAL;
+
+       workqueue_set_max_active(wq, val);
+       return count;
 }
 
-static int wq_clamp_max_active(int max_active, unsigned int flags,
-                              const char *name)
+static struct device_attribute wq_sysfs_attrs[] = {
+       __ATTR(per_cpu, 0444, wq_per_cpu_show, NULL),
+       __ATTR(max_active, 0644, wq_max_active_show, wq_max_active_store),
+       __ATTR_NULL,
+};
+
+static ssize_t wq_pool_id_show(struct device *dev,
+                              struct device_attribute *attr, char *buf)
 {
-       int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE;
+       struct workqueue_struct *wq = dev_to_wq(dev);
+       struct worker_pool *pool;
+       int written;
 
-       if (max_active < 1 || max_active > lim)
-               pr_warn("workqueue: max_active %d requested for %s is out of range, clamping between %d and %d\n",
-                       max_active, name, 1, lim);
+       rcu_read_lock_sched();
+       pool = first_pwq(wq)->pool;
+       written = scnprintf(buf, PAGE_SIZE, "%d\n", pool->id);
+       rcu_read_unlock_sched();
 
-       return clamp_val(max_active, 1, lim);
+       return written;
 }
 
-struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
-                                              unsigned int flags,
-                                              int max_active,
-                                              struct lock_class_key *key,
-                                              const char *lock_name, ...)
+static ssize_t wq_nice_show(struct device *dev, struct device_attribute *attr,
+                           char *buf)
 {
-       va_list args, args1;
-       struct workqueue_struct *wq;
-       unsigned int cpu;
-       size_t namelen;
+       struct workqueue_struct *wq = dev_to_wq(dev);
+       int written;
 
-       /* determine namelen, allocate wq and format name */
-       va_start(args, lock_name);
-       va_copy(args1, args);
-       namelen = vsnprintf(NULL, 0, fmt, args) + 1;
+       rcu_read_lock_sched();
+       written = scnprintf(buf, PAGE_SIZE, "%d\n",
+                           first_pwq(wq)->pool->attrs->nice);
+       rcu_read_unlock_sched();
 
-       wq = kzalloc(sizeof(*wq) + namelen, GFP_KERNEL);
-       if (!wq)
-               goto err;
+       return written;
+}
 
-       vsnprintf(wq->name, namelen, fmt, args1);
-       va_end(args);
-       va_end(args1);
+/* prepare workqueue_attrs for sysfs store operations */
+static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq)
+{
+       struct workqueue_attrs *attrs;
 
-       /*
-        * Workqueues which may be used during memory reclaim should
-        * have a rescuer to guarantee forward progress.
-        */
-       if (flags & WQ_MEM_RECLAIM)
-               flags |= WQ_RESCUER;
+       attrs = alloc_workqueue_attrs(GFP_KERNEL);
+       if (!attrs)
+               return NULL;
 
-       max_active = max_active ?: WQ_DFL_ACTIVE;
-       max_active = wq_clamp_max_active(max_active, flags, wq->name);
+       rcu_read_lock_sched();
+       copy_workqueue_attrs(attrs, first_pwq(wq)->pool->attrs);
+       rcu_read_unlock_sched();
+       return attrs;
+}
 
-       /* init wq */
-       wq->flags = flags;
-       wq->saved_max_active = max_active;
-       mutex_init(&wq->flush_mutex);
-       atomic_set(&wq->nr_cwqs_to_flush, 0);
-       INIT_LIST_HEAD(&wq->flusher_queue);
-       INIT_LIST_HEAD(&wq->flusher_overflow);
+static ssize_t wq_nice_store(struct device *dev, struct device_attribute *attr,
+                            const char *buf, size_t count)
+{
+       struct workqueue_struct *wq = dev_to_wq(dev);
+       struct workqueue_attrs *attrs;
+       int ret;
 
-       lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
-       INIT_LIST_HEAD(&wq->list);
+       attrs = wq_sysfs_prep_attrs(wq);
+       if (!attrs)
+               return -ENOMEM;
+
+       if (sscanf(buf, "%d", &attrs->nice) == 1 &&
+           attrs->nice >= -20 && attrs->nice <= 19)
+               ret = apply_workqueue_attrs(wq, attrs);
+       else
+               ret = -EINVAL;
 
-       if (alloc_cwqs(wq) < 0)
-               goto err;
+       free_workqueue_attrs(attrs);
+       return ret ?: count;
+}
 
-       for_each_cwq_cpu(cpu, wq) {
-               struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
-               struct global_cwq *gcwq = get_gcwq(cpu);
-               int pool_idx = (bool)(flags & WQ_HIGHPRI);
+static ssize_t wq_cpumask_show(struct device *dev,
+                              struct device_attribute *attr, char *buf)
+{
+       struct workqueue_struct *wq = dev_to_wq(dev);
+       int written;
 
-               BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK);
-               cwq->pool = &gcwq->pools[pool_idx];
-               cwq->wq = wq;
-               cwq->flush_color = -1;
-               cwq->max_active = max_active;
-               INIT_LIST_HEAD(&cwq->delayed_works);
-       }
+       rcu_read_lock_sched();
+       written = cpumask_scnprintf(buf, PAGE_SIZE,
+                                   first_pwq(wq)->pool->attrs->cpumask);
+       rcu_read_unlock_sched();
 
-       if (flags & WQ_RESCUER) {
-               struct worker *rescuer;
+       written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
+       return written;
+}
 
-               if (!alloc_mayday_mask(&wq->mayday_mask, GFP_KERNEL))
-                       goto err;
+static ssize_t wq_cpumask_store(struct device *dev,
+                               struct device_attribute *attr,
+                               const char *buf, size_t count)
+{
+       struct workqueue_struct *wq = dev_to_wq(dev);
+       struct workqueue_attrs *attrs;
+       int ret;
 
-               wq->rescuer = rescuer = alloc_worker();
-               if (!rescuer)
-                       goto err;
+       attrs = wq_sysfs_prep_attrs(wq);
+       if (!attrs)
+               return -ENOMEM;
 
-               rescuer->task = kthread_create(rescuer_thread, wq, "%s",
-                                              wq->name);
-               if (IS_ERR(rescuer->task))
-                       goto err;
+       ret = cpumask_parse(buf, attrs->cpumask);
+       if (!ret)
+               ret = apply_workqueue_attrs(wq, attrs);
 
-               rescuer->task->flags |= PF_THREAD_BOUND;
-               wake_up_process(rescuer->task);
-       }
+       free_workqueue_attrs(attrs);
+       return ret ?: count;
+}
 
-       /*
-        * workqueue_lock protects global freeze state and workqueues
-        * list.  Grab it, set max_active accordingly and add the new
-        * workqueue to workqueues list.
-        */
-       spin_lock(&workqueue_lock);
+static struct device_attribute wq_sysfs_unbound_attrs[] = {
+       __ATTR(pool_id, 0444, wq_pool_id_show, NULL),
+       __ATTR(nice, 0644, wq_nice_show, wq_nice_store),
+       __ATTR(cpumask, 0644, wq_cpumask_show, wq_cpumask_store),
+       __ATTR_NULL,
+};
 
-       if (workqueue_freezing && wq->flags & WQ_FREEZABLE)
-               for_each_cwq_cpu(cpu, wq)
-                       get_cwq(cpu, wq)->max_active = 0;
+static struct bus_type wq_subsys = {
+       .name                           = "workqueue",
+       .dev_attrs                      = wq_sysfs_attrs,
+};
 
-       list_add(&wq->list, &workqueues);
+static int __init wq_sysfs_init(void)
+{
+       return subsys_virtual_register(&wq_subsys, NULL);
+}
+core_initcall(wq_sysfs_init);
 
-       spin_unlock(&workqueue_lock);
+static void wq_device_release(struct device *dev)
+{
+       struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);
 
-       return wq;
-err:
-       if (wq) {
-               free_cwqs(wq);
-               free_mayday_mask(wq->mayday_mask);
-               kfree(wq->rescuer);
-               kfree(wq);
-       }
-       return NULL;
+       kfree(wq_dev);
 }
-EXPORT_SYMBOL_GPL(__alloc_workqueue_key);
 
 /**
- * destroy_workqueue - safely terminate a workqueue
- * @wq: target workqueue
+ * workqueue_sysfs_register - make a workqueue visible in sysfs
+ * @wq: the workqueue to register
  *
- * Safely destroy a workqueue. All work currently pending will be done first.
+ * Expose @wq in sysfs under /sys/bus/workqueue/devices.
+ * alloc_workqueue*() automatically calls this function if WQ_SYSFS is set,
+ * which is the preferred method.
+ *
+ * Workqueue user should use this function directly iff it wants to apply
+ * workqueue_attrs before making the workqueue visible in sysfs; otherwise,
+ * apply_workqueue_attrs() may race against userland updating the
+ * attributes.
+ *
+ * Returns 0 on success, -errno on failure.
  */
-void destroy_workqueue(struct workqueue_struct *wq)
+int workqueue_sysfs_register(struct workqueue_struct *wq)
 {
-       unsigned int cpu;
+       struct wq_device *wq_dev;
+       int ret;
 
-       /* drain it before proceeding with destruction */
-       drain_workqueue(wq);
+       /*
+        * Adjusting max_active or creating new pwqs by applying
+        * attributes breaks the ordering guarantee.  Disallow exposing ordered
+        * workqueues.
+        */
+       if (WARN_ON(wq->flags & __WQ_ORDERED))
+               return -EINVAL;
+
+       wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL);
+       if (!wq_dev)
+               return -ENOMEM;
+
+       wq_dev->wq = wq;
+       wq_dev->dev.bus = &wq_subsys;
+       wq_dev->dev.init_name = wq->name;
+       wq_dev->dev.release = wq_device_release;
 
        /*
-        * wq list is used to freeze wq, remove from list after
-        * flushing is complete in case freeze races us.
+        * unbound_attrs are created separately.  Suppress uevent until
+        * everything is ready.
         */
-       spin_lock(&workqueue_lock);
-       list_del(&wq->list);
-       spin_unlock(&workqueue_lock);
+       dev_set_uevent_suppress(&wq_dev->dev, true);
 
-       /* sanity check */
-       for_each_cwq_cpu(cpu, wq) {
-               struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
-               int i;
+       ret = device_register(&wq_dev->dev);
+       if (ret) {
+               kfree(wq_dev);
+               wq->wq_dev = NULL;
+               return ret;
+       }
 
-               for (i = 0; i < WORK_NR_COLORS; i++)
-                       BUG_ON(cwq->nr_in_flight[i]);
-               BUG_ON(cwq->nr_active);
-               BUG_ON(!list_empty(&cwq->delayed_works));
+       if (wq->flags & WQ_UNBOUND) {
+               struct device_attribute *attr;
+
+               for (attr = wq_sysfs_unbound_attrs; attr->attr.name; attr++) {
+                       ret = device_create_file(&wq_dev->dev, attr);
+                       if (ret) {
+                               device_unregister(&wq_dev->dev);
+                               wq->wq_dev = NULL;
+                               return ret;
+                       }
+               }
        }
 
-       if (wq->flags & WQ_RESCUER) {
-               kthread_stop(wq->rescuer->task);
-               free_mayday_mask(wq->mayday_mask);
-               kfree(wq->rescuer);
+       kobject_uevent(&wq_dev->dev.kobj, KOBJ_ADD);
+       return 0;
+}
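A hedged sketch of the direct-registration path described in the comment above: create the workqueue without WQ_SYSFS, apply attributes, then expose it.  All names and values below are illustrative.

#include <linux/workqueue.h>

static struct workqueue_struct *ex_wq;

static int ex_setup(void)
{
	struct workqueue_attrs *attrs;
	int ret = -ENOMEM;

	ex_wq = alloc_workqueue("ex_unbound", WQ_UNBOUND, 0);
	if (!ex_wq)
		return -ENOMEM;

	attrs = alloc_workqueue_attrs(GFP_KERNEL);
	if (!attrs)
		goto err;

	attrs->nice = -5;			/* example value */
	ret = apply_workqueue_attrs(ex_wq, attrs);
	free_workqueue_attrs(attrs);
	if (ret)
		goto err;

	/* only now make @ex_wq visible so userland can't race the attrs */
	ret = workqueue_sysfs_register(ex_wq);
	if (ret)
		goto err;
	return 0;
err:
	destroy_workqueue(ex_wq);
	return ret;
}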
+
+/**
+ * workqueue_sysfs_unregister - undo workqueue_sysfs_register()
+ * @wq: the workqueue to unregister
+ *
+ * If @wq is registered to sysfs by workqueue_sysfs_register(), unregister.
+ */
+static void workqueue_sysfs_unregister(struct workqueue_struct *wq)
+{
+       struct wq_device *wq_dev = wq->wq_dev;
+
+       if (!wq->wq_dev)
+               return;
+
+       wq->wq_dev = NULL;
+       device_unregister(&wq_dev->dev);
+}
+#else  /* CONFIG_SYSFS */
+static void workqueue_sysfs_unregister(struct workqueue_struct *wq)    { }
+#endif /* CONFIG_SYSFS */
+
+/**
+ * free_workqueue_attrs - free a workqueue_attrs
+ * @attrs: workqueue_attrs to free
+ *
+ * Undo alloc_workqueue_attrs().
+ */
+void free_workqueue_attrs(struct workqueue_attrs *attrs)
+{
+       if (attrs) {
+               free_cpumask_var(attrs->cpumask);
+               kfree(attrs);
        }
+}
 
-       free_cwqs(wq);
-       kfree(wq);
+/**
+ * alloc_workqueue_attrs - allocate a workqueue_attrs
+ * @gfp_mask: allocation mask to use
+ *
+ * Allocate a new workqueue_attrs, initialize with default settings and
+ * return it.  Returns NULL on failure.
+ */
+struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask)
+{
+       struct workqueue_attrs *attrs;
+
+       attrs = kzalloc(sizeof(*attrs), gfp_mask);
+       if (!attrs)
+               goto fail;
+       if (!alloc_cpumask_var(&attrs->cpumask, gfp_mask))
+               goto fail;
+
+       cpumask_setall(attrs->cpumask);
+       return attrs;
+fail:
+       free_workqueue_attrs(attrs);
+       return NULL;
+}
+
+static void copy_workqueue_attrs(struct workqueue_attrs *to,
+                                const struct workqueue_attrs *from)
+{
+       to->nice = from->nice;
+       cpumask_copy(to->cpumask, from->cpumask);
+}
+
+/*
+ * Hacky implementation of jhash of bitmaps which only considers the
+ * specified number of bits.  We probably want a proper implementation in
+ * include/linux/jhash.h.
+ */
+static u32 jhash_bitmap(const unsigned long *bitmap, int bits, u32 hash)
+{
+       int nr_longs = bits / BITS_PER_LONG;
+       int nr_leftover = bits % BITS_PER_LONG;
+       unsigned long leftover = 0;
+
+       if (nr_longs)
+               hash = jhash(bitmap, nr_longs * sizeof(long), hash);
+       if (nr_leftover) {
+               bitmap_copy(&leftover, bitmap + nr_longs, nr_leftover);
+               hash = jhash(&leftover, sizeof(long), hash);
+       }
+       return hash;
+}
+
+/* hash value of the content of @attr */
+static u32 wqattrs_hash(const struct workqueue_attrs *attrs)
+{
+       u32 hash = 0;
+
+       hash = jhash_1word(attrs->nice, hash);
+       hash = jhash_bitmap(cpumask_bits(attrs->cpumask), nr_cpu_ids, hash);
+       return hash;
+}
+
+/* content equality test */
+static bool wqattrs_equal(const struct workqueue_attrs *a,
+                         const struct workqueue_attrs *b)
+{
+       if (a->nice != b->nice)
+               return false;
+       if (!cpumask_equal(a->cpumask, b->cpumask))
+               return false;
+       return true;
 }
-EXPORT_SYMBOL_GPL(destroy_workqueue);
 
 /**
- * cwq_set_max_active - adjust max_active of a cwq
- * @cwq: target cpu_workqueue_struct
- * @max_active: new max_active value.
+ * init_worker_pool - initialize a newly zalloc'd worker_pool
+ * @pool: worker_pool to initialize
  *
- * Set @cwq->max_active to @max_active and activate delayed works if
- * increased.
+ * Initialize a newly zalloc'd @pool.  It also allocates @pool->attrs.
+ * Returns 0 on success, -errno on failure.  Even on failure, all fields
+ * inside @pool proper are initialized and put_unbound_pool() can be called
+ * on @pool safely to release it.
+ */
+static int init_worker_pool(struct worker_pool *pool)
+{
+       spin_lock_init(&pool->lock);
+       pool->id = -1;
+       pool->cpu = -1;
+       pool->flags |= POOL_DISASSOCIATED;
+       INIT_LIST_HEAD(&pool->worklist);
+       INIT_LIST_HEAD(&pool->idle_list);
+       hash_init(pool->busy_hash);
+
+       init_timer_deferrable(&pool->idle_timer);
+       pool->idle_timer.function = idle_worker_timeout;
+       pool->idle_timer.data = (unsigned long)pool;
+
+       setup_timer(&pool->mayday_timer, pool_mayday_timeout,
+                   (unsigned long)pool);
+
+       mutex_init(&pool->manager_arb);
+       mutex_init(&pool->manager_mutex);
+       idr_init(&pool->worker_idr);
+
+       INIT_HLIST_NODE(&pool->hash_node);
+       pool->refcnt = 1;
+
+       /* shouldn't fail above this point */
+       pool->attrs = alloc_workqueue_attrs(GFP_KERNEL);
+       if (!pool->attrs)
+               return -ENOMEM;
+       return 0;
+}
+
+static void rcu_free_pool(struct rcu_head *rcu)
+{
+       struct worker_pool *pool = container_of(rcu, struct worker_pool, rcu);
+
+       idr_destroy(&pool->worker_idr);
+       free_workqueue_attrs(pool->attrs);
+       kfree(pool);
+}
+
+/**
+ * put_unbound_pool - put a worker_pool
+ * @pool: worker_pool to put
  *
- * CONTEXT:
- * spin_lock_irq(gcwq->lock).
+ * Put @pool.  If its refcnt reaches zero, it gets destroyed in sched-RCU
+ * safe manner.  get_unbound_pool() calls this function on its failure path
+ * and this function should be able to release pools which went through,
+ * successfully or not, init_worker_pool().
  */
-static void cwq_set_max_active(struct cpu_workqueue_struct *cwq, int max_active)
+static void put_unbound_pool(struct worker_pool *pool)
 {
-       cwq->max_active = max_active;
+       struct worker *worker;
+
+       mutex_lock(&wq_pool_mutex);
+       if (--pool->refcnt) {
+               mutex_unlock(&wq_pool_mutex);
+               return;
+       }
+
+       /* sanity checks */
+       if (WARN_ON(!(pool->flags & POOL_DISASSOCIATED)) ||
+           WARN_ON(!list_empty(&pool->worklist))) {
+               mutex_unlock(&wq_pool_mutex);
+               return;
+       }
+
+       /* release id and unhash */
+       if (pool->id >= 0)
+               idr_remove(&worker_pool_idr, pool->id);
+       hash_del(&pool->hash_node);
+
+       mutex_unlock(&wq_pool_mutex);
 
-       while (!list_empty(&cwq->delayed_works) &&
-              cwq->nr_active < cwq->max_active)
-               cwq_activate_first_delayed(cwq);
+       /*
+        * Become the manager and destroy all workers.  Grabbing
+        * manager_arb prevents @pool's workers from blocking on
+        * manager_mutex.
+        */
+       mutex_lock(&pool->manager_arb);
+       mutex_lock(&pool->manager_mutex);
+       spin_lock_irq(&pool->lock);
+
+       while ((worker = first_worker(pool)))
+               destroy_worker(worker);
+       WARN_ON(pool->nr_workers || pool->nr_idle);
+
+       spin_unlock_irq(&pool->lock);
+       mutex_unlock(&pool->manager_mutex);
+       mutex_unlock(&pool->manager_arb);
+
+       /* shut down the timers */
+       del_timer_sync(&pool->idle_timer);
+       del_timer_sync(&pool->mayday_timer);
+
+       /* sched-RCU protected to allow dereferences from get_work_pool() */
+       call_rcu_sched(&pool->rcu, rcu_free_pool);
 }
 
+/**
+ * get_unbound_pool - get a worker_pool with the specified attributes
+ * @attrs: the attributes of the worker_pool to get
+ *
+ * Obtain a worker_pool which has the same attributes as @attrs, bump the
+ * reference count and return it.  If there already is a matching
+ * worker_pool, it will be used; otherwise, this function attempts to
+ * create a new one.  On failure, returns NULL.
+ */
+static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
+{
+       u32 hash = wqattrs_hash(attrs);
+       struct worker_pool *pool;
+
+       mutex_lock(&wq_pool_mutex);
+
+       /* do we already have a matching pool? */
+       hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) {
+               if (wqattrs_equal(pool->attrs, attrs)) {
+                       pool->refcnt++;
+                       goto out_unlock;
+               }
+       }
+
+       /* nope, create a new one */
+       pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+       if (!pool || init_worker_pool(pool) < 0)
+               goto fail;
+
+       if (workqueue_freezing)
+               pool->flags |= POOL_FREEZING;
+
+       lockdep_set_subclass(&pool->lock, 1);   /* see put_pwq() */
+       copy_workqueue_attrs(pool->attrs, attrs);
+
+       if (worker_pool_assign_id(pool) < 0)
+               goto fail;
+
+       /* create and start the initial worker */
+       if (create_and_start_worker(pool) < 0)
+               goto fail;
+
+       /* install */
+       hash_add(unbound_pool_hash, &pool->hash_node, hash);
+out_unlock:
+       mutex_unlock(&wq_pool_mutex);
+       return pool;
+fail:
+       mutex_unlock(&wq_pool_mutex);
+       if (pool)
+               put_unbound_pool(pool);
+       return NULL;
+}
+
+static void rcu_free_pwq(struct rcu_head *rcu)
+{
+       kmem_cache_free(pwq_cache,
+                       container_of(rcu, struct pool_workqueue, rcu));
+}
+
+/*
+ * Scheduled on system_wq by put_pwq() when an unbound pwq hits zero refcnt
+ * and needs to be destroyed.
+ */
+static void pwq_unbound_release_workfn(struct work_struct *work)
+{
+       struct pool_workqueue *pwq = container_of(work, struct pool_workqueue,
+                                                 unbound_release_work);
+       struct workqueue_struct *wq = pwq->wq;
+       struct worker_pool *pool = pwq->pool;
+
+       if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
+               return;
+
+       /*
+        * Unlink @pwq.  Synchronization against flush_mutex isn't strictly
+        * necessary on release but do it anyway.  It's easier to verify
+        * and consistent with the linking path.
+        */
+       mutex_lock(&wq->flush_mutex);
+       spin_lock_irq(&pwq_lock);
+       list_del_rcu(&pwq->pwqs_node);
+       spin_unlock_irq(&pwq_lock);
+       mutex_unlock(&wq->flush_mutex);
+
+       put_unbound_pool(pool);
+       call_rcu_sched(&pwq->rcu, rcu_free_pwq);
+
+       /*
+        * If we're the last pwq going away, @wq is already dead and no one
+        * is gonna access it anymore.  Free it.
+        */
+       if (list_empty(&wq->pwqs))
+               kfree(wq);
+}
+
+/**
+ * pwq_adjust_max_active - update a pwq's max_active to the current setting
+ * @pwq: target pool_workqueue
+ *
+ * If @pwq isn't freezing, set @pwq->max_active to the associated
+ * workqueue's saved_max_active and activate delayed work items
+ * accordingly.  If @pwq is freezing, clear @pwq->max_active to zero.
+ */
+static void pwq_adjust_max_active(struct pool_workqueue *pwq)
+{
+       struct workqueue_struct *wq = pwq->wq;
+       bool freezable = wq->flags & WQ_FREEZABLE;
+
+       /* for @wq->saved_max_active */
+       lockdep_assert_held(&pwq_lock);
+
+       /* fast exit for non-freezable wqs */
+       if (!freezable && pwq->max_active == wq->saved_max_active)
+               return;
+
+       spin_lock(&pwq->pool->lock);
+
+       if (!freezable || !(pwq->pool->flags & POOL_FREEZING)) {
+               pwq->max_active = wq->saved_max_active;
+
+               while (!list_empty(&pwq->delayed_works) &&
+                      pwq->nr_active < pwq->max_active)
+                       pwq_activate_first_delayed(pwq);
+
+               /*
+                * Need to kick a worker after a thaw or when an unbound wq's
+                * max_active is bumped.  It's a slow path.  Do it always.
+                */
+               wake_up_worker(pwq->pool);
+       } else {
+               pwq->max_active = 0;
+       }
+
+       spin_unlock(&pwq->pool->lock);
+}
+
+static void init_and_link_pwq(struct pool_workqueue *pwq,
+                             struct workqueue_struct *wq,
+                             struct worker_pool *pool,
+                             struct pool_workqueue **p_last_pwq)
+{
+       BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK);
+
+       pwq->pool = pool;
+       pwq->wq = wq;
+       pwq->flush_color = -1;
+       pwq->refcnt = 1;
+       INIT_LIST_HEAD(&pwq->delayed_works);
+       INIT_LIST_HEAD(&pwq->mayday_node);
+       INIT_WORK(&pwq->unbound_release_work, pwq_unbound_release_workfn);
+
+       mutex_lock(&wq->flush_mutex);
+       spin_lock_irq(&pwq_lock);
+
+       /*
+        * Set the matching work_color.  This is synchronized with
+        * flush_mutex to avoid confusing flush_workqueue().
+        */
+       if (p_last_pwq)
+               *p_last_pwq = first_pwq(wq);
+       pwq->work_color = wq->work_color;
+
+       /* sync max_active to the current setting */
+       pwq_adjust_max_active(pwq);
+
+       /* link in @pwq */
+       list_add_rcu(&pwq->pwqs_node, &wq->pwqs);
+
+       spin_unlock_irq(&pwq_lock);
+       mutex_unlock(&wq->flush_mutex);
+}
+
+/**
+ * apply_workqueue_attrs - apply new workqueue_attrs to an unbound workqueue
+ * @wq: the target workqueue
+ * @attrs: the workqueue_attrs to apply, allocated with alloc_workqueue_attrs()
+ *
+ * Apply @attrs to an unbound workqueue @wq.  If @attrs doesn't match the
+ * current attributes, a new pwq is created and made the first pwq which
+ * will serve all new work items.  Older pwqs are released as in-flight
+ * work items finish.  Note that a work item which repeatedly requeues
+ * itself back-to-back will stay on its current pwq.
+ *
+ * Performs GFP_KERNEL allocations.  Returns 0 on success and -errno on
+ * failure.
+ */
+int apply_workqueue_attrs(struct workqueue_struct *wq,
+                         const struct workqueue_attrs *attrs)
+{
+       struct pool_workqueue *pwq, *last_pwq;
+       struct worker_pool *pool;
+
+       /* only unbound workqueues can change attributes */
+       if (WARN_ON(!(wq->flags & WQ_UNBOUND)))
+               return -EINVAL;
+
+       /* creating multiple pwqs breaks ordering guarantee */
+       if (WARN_ON((wq->flags & __WQ_ORDERED) && !list_empty(&wq->pwqs)))
+               return -EINVAL;
+
+       pwq = kmem_cache_zalloc(pwq_cache, GFP_KERNEL);
+       if (!pwq)
+               return -ENOMEM;
+
+       pool = get_unbound_pool(attrs);
+       if (!pool) {
+               kmem_cache_free(pwq_cache, pwq);
+               return -ENOMEM;
+       }
+
+       init_and_link_pwq(pwq, wq, pool, &last_pwq);
+       if (last_pwq) {
+               spin_lock_irq(&last_pwq->pool->lock);
+               put_pwq(last_pwq);
+               spin_unlock_irq(&last_pwq->pool->lock);
+       }
+
+       return 0;
+}
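As a usage sketch of the attrs interface above, a caller might pin an unbound workqueue's workers to one CPU; the helper name and CPU choice are hypothetical, and the workqueue must be WQ_UNBOUND.

#include <linux/workqueue.h>
#include <linux/cpumask.h>

static int ex_pin_unbound_wq(struct workqueue_struct *wq, int cpu)
{
	struct workqueue_attrs *attrs;
	int ret;

	attrs = alloc_workqueue_attrs(GFP_KERNEL);
	if (!attrs)
		return -ENOMEM;

	cpumask_clear(attrs->cpumask);
	cpumask_set_cpu(cpu, attrs->cpumask);

	/* installs a new first pwq; old pwqs drain out as in-flight work finishes */
	ret = apply_workqueue_attrs(wq, attrs);
	free_workqueue_attrs(attrs);
	return ret;
}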
+
+static int alloc_and_link_pwqs(struct workqueue_struct *wq)
+{
+       bool highpri = wq->flags & WQ_HIGHPRI;
+       int cpu;
+
+       if (!(wq->flags & WQ_UNBOUND)) {
+               wq->cpu_pwqs = alloc_percpu(struct pool_workqueue);
+               if (!wq->cpu_pwqs)
+                       return -ENOMEM;
+
+               for_each_possible_cpu(cpu) {
+                       struct pool_workqueue *pwq =
+                               per_cpu_ptr(wq->cpu_pwqs, cpu);
+                       struct worker_pool *cpu_pools =
+                               per_cpu(cpu_worker_pools, cpu);
+
+                       init_and_link_pwq(pwq, wq, &cpu_pools[highpri], NULL);
+               }
+               return 0;
+       } else {
+               return apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]);
+       }
+}
+
+static int wq_clamp_max_active(int max_active, unsigned int flags,
+                              const char *name)
+{
+       int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE;
+
+       if (max_active < 1 || max_active > lim)
+               pr_warn("workqueue: max_active %d requested for %s is out of range, clamping between %d and %d\n",
+                       max_active, name, 1, lim);
+
+       return clamp_val(max_active, 1, lim);
+}
+
+struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
+                                              unsigned int flags,
+                                              int max_active,
+                                              struct lock_class_key *key,
+                                              const char *lock_name, ...)
+{
+       va_list args, args1;
+       struct workqueue_struct *wq;
+       struct pool_workqueue *pwq;
+       size_t namelen;
+
+       /* determine namelen, allocate wq and format name */
+       va_start(args, lock_name);
+       va_copy(args1, args);
+       namelen = vsnprintf(NULL, 0, fmt, args) + 1;
+
+       wq = kzalloc(sizeof(*wq) + namelen, GFP_KERNEL);
+       if (!wq)
+               return NULL;
+
+       vsnprintf(wq->name, namelen, fmt, args1);
+       va_end(args);
+       va_end(args1);
+
+       max_active = max_active ?: WQ_DFL_ACTIVE;
+       max_active = wq_clamp_max_active(max_active, flags, wq->name);
+
+       /* init wq */
+       wq->flags = flags;
+       wq->saved_max_active = max_active;
+       mutex_init(&wq->flush_mutex);
+       atomic_set(&wq->nr_pwqs_to_flush, 0);
+       INIT_LIST_HEAD(&wq->pwqs);
+       INIT_LIST_HEAD(&wq->flusher_queue);
+       INIT_LIST_HEAD(&wq->flusher_overflow);
+       INIT_LIST_HEAD(&wq->maydays);
+
+       lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
+       INIT_LIST_HEAD(&wq->list);
+
+       if (alloc_and_link_pwqs(wq) < 0)
+               goto err_free_wq;
+
+       /*
+        * Workqueues which may be used during memory reclaim should
+        * have a rescuer to guarantee forward progress.
+        */
+       if (flags & WQ_MEM_RECLAIM) {
+               struct worker *rescuer;
+
+               rescuer = alloc_worker();
+               if (!rescuer)
+                       goto err_destroy;
+
+               rescuer->rescue_wq = wq;
+               rescuer->task = kthread_create(rescuer_thread, rescuer, "%s",
+                                              wq->name);
+               if (IS_ERR(rescuer->task)) {
+                       kfree(rescuer);
+                       goto err_destroy;
+               }
+
+               wq->rescuer = rescuer;
+               rescuer->task->flags |= PF_NO_SETAFFINITY;
+               wake_up_process(rescuer->task);
+       }
+
+       if ((wq->flags & WQ_SYSFS) && workqueue_sysfs_register(wq))
+               goto err_destroy;
+
+       /*
+        * wq_pool_mutex protects global freeze state and workqueues list.
+        * Grab it, adjust max_active and add the new @wq to workqueues
+        * list.
+        */
+       mutex_lock(&wq_pool_mutex);
+
+       spin_lock_irq(&pwq_lock);
+       for_each_pwq(pwq, wq)
+               pwq_adjust_max_active(pwq);
+       spin_unlock_irq(&pwq_lock);
+
+       list_add(&wq->list, &workqueues);
+
+       mutex_unlock(&wq_pool_mutex);
+
+       return wq;
+
+err_free_wq:
+       kfree(wq);
+       return NULL;
+err_destroy:
+       destroy_workqueue(wq);
+       return NULL;
+}
+EXPORT_SYMBOL_GPL(__alloc_workqueue_key);
+
+/**
+ * destroy_workqueue - safely terminate a workqueue
+ * @wq: target workqueue
+ *
+ * Safely destroy a workqueue. All work currently pending will be done first.
+ */
+void destroy_workqueue(struct workqueue_struct *wq)
+{
+       struct pool_workqueue *pwq;
+
+       /* drain it before proceeding with destruction */
+       drain_workqueue(wq);
+
+       /* sanity checks */
+       spin_lock_irq(&pwq_lock);
+       for_each_pwq(pwq, wq) {
+               int i;
+
+               for (i = 0; i < WORK_NR_COLORS; i++) {
+                       if (WARN_ON(pwq->nr_in_flight[i])) {
+                               spin_unlock_irq(&pwq_lock);
+                               return;
+                       }
+               }
+
+               if (WARN_ON(pwq->refcnt > 1) ||
+                   WARN_ON(pwq->nr_active) ||
+                   WARN_ON(!list_empty(&pwq->delayed_works))) {
+                       spin_unlock_irq(&pwq_lock);
+                       return;
+               }
+       }
+       spin_unlock_irq(&pwq_lock);
+
+       /*
+        * wq list is used to freeze wq, remove from list after
+        * flushing is complete in case freeze races us.
+        */
+       mutex_lock(&wq_pool_mutex);
+       list_del_init(&wq->list);
+       mutex_unlock(&wq_pool_mutex);
+
+       workqueue_sysfs_unregister(wq);
+
+       if (wq->rescuer) {
+               kthread_stop(wq->rescuer->task);
+               kfree(wq->rescuer);
+               wq->rescuer = NULL;
+       }
+
+       if (!(wq->flags & WQ_UNBOUND)) {
+               /*
+                * The base ref is never dropped on per-cpu pwqs.  Directly
+                * free the pwqs and wq.
+                */
+               free_percpu(wq->cpu_pwqs);
+               kfree(wq);
+       } else {
+               /*
+                * We're the sole accessor of @wq at this point.  Directly
+                * access the first pwq and put the base ref.  As both pwqs
+                * and pools are sched-RCU protected, the lock operations
+                * are safe.  @wq will be freed when the last pwq is
+                * released.
+                */
+               pwq = list_first_entry(&wq->pwqs, struct pool_workqueue,
+                                      pwqs_node);
+               spin_lock_irq(&pwq->pool->lock);
+               put_pwq(pwq);
+               spin_unlock_irq(&pwq->pool->lock);
+       }
+}
+EXPORT_SYMBOL_GPL(destroy_workqueue);
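Putting the allocation and destruction paths together, a hedged module-style sketch with hypothetical names:

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *ex_wq;

static void ex_fn(struct work_struct *work)
{
	pr_info("example work ran\n");
}
static DECLARE_WORK(ex_work, ex_fn);

static int __init ex_init(void)
{
	ex_wq = alloc_workqueue("ex_wq", WQ_MEM_RECLAIM, 1);
	if (!ex_wq)
		return -ENOMEM;
	queue_work(ex_wq, &ex_work);
	return 0;
}

static void __exit ex_exit(void)
{
	/* drains pending work first, then releases pwqs, rescuer and wq */
	destroy_workqueue(ex_wq);
}

module_init(ex_init);
module_exit(ex_exit);
MODULE_LICENSE("GPL");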
+
 /**
  * workqueue_set_max_active - adjust max_active of a workqueue
  * @wq: target workqueue
@@ -3408,29 +3915,37 @@ static void cwq_set_max_active(struct cpu_workqueue_struct *cwq, int max_active)
  */
 void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
 {
-       unsigned int cpu;
+       struct pool_workqueue *pwq;
+
+       /* disallow meddling with max_active for ordered workqueues */
+       if (WARN_ON(wq->flags & __WQ_ORDERED))
+               return;
 
        max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
 
-       spin_lock(&workqueue_lock);
+       spin_lock_irq(&pwq_lock);
 
        wq->saved_max_active = max_active;
 
-       for_each_cwq_cpu(cpu, wq) {
-               struct global_cwq *gcwq = get_gcwq(cpu);
+       for_each_pwq(pwq, wq)
+               pwq_adjust_max_active(pwq);
 
-               spin_lock_irq(&gcwq->lock);
-
-               if (!(wq->flags & WQ_FREEZABLE) ||
-                   !(gcwq->flags & GCWQ_FREEZING))
-                       cwq_set_max_active(get_cwq(gcwq->cpu, wq), max_active);
+       spin_unlock_irq(&pwq_lock);
+}
+EXPORT_SYMBOL_GPL(workqueue_set_max_active);
 
-               spin_unlock_irq(&gcwq->lock);
-       }
+/**
+ * current_is_workqueue_rescuer - is %current workqueue rescuer?
+ *
+ * Determine whether %current is a workqueue rescuer.  Can be used from
+ * work functions to determine whether it's being run off the rescuer task.
+ */
+bool current_is_workqueue_rescuer(void)
+{
+       struct worker *worker = current_wq_worker();
 
-       spin_unlock(&workqueue_lock);
+       return worker && worker->rescue_wq;
 }
-EXPORT_SYMBOL_GPL(workqueue_set_max_active);
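A short sketch of the intended use from a work function, as the comment above suggests; the work function is hypothetical.

#include <linux/workqueue.h>

static void ex_reclaim_work_fn(struct work_struct *work)
{
	if (current_is_workqueue_rescuer()) {
		/* running off the rescuer: memory is tight, do the bare minimum */
		return;
	}

	/* normal, possibly allocation-heavy processing goes here */
}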
 
 /**
  * workqueue_congested - test whether a workqueue is congested
@@ -3444,28 +3959,24 @@ EXPORT_SYMBOL_GPL(workqueue_set_max_active);
  * RETURNS:
  * %true if congested, %false otherwise.
  */
-bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq)
+bool workqueue_congested(int cpu, struct workqueue_struct *wq)
 {
-       struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
+       struct pool_workqueue *pwq;
+       bool ret;
 
-       return !list_empty(&cwq->delayed_works);
-}
-EXPORT_SYMBOL_GPL(workqueue_congested);
+       rcu_read_lock_sched();
 
-/**
- * work_cpu - return the last known associated cpu for @work
- * @work: the work of interest
- *
- * RETURNS:
- * CPU number if @work was ever queued.  WORK_CPU_NONE otherwise.
- */
-unsigned int work_cpu(struct work_struct *work)
-{
-       struct global_cwq *gcwq = get_work_gcwq(work);
+       if (!(wq->flags & WQ_UNBOUND))
+               pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
+       else
+               pwq = first_pwq(wq);
+
+       ret = !list_empty(&pwq->delayed_works);
+       rcu_read_unlock_sched();
 
-       return gcwq ? gcwq->cpu : WORK_CPU_NONE;
+       return ret;
 }
-EXPORT_SYMBOL_GPL(work_cpu);
+EXPORT_SYMBOL_GPL(workqueue_congested);
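Since the result is only advisory, a producer might use it to back off rather than to make correctness decisions, as in this hypothetical sketch.

#include <linux/workqueue.h>

static bool ex_try_queue_on(int cpu, struct workqueue_struct *wq,
			    struct work_struct *work)
{
	/* purely a hint: skip queueing while delayed works are piling up */
	if (workqueue_congested(cpu, wq))
		return false;

	return queue_work_on(cpu, wq, work);
}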
 
 /**
  * work_busy - test whether a work is currently pending or running
@@ -3474,29 +3985,28 @@ EXPORT_SYMBOL_GPL(work_cpu);
  * Test whether @work is currently pending or running.  There is no
  * synchronization around this function and the test result is
  * unreliable and only useful as advisory hints or for debugging.
- * Especially for reentrant wqs, the pending state might hide the
- * running state.
  *
  * RETURNS:
  * OR'd bitmask of WORK_BUSY_* bits.
  */
 unsigned int work_busy(struct work_struct *work)
 {
-       struct global_cwq *gcwq = get_work_gcwq(work);
+       struct worker_pool *pool;
        unsigned long flags;
        unsigned int ret = 0;
 
-       if (!gcwq)
-               return 0;
-
-       spin_lock_irqsave(&gcwq->lock, flags);
-
        if (work_pending(work))
                ret |= WORK_BUSY_PENDING;
-       if (find_worker_executing_work(gcwq, work))
-               ret |= WORK_BUSY_RUNNING;
 
-       spin_unlock_irqrestore(&gcwq->lock, flags);
+       local_irq_save(flags);
+       pool = get_work_pool(work);
+       if (pool) {
+               spin_lock(&pool->lock);
+               if (find_worker_executing_work(pool, work))
+                       ret |= WORK_BUSY_RUNNING;
+               spin_unlock(&pool->lock);
+       }
+       local_irq_restore(flags);
 
        return ret;
 }
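Given that the result is unreliable by design, a debugging-only sketch using the returned bitmask might look like this (names hypothetical).

#include <linux/workqueue.h>
#include <linux/printk.h>

static void ex_report_busy(struct work_struct *work)
{
	unsigned int busy = work_busy(work);

	pr_info("example work: %spending, %srunning\n",
		busy & WORK_BUSY_PENDING ? "" : "not ",
		busy & WORK_BUSY_RUNNING ? "" : "not ");
}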
@@ -3506,65 +4016,45 @@ EXPORT_SYMBOL_GPL(work_busy);
  * CPU hotplug.
  *
  * There are two challenges in supporting CPU hotplug.  Firstly, there
- * are a lot of assumptions on strong associations among work, cwq and
- * gcwq which make migrating pending and scheduled works very
+ * are a lot of assumptions on strong associations among work, pwq and
+ * pool which make migrating pending and scheduled works very
  * difficult to implement without impacting hot paths.  Secondly,
- * gcwqs serve mix of short, long and very long running works making
+ * worker pools serve a mix of short, long and very long running works making
  * blocked draining impractical.
  *
- * This is solved by allowing a gcwq to be disassociated from the CPU
+ * This is solved by allowing the pools to be disassociated from the CPU
  * running as an unbound one and allowing it to be reattached later if the
  * cpu comes back online.
  */
 
-/* claim manager positions of all pools */
-static void gcwq_claim_assoc_and_lock(struct global_cwq *gcwq)
-{
-       struct worker_pool *pool;
-
-       for_each_worker_pool(pool, gcwq)
-               mutex_lock_nested(&pool->assoc_mutex, pool - gcwq->pools);
-       spin_lock_irq(&gcwq->lock);
-}
-
-/* release manager positions */
-static void gcwq_release_assoc_and_unlock(struct global_cwq *gcwq)
+static void wq_unbind_fn(struct work_struct *work)
 {
-       struct worker_pool *pool;
-
-       spin_unlock_irq(&gcwq->lock);
-       for_each_worker_pool(pool, gcwq)
-               mutex_unlock(&pool->assoc_mutex);
-}
-
-static void gcwq_unbind_fn(struct work_struct *work)
-{
-       struct global_cwq *gcwq = get_gcwq(smp_processor_id());
+       int cpu = smp_processor_id();
        struct worker_pool *pool;
        struct worker *worker;
-       struct hlist_node *pos;
-       int i;
+       int wi;
 
-       BUG_ON(gcwq->cpu != smp_processor_id());
+       for_each_cpu_worker_pool(pool, cpu) {
+               WARN_ON_ONCE(cpu != smp_processor_id());
 
-       gcwq_claim_assoc_and_lock(gcwq);
+               mutex_lock(&pool->manager_mutex);
+               spin_lock_irq(&pool->lock);
 
-       /*
-        * We've claimed all manager positions.  Make all workers unbound
-        * and set DISASSOCIATED.  Before this, all workers except for the
-        * ones which are still executing works from before the last CPU
-        * down must be on the cpu.  After this, they may become diasporas.
-        */
-       for_each_worker_pool(pool, gcwq)
-               list_for_each_entry(worker, &pool->idle_list, entry)
+               /*
+                * We've blocked all manager operations.  Make all workers
+                * unbound and set DISASSOCIATED.  Before this, all workers
+                * except for the ones which are still executing works from
+                * before the last CPU down must be on the cpu.  After
+                * this, they may become diasporas.
+                */
+               for_each_pool_worker(worker, wi, pool)
                        worker->flags |= WORKER_UNBOUND;
 
-       for_each_busy_worker(worker, i, pos, gcwq)
-               worker->flags |= WORKER_UNBOUND;
-
-       gcwq->flags |= GCWQ_DISASSOCIATED;
+               pool->flags |= POOL_DISASSOCIATED;
 
-       gcwq_release_assoc_and_unlock(gcwq);
+               spin_unlock_irq(&pool->lock);
+               mutex_unlock(&pool->manager_mutex);
+       }
 
        /*
         * Call schedule() so that we cross rq->lock and thus can guarantee
@@ -3576,16 +4066,113 @@ static void gcwq_unbind_fn(struct work_struct *work)
        /*
         * Sched callbacks are disabled now.  Zap nr_running.  After this,
         * nr_running stays zero and need_more_worker() and keep_working()
-        * are always true as long as the worklist is not empty.  @gcwq now
-        * behaves as unbound (in terms of concurrency management) gcwq
-        * which is served by workers tied to the CPU.
+        * are always true as long as the worklist is not empty.  Pools on
+        * @cpu now behave as unbound (in terms of concurrency management)
+        * pools which are served by workers tied to the CPU.
         *
         * On return from this function, the current worker would trigger
         * unbound chain execution of pending work items if other workers
         * didn't already.
         */
-       for_each_worker_pool(pool, gcwq)
-               atomic_set(get_pool_nr_running(pool), 0);
+       for_each_cpu_worker_pool(pool, cpu)
+               atomic_set(&pool->nr_running, 0);
+}
+
+/**
+ * rebind_workers - rebind all workers of a pool to the associated CPU
+ * @pool: pool of interest
+ *
+ * @pool->cpu is coming online.  Rebind all workers to the CPU.
+ */
+static void rebind_workers(struct worker_pool *pool)
+{
+       struct worker *worker;
+       int wi;
+
+       lockdep_assert_held(&pool->manager_mutex);
+
+       /*
+        * Restore CPU affinity of all workers.  As all idle workers should
+        * be on the run-queue of the associated CPU before any local
+        * wake-ups for concurrency management happen, restore CPU affinity
+        * of all workers first and then clear UNBOUND.  As we're called
+        * from CPU_ONLINE, the following shouldn't fail.
+        */
+       for_each_pool_worker(worker, wi, pool)
+               WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
+                                                 pool->attrs->cpumask) < 0);
+
+       spin_lock_irq(&pool->lock);
+
+       for_each_pool_worker(worker, wi, pool) {
+               unsigned int worker_flags = worker->flags;
+
+               /*
+                * A bound idle worker should actually be on the runqueue
+                * of the associated CPU for local wake-ups targeting it to
+                * work.  Kick all idle workers so that they migrate to the
+                * associated CPU.  Doing this in the same loop as
+                * replacing UNBOUND with REBOUND is safe as no worker will
+                * be bound before @pool->lock is released.
+                */
+               if (worker_flags & WORKER_IDLE)
+                       wake_up_process(worker->task);
+
+               /*
+                * We want to clear UNBOUND but can't directly call
+                * worker_clr_flags() or adjust nr_running.  Atomically
+                * replace UNBOUND with another NOT_RUNNING flag REBOUND.
+                * @worker will clear REBOUND using worker_clr_flags() when
+                * it initiates the next execution cycle thus restoring
+                * concurrency management.  Note that when or whether
+                * @worker clears REBOUND doesn't affect correctness.
+                *
+                * ACCESS_ONCE() is necessary because @worker->flags may be
+                * tested without holding any lock in
+                * wq_worker_waking_up().  Without it, NOT_RUNNING test may
+                * fail incorrectly leading to premature concurrency
+                * management operations.
+                */
+               WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
+               worker_flags |= WORKER_REBOUND;
+               worker_flags &= ~WORKER_UNBOUND;
+               ACCESS_ONCE(worker->flags) = worker_flags;
+       }
+
+       spin_unlock_irq(&pool->lock);
+}
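
A minimal userspace sketch of the single-store flag replacement discussed in the comments above.  The volatile write stands in for ACCESS_ONCE() and the flag values are an illustrative, reduced version of the worker flag enum near the top of this file; the point is that a lockless NOT_RUNNING test always sees at least one of UNBOUND/REBOUND set.

#include <stdio.h>

/* illustrative, reduced flag set for the sketch */
enum {
	WORKER_PREP    = 1 << 3,
	WORKER_UNBOUND = 1 << 7,
	WORKER_REBOUND = 1 << 8,
	NOT_RUNNING    = WORKER_PREP | WORKER_UNBOUND | WORKER_REBOUND,
};

int main(void)
{
	/* volatile forces one plain store, like ACCESS_ONCE(worker->flags) */
	volatile unsigned int flags = WORKER_UNBOUND;
	unsigned int tmp = flags;

	/*
	 * Compute the new value on the side, then publish it with a single
	 * write: a reader checking (flags & NOT_RUNNING) without a lock can
	 * observe the old or the new word, but never a state where neither
	 * UNBOUND nor REBOUND is set.
	 */
	tmp |= WORKER_REBOUND;
	tmp &= ~WORKER_UNBOUND;
	flags = tmp;

	printf("NOT_RUNNING still set: %s\n",
	       (flags & NOT_RUNNING) ? "yes" : "no");
	return 0;
}
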
+
+/**
+ * restore_unbound_workers_cpumask - restore cpumask of unbound workers
+ * @pool: unbound pool of interest
+ * @cpu: the CPU which is coming up
+ *
+ * An unbound pool may end up with a cpumask which doesn't have any online
+ * CPUs.  When a worker of such a pool gets scheduled, the scheduler resets
+ * its cpus_allowed.  If @cpu is in @pool's cpumask, which didn't have any
+ * online CPU before, cpus_allowed of all its workers should be restored.
+ */
+static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu)
+{
+       static cpumask_t cpumask;
+       struct worker *worker;
+       int wi;
+
+       lockdep_assert_held(&pool->manager_mutex);
+
+       /* is @cpu allowed for @pool? */
+       if (!cpumask_test_cpu(cpu, pool->attrs->cpumask))
+               return;
+
+       /* is @cpu the only online CPU? */
+       cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask);
+       if (cpumask_weight(&cpumask) != 1)
+               return;
+
+       /* as we're called from CPU_ONLINE, the following shouldn't fail */
+       for_each_pool_worker(worker, wi, pool)
+               WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
+                                                 pool->attrs->cpumask) < 0);
 }
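
The and-then-weigh test above can be modelled with plain bitmasks.  A hedged userspace sketch (all names invented for the example) of the "is @cpu allowed for the pool and the only online CPU in its mask?" decision:

#include <stdbool.h>
#include <stdio.h>

/*
 * Userspace model of the decision in restore_unbound_workers_cpumask():
 * restore affinity only when @cpu is allowed for the pool and is the sole
 * online CPU in the pool's mask.  Plain unsigned longs stand in for
 * cpumask_t; the comments name the kernel calls they correspond to.
 */
static bool should_restore(unsigned long pool_mask, unsigned long online_mask,
			   int cpu)
{
	unsigned long both;

	if (!(pool_mask & (1UL << cpu)))	/* cpumask_test_cpu() */
		return false;

	both = pool_mask & online_mask;		/* cpumask_and() */
	return __builtin_popcountl(both) == 1;	/* cpumask_weight() == 1 */
}

int main(void)
{
	/* pool allows CPUs 2-3; CPU 2 has just come online, CPU 3 is down */
	printf("%d\n", should_restore(0xc, 0x4, 2));	/* 1: restore */
	/* CPU 3 was already online, so the workers were never reset */
	printf("%d\n", should_restore(0xc, 0xc, 2));	/* 0: nothing to do */
	return 0;
}
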
 
 /*
@@ -3596,34 +4183,41 @@ static int __cpuinit workqueue_cpu_up_callback(struct notifier_block *nfb,
                                               unsigned long action,
                                               void *hcpu)
 {
-       unsigned int cpu = (unsigned long)hcpu;
-       struct global_cwq *gcwq = get_gcwq(cpu);
+       int cpu = (unsigned long)hcpu;
        struct worker_pool *pool;
+       int pi;
 
        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_UP_PREPARE:
-               for_each_worker_pool(pool, gcwq) {
-                       struct worker *worker;
-
+               for_each_cpu_worker_pool(pool, cpu) {
                        if (pool->nr_workers)
                                continue;
-
-                       worker = create_worker(pool);
-                       if (!worker)
+                       if (create_and_start_worker(pool) < 0)
                                return NOTIFY_BAD;
-
-                       spin_lock_irq(&gcwq->lock);
-                       start_worker(worker);
-                       spin_unlock_irq(&gcwq->lock);
                }
                break;
 
        case CPU_DOWN_FAILED:
        case CPU_ONLINE:
-               gcwq_claim_assoc_and_lock(gcwq);
-               gcwq->flags &= ~GCWQ_DISASSOCIATED;
-               rebind_workers(gcwq);
-               gcwq_release_assoc_and_unlock(gcwq);
+               mutex_lock(&wq_pool_mutex);
+
+               for_each_pool(pool, pi) {
+                       mutex_lock(&pool->manager_mutex);
+
+                       if (pool->cpu == cpu) {
+                               spin_lock_irq(&pool->lock);
+                               pool->flags &= ~POOL_DISASSOCIATED;
+                               spin_unlock_irq(&pool->lock);
+
+                               rebind_workers(pool);
+                       } else if (pool->cpu < 0) {
+                               restore_unbound_workers_cpumask(pool, cpu);
+                       }
+
+                       mutex_unlock(&pool->manager_mutex);
+               }
+
+               mutex_unlock(&wq_pool_mutex);
                break;
        }
        return NOTIFY_OK;
@@ -3637,13 +4231,13 @@ static int __cpuinit workqueue_cpu_down_callback(struct notifier_block *nfb,
                                                 unsigned long action,
                                                 void *hcpu)
 {
-       unsigned int cpu = (unsigned long)hcpu;
+       int cpu = (unsigned long)hcpu;
        struct work_struct unbind_work;
 
        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_DOWN_PREPARE:
                /* unbinding should happen on the local CPU */
-               INIT_WORK_ONSTACK(&unbind_work, gcwq_unbind_fn);
+               INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn);
                queue_work_on(cpu, system_highpri_wq, &unbind_work);
                flush_work(&unbind_work);
                break;
@@ -3677,7 +4271,7 @@ static void work_for_cpu_fn(struct work_struct *work)
  * It is up to the caller to ensure that the cpu doesn't go offline.
  * The caller must not hold any locks which would prevent @fn from completing.
  */
-long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
+long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
 {
        struct work_for_cpu wfc = { .fn = fn, .arg = arg };
 
@@ -3695,41 +4289,41 @@ EXPORT_SYMBOL_GPL(work_on_cpu);
  * freeze_workqueues_begin - begin freezing workqueues
  *
  * Start freezing workqueues.  After this function returns, all freezable
- * workqueues will queue new works to their frozen_works list instead of
- * gcwq->worklist.
+ * workqueues will queue new works to their delayed_works list instead of
+ * pool->worklist.
  *
  * CONTEXT:
- * Grabs and releases workqueue_lock and gcwq->lock's.
+ * Grabs and releases wq_pool_mutex, pwq_lock and pool->lock's.
  */
 void freeze_workqueues_begin(void)
 {
-       unsigned int cpu;
+       struct worker_pool *pool;
+       struct workqueue_struct *wq;
+       struct pool_workqueue *pwq;
+       int pi;
 
-       spin_lock(&workqueue_lock);
+       mutex_lock(&wq_pool_mutex);
 
-       BUG_ON(workqueue_freezing);
+       WARN_ON_ONCE(workqueue_freezing);
        workqueue_freezing = true;
 
-       for_each_gcwq_cpu(cpu) {
-               struct global_cwq *gcwq = get_gcwq(cpu);
-               struct workqueue_struct *wq;
-
-               spin_lock_irq(&gcwq->lock);
-
-               BUG_ON(gcwq->flags & GCWQ_FREEZING);
-               gcwq->flags |= GCWQ_FREEZING;
-
-               list_for_each_entry(wq, &workqueues, list) {
-                       struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
-
-                       if (cwq && wq->flags & WQ_FREEZABLE)
-                               cwq->max_active = 0;
-               }
+       /* set FREEZING */
+       for_each_pool(pool, pi) {
+               spin_lock_irq(&pool->lock);
+               WARN_ON_ONCE(pool->flags & POOL_FREEZING);
+               pool->flags |= POOL_FREEZING;
+               spin_unlock_irq(&pool->lock);
+       }
 
-               spin_unlock_irq(&gcwq->lock);
+       /* suppress further executions by setting max_active to zero */
+       spin_lock_irq(&pwq_lock);
+       list_for_each_entry(wq, &workqueues, list) {
+               for_each_pwq(pwq, wq)
+                       pwq_adjust_max_active(pwq);
        }
+       spin_unlock_irq(&pwq_lock);
 
-       spin_unlock(&workqueue_lock);
+       mutex_unlock(&wq_pool_mutex);
 }
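
pwq_adjust_max_active() itself is not part of this hunk; under the assumption that it clamps a freezable pwq's max_active to zero while freezing is in effect and otherwise restores the saved value, the freeze/thaw effect can be sketched in userspace as follows (struct and field names are invented for the sketch):

#include <stdbool.h>
#include <stdio.h>

/*
 * Assumed behaviour of pwq_adjust_max_active() for the freeze/thaw paths:
 * a freezable pwq may not start new work while freezing; everything else
 * keeps (or regains) its saved max_active.
 */
struct pwq_model {
	bool freezable;
	int max_active;
	int saved_max_active;
};

static void adjust_max_active(struct pwq_model *pwq, bool freezing)
{
	if (pwq->freezable && freezing)
		pwq->max_active = 0;	/* suppress further executions */
	else
		pwq->max_active = pwq->saved_max_active;
}

int main(void)
{
	struct pwq_model pwq = { .freezable = true, .saved_max_active = 256 };

	adjust_max_active(&pwq, true);	/* freeze_workqueues_begin() */
	printf("frozen: max_active=%d\n", pwq.max_active);
	adjust_max_active(&pwq, false);	/* thaw_workqueues() */
	printf("thawed: max_active=%d\n", pwq.max_active);
	return 0;
}
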
 
 /**
@@ -3739,7 +4333,7 @@ void freeze_workqueues_begin(void)
  * between freeze_workqueues_begin() and thaw_workqueues().
  *
  * CONTEXT:
- * Grabs and releases workqueue_lock.
+ * Grabs and releases wq_pool_mutex.
  *
  * RETURNS:
  * %true if some freezable workqueues are still busy.  %false if freezing
@@ -3747,34 +4341,34 @@ void freeze_workqueues_begin(void)
  */
 bool freeze_workqueues_busy(void)
 {
-       unsigned int cpu;
        bool busy = false;
+       struct workqueue_struct *wq;
+       struct pool_workqueue *pwq;
 
-       spin_lock(&workqueue_lock);
+       mutex_lock(&wq_pool_mutex);
 
-       BUG_ON(!workqueue_freezing);
+       WARN_ON_ONCE(!workqueue_freezing);
 
-       for_each_gcwq_cpu(cpu) {
-               struct workqueue_struct *wq;
+       list_for_each_entry(wq, &workqueues, list) {
+               if (!(wq->flags & WQ_FREEZABLE))
+                       continue;
                /*
                 * nr_active is monotonically decreasing.  It's safe
                 * to peek without lock.
                 */
-               list_for_each_entry(wq, &workqueues, list) {
-                       struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
-
-                       if (!cwq || !(wq->flags & WQ_FREEZABLE))
-                               continue;
-
-                       BUG_ON(cwq->nr_active < 0);
-                       if (cwq->nr_active) {
+               rcu_read_lock_sched();
+               for_each_pwq(pwq, wq) {
+                       WARN_ON_ONCE(pwq->nr_active < 0);
+                       if (pwq->nr_active) {
                                busy = true;
+                               rcu_read_unlock_sched();
                                goto out_unlock;
                        }
                }
+               rcu_read_unlock_sched();
        }
 out_unlock:
-       spin_unlock(&workqueue_lock);
+       mutex_unlock(&wq_pool_mutex);
        return busy;
 }
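
Why the lockless peek at nr_active above is safe: while frozen, nr_active only ever decreases, so a stale read can only over-report busyness and the freezer simply polls again.  A small userspace model of the check (names invented):

#include <stdbool.h>
#include <stdio.h>

/*
 * Model of the busy check: report busy if any pwq still has active work.
 * Because nr_active is monotonically decreasing while frozen, reading it
 * without pwq->lock can at worst return a stale, larger value, which makes
 * the check err on the side of "busy" but never lets active work slip by.
 */
struct pwq_view {
	int nr_active;
};

static bool any_busy(const struct pwq_view *pwqs, int n)
{
	for (int i = 0; i < n; i++)
		if (pwqs[i].nr_active > 0)
			return true;
	return false;
}

int main(void)
{
	struct pwq_view pwqs[] = { { 0 }, { 3 }, { 0 } };

	printf("busy=%d\n", any_busy(pwqs, 3));	/* 1: still draining */
	pwqs[1].nr_active = 0;			/* in-flight items finish */
	printf("busy=%d\n", any_busy(pwqs, 3));	/* 0: freezing complete */
	return 0;
}
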
 
@@ -3782,110 +4376,99 @@ out_unlock:
  * thaw_workqueues - thaw workqueues
  *
  * Thaw workqueues.  Normal queueing is restored and all collected
- * frozen works are transferred to their respective gcwq worklists.
+ * frozen works are transferred to their respective pool worklists.
  *
  * CONTEXT:
- * Grabs and releases workqueue_lock and gcwq->lock's.
+ * Grabs and releases wq_pool_mutex, pwq_lock and pool->lock's.
  */
 void thaw_workqueues(void)
 {
-       unsigned int cpu;
+       struct workqueue_struct *wq;
+       struct pool_workqueue *pwq;
+       struct worker_pool *pool;
+       int pi;
 
-       spin_lock(&workqueue_lock);
+       mutex_lock(&wq_pool_mutex);
 
        if (!workqueue_freezing)
                goto out_unlock;
 
-       for_each_gcwq_cpu(cpu) {
-               struct global_cwq *gcwq = get_gcwq(cpu);
-               struct worker_pool *pool;
-               struct workqueue_struct *wq;
-
-               spin_lock_irq(&gcwq->lock);
-
-               BUG_ON(!(gcwq->flags & GCWQ_FREEZING));
-               gcwq->flags &= ~GCWQ_FREEZING;
-
-               list_for_each_entry(wq, &workqueues, list) {
-                       struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
-
-                       if (!cwq || !(wq->flags & WQ_FREEZABLE))
-                               continue;
-
-                       /* restore max_active and repopulate worklist */
-                       cwq_set_max_active(cwq, wq->saved_max_active);
-               }
-
-               for_each_worker_pool(pool, gcwq)
-                       wake_up_worker(pool);
+       /* clear FREEZING */
+       for_each_pool(pool, pi) {
+               spin_lock_irq(&pool->lock);
+               WARN_ON_ONCE(!(pool->flags & POOL_FREEZING));
+               pool->flags &= ~POOL_FREEZING;
+               spin_unlock_irq(&pool->lock);
+       }
 
-               spin_unlock_irq(&gcwq->lock);
+       /* restore max_active and repopulate worklist */
+       spin_lock_irq(&pwq_lock);
+       list_for_each_entry(wq, &workqueues, list) {
+               for_each_pwq(pwq, wq)
+                       pwq_adjust_max_active(pwq);
        }
+       spin_unlock_irq(&pwq_lock);
 
        workqueue_freezing = false;
 out_unlock:
-       spin_unlock(&workqueue_lock);
+       mutex_unlock(&wq_pool_mutex);
 }
 #endif /* CONFIG_FREEZER */
 
 static int __init init_workqueues(void)
 {
-       unsigned int cpu;
-       int i;
+       int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL };
+       int i, cpu;
 
-       /* make sure we have enough bits for OFFQ CPU number */
-       BUILD_BUG_ON((1LU << (BITS_PER_LONG - WORK_OFFQ_CPU_SHIFT)) <
-                    WORK_CPU_LAST);
+       /* make sure we have enough bits for OFFQ pool ID */
+       BUILD_BUG_ON((1LU << (BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT)) <
+                    WORK_CPU_END * NR_STD_WORKER_POOLS);
+
+       WARN_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
+
+       pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
 
        cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP);
        hotcpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN);
 
-       /* initialize gcwqs */
-       for_each_gcwq_cpu(cpu) {
-               struct global_cwq *gcwq = get_gcwq(cpu);
+       /* initialize CPU pools */
+       for_each_possible_cpu(cpu) {
                struct worker_pool *pool;
 
-               spin_lock_init(&gcwq->lock);
-               gcwq->cpu = cpu;
-               gcwq->flags |= GCWQ_DISASSOCIATED;
-
-               for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)
-                       INIT_HLIST_HEAD(&gcwq->busy_hash[i]);
-
-               for_each_worker_pool(pool, gcwq) {
-                       pool->gcwq = gcwq;
-                       INIT_LIST_HEAD(&pool->worklist);
-                       INIT_LIST_HEAD(&pool->idle_list);
-
-                       init_timer_deferrable(&pool->idle_timer);
-                       pool->idle_timer.function = idle_worker_timeout;
-                       pool->idle_timer.data = (unsigned long)pool;
-
-                       setup_timer(&pool->mayday_timer, gcwq_mayday_timeout,
-                                   (unsigned long)pool);
-
-                       mutex_init(&pool->assoc_mutex);
-                       ida_init(&pool->worker_ida);
+               i = 0;
+               for_each_cpu_worker_pool(pool, cpu) {
+                       BUG_ON(init_worker_pool(pool));
+                       pool->cpu = cpu;
+                       cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu));
+                       pool->attrs->nice = std_nice[i++];
+
+                       /* alloc pool ID */
+                       mutex_lock(&wq_pool_mutex);
+                       BUG_ON(worker_pool_assign_id(pool));
+                       mutex_unlock(&wq_pool_mutex);
                }
        }
 
        /* create the initial worker */
-       for_each_online_gcwq_cpu(cpu) {
-               struct global_cwq *gcwq = get_gcwq(cpu);
+       for_each_online_cpu(cpu) {
                struct worker_pool *pool;
 
-               if (cpu != WORK_CPU_UNBOUND)
-                       gcwq->flags &= ~GCWQ_DISASSOCIATED;
+               for_each_cpu_worker_pool(pool, cpu) {
+                       pool->flags &= ~POOL_DISASSOCIATED;
+                       BUG_ON(create_and_start_worker(pool) < 0);
+               }
+       }
 
-               for_each_worker_pool(pool, gcwq) {
-                       struct worker *worker;
+       /* create default unbound wq attrs */
+       for (i = 0; i < NR_STD_WORKER_POOLS; i++) {
+               struct workqueue_attrs *attrs;
 
-                       worker = create_worker(pool);
-                       BUG_ON(!worker);
-                       spin_lock_irq(&gcwq->lock);
-                       start_worker(worker);
-                       spin_unlock_irq(&gcwq->lock);
-               }
+               BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL)));
+
+               attrs->nice = std_nice[i];
+               cpumask_setall(attrs->cpumask);
+
+               unbound_std_wq_attrs[i] = attrs;
        }
 
        system_wq = alloc_workqueue("events", 0, 0);