workqueue: un-GPL function delayed_work_timer_fn()
[pandora-kernel.git] / kernel / workqueue.c
index fd400f8..f4feaca 100644 (file)
@@ -123,7 +123,6 @@ enum {
 /* struct worker is defined in workqueue_internal.h */
 
 struct worker_pool {
-       struct global_cwq       *gcwq;          /* I: the owning gcwq */
        spinlock_t              lock;           /* the pool lock */
        unsigned int            cpu;            /* I: the associated cpu */
        int                     id;             /* I: pool ID */
@@ -145,24 +144,22 @@ struct worker_pool {
 
        struct mutex            assoc_mutex;    /* protect POOL_DISASSOCIATED */
        struct ida              worker_ida;     /* L: for worker IDs */
-};
 
-/*
- * Global per-cpu workqueue.  There's one and only one for each cpu
- * and all works are queued and processed here regardless of their
- * target workqueues.
- */
-struct global_cwq {
-       struct worker_pool      pools[NR_STD_WORKER_POOLS];
-                                               /* normal and highpri pools */
+       /*
+        * The current concurrency level.  As it's likely to be accessed
+        * from other CPUs during try_to_wake_up(), put it in a separate
+        * cacheline.
+        */
+       atomic_t                nr_running ____cacheline_aligned_in_smp;
 } ____cacheline_aligned_in_smp;
 
 /*
- * The per-CPU workqueue.  The lower WORK_STRUCT_FLAG_BITS of
- * work_struct->data are used for flags and thus cwqs need to be
- * aligned at two's power of the number of flag bits.
+ * The per-pool workqueue.  While queued, the lower WORK_STRUCT_FLAG_BITS
+ * of work_struct->data are used for flags and the remaining high bits
+ * point to the pwq; thus, pwqs need to be aligned at two's power of the
+ * number of flag bits.
  */
-struct cpu_workqueue_struct {
+struct pool_workqueue {
        struct worker_pool      *pool;          /* I: the associated pool */
        struct workqueue_struct *wq;            /* I: the owning workqueue */
        int                     work_color;     /* L: current color */
@@ -211,16 +208,16 @@ typedef unsigned long mayday_mask_t;
 struct workqueue_struct {
        unsigned int            flags;          /* W: WQ_* flags */
        union {
-               struct cpu_workqueue_struct __percpu    *pcpu;
-               struct cpu_workqueue_struct             *single;
+               struct pool_workqueue __percpu          *pcpu;
+               struct pool_workqueue                   *single;
                unsigned long                           v;
-       } cpu_wq;                               /* I: cwq's */
+       } pool_wq;                              /* I: pwq's */
        struct list_head        list;           /* W: list of all workqueues */
 
        struct mutex            flush_mutex;    /* protects wq flushing */
        int                     work_color;     /* F: current work color */
        int                     flush_color;    /* F: current flush color */
-       atomic_t                nr_cwqs_to_flush; /* flush in progress */
+       atomic_t                nr_pwqs_to_flush; /* flush in progress */
        struct wq_flusher       *first_flusher; /* F: first flusher */
        struct list_head        flusher_queue;  /* F: flush waiters */
        struct list_head        flusher_overflow; /* F: flush overflow list */
@@ -229,7 +226,7 @@ struct workqueue_struct {
        struct worker           *rescuer;       /* I: rescue worker */
 
        int                     nr_drainers;    /* W: drain in progress */
-       int                     saved_max_active; /* W: saved cwq max_active */
+       int                     saved_max_active; /* W: saved pwq max_active */
 #ifdef CONFIG_LOCKDEP
        struct lockdep_map      lockdep_map;
 #endif
@@ -250,15 +247,15 @@ EXPORT_SYMBOL_GPL(system_freezable_wq);
 #define CREATE_TRACE_POINTS
 #include <trace/events/workqueue.h>
 
-#define for_each_worker_pool(pool, gcwq)                               \
-       for ((pool) = &(gcwq)->pools[0];                                \
-            (pool) < &(gcwq)->pools[NR_STD_WORKER_POOLS]; (pool)++)
+#define for_each_std_worker_pool(pool, cpu)                            \
+       for ((pool) = &std_worker_pools(cpu)[0];                        \
+            (pool) < &std_worker_pools(cpu)[NR_STD_WORKER_POOLS]; (pool)++)
 
 #define for_each_busy_worker(worker, i, pos, pool)                     \
        hash_for_each(pool->busy_hash, i, pos, worker, hentry)
 
-static inline int __next_gcwq_cpu(int cpu, const struct cpumask *mask,
-                                 unsigned int sw)
+static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
+                               unsigned int sw)
 {
        if (cpu < nr_cpu_ids) {
                if (sw & 1) {
@@ -269,42 +266,42 @@ static inline int __next_gcwq_cpu(int cpu, const struct cpumask *mask,
                if (sw & 2)
                        return WORK_CPU_UNBOUND;
        }
-       return WORK_CPU_NONE;
+       return WORK_CPU_END;
 }
 
-static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
-                               struct workqueue_struct *wq)
+static inline int __next_pwq_cpu(int cpu, const struct cpumask *mask,
+                                struct workqueue_struct *wq)
 {
-       return __next_gcwq_cpu(cpu, mask, !(wq->flags & WQ_UNBOUND) ? 1 : 2);
+       return __next_wq_cpu(cpu, mask, !(wq->flags & WQ_UNBOUND) ? 1 : 2);
 }
 
 /*
  * CPU iterators
  *
- * An extra gcwq is defined for an invalid cpu number
+ * An extra cpu number is defined using an invalid cpu number
  * (WORK_CPU_UNBOUND) to host workqueues which are not bound to any
- * specific CPU.  The following iterators are similar to
- * for_each_*_cpu() iterators but also considers the unbound gcwq.
+ * specific CPU.  The following iterators are similar to for_each_*_cpu()
+ * iterators but also considers the unbound CPU.
  *
- * for_each_gcwq_cpu()         : possible CPUs + WORK_CPU_UNBOUND
- * for_each_online_gcwq_cpu()  : online CPUs + WORK_CPU_UNBOUND
- * for_each_cwq_cpu()          : possible CPUs for bound workqueues,
+ * for_each_wq_cpu()           : possible CPUs + WORK_CPU_UNBOUND
+ * for_each_online_wq_cpu()    : online CPUs + WORK_CPU_UNBOUND
+ * for_each_pwq_cpu()          : possible CPUs for bound workqueues,
  *                               WORK_CPU_UNBOUND for unbound workqueues
  */
-#define for_each_gcwq_cpu(cpu)                                         \
-       for ((cpu) = __next_gcwq_cpu(-1, cpu_possible_mask, 3);         \
-            (cpu) < WORK_CPU_NONE;                                     \
-            (cpu) = __next_gcwq_cpu((cpu), cpu_possible_mask, 3))
+#define for_each_wq_cpu(cpu)                                           \
+       for ((cpu) = __next_wq_cpu(-1, cpu_possible_mask, 3);           \
+            (cpu) < WORK_CPU_END;                                      \
+            (cpu) = __next_wq_cpu((cpu), cpu_possible_mask, 3))
 
-#define for_each_online_gcwq_cpu(cpu)                                  \
-       for ((cpu) = __next_gcwq_cpu(-1, cpu_online_mask, 3);           \
-            (cpu) < WORK_CPU_NONE;                                     \
-            (cpu) = __next_gcwq_cpu((cpu), cpu_online_mask, 3))
+#define for_each_online_wq_cpu(cpu)                                    \
+       for ((cpu) = __next_wq_cpu(-1, cpu_online_mask, 3);             \
+            (cpu) < WORK_CPU_END;                                      \
+            (cpu) = __next_wq_cpu((cpu), cpu_online_mask, 3))
 
-#define for_each_cwq_cpu(cpu, wq)                                      \
-       for ((cpu) = __next_wq_cpu(-1, cpu_possible_mask, (wq));        \
-            (cpu) < WORK_CPU_NONE;                                     \
-            (cpu) = __next_wq_cpu((cpu), cpu_possible_mask, (wq)))
+#define for_each_pwq_cpu(cpu, wq)                                      \
+       for ((cpu) = __next_pwq_cpu(-1, cpu_possible_mask, (wq));       \
+            (cpu) < WORK_CPU_END;                                      \
+            (cpu) = __next_pwq_cpu((cpu), cpu_possible_mask, (wq)))
 
 #ifdef CONFIG_DEBUG_OBJECTS_WORK
 
@@ -428,22 +425,12 @@ static LIST_HEAD(workqueues);
 static bool workqueue_freezing;                /* W: have wqs started freezing? */
 
 /*
- * The almighty global cpu workqueues.  nr_running is the only field
- * which is expected to be used frequently by other cpus via
- * try_to_wake_up().  Put it in a separate cacheline.
+ * The CPU and unbound standard worker pools.  The unbound ones have
+ * POOL_DISASSOCIATED set, and their workers have WORKER_UNBOUND set.
  */
-static DEFINE_PER_CPU(struct global_cwq, global_cwq);
-static DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, pool_nr_running[NR_STD_WORKER_POOLS]);
-
-/*
- * Global cpu workqueue and nr_running counter for unbound gcwq.  The pools
- * for online CPUs have POOL_DISASSOCIATED set, and all their workers have
- * WORKER_UNBOUND set.
- */
-static struct global_cwq unbound_global_cwq;
-static atomic_t unbound_pool_nr_running[NR_STD_WORKER_POOLS] = {
-       [0 ... NR_STD_WORKER_POOLS - 1] = ATOMIC_INIT(0),       /* always 0 */
-};
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS],
+                                    cpu_std_worker_pools);
+static struct worker_pool unbound_std_worker_pools[NR_STD_WORKER_POOLS];
 
 /* idr of all pools */
 static DEFINE_MUTEX(worker_pool_idr_mutex);
@@ -451,17 +438,17 @@ static DEFINE_IDR(worker_pool_idr);
 
 static int worker_thread(void *__worker);
 
-static int std_worker_pool_pri(struct worker_pool *pool)
+static struct worker_pool *std_worker_pools(int cpu)
 {
-       return pool - pool->gcwq->pools;
+       if (cpu != WORK_CPU_UNBOUND)
+               return per_cpu(cpu_std_worker_pools, cpu);
+       else
+               return unbound_std_worker_pools;
 }
 
-static struct global_cwq *get_gcwq(unsigned int cpu)
+static int std_worker_pool_pri(struct worker_pool *pool)
 {
-       if (cpu != WORK_CPU_UNBOUND)
-               return &per_cpu(global_cwq, cpu);
-       else
-               return &unbound_global_cwq;
+       return pool - std_worker_pools(pool->cpu);
 }
 
 /* allocate ID and assign it to @pool */
@@ -488,30 +475,19 @@ static struct worker_pool *worker_pool_by_id(int pool_id)
 
 static struct worker_pool *get_std_worker_pool(int cpu, bool highpri)
 {
-       struct global_cwq *gcwq = get_gcwq(cpu);
+       struct worker_pool *pools = std_worker_pools(cpu);
 
-       return &gcwq->pools[highpri];
+       return &pools[highpri];
 }
 
-static atomic_t *get_pool_nr_running(struct worker_pool *pool)
-{
-       int cpu = pool->cpu;
-       int idx = std_worker_pool_pri(pool);
-
-       if (cpu != WORK_CPU_UNBOUND)
-               return &per_cpu(pool_nr_running, cpu)[idx];
-       else
-               return &unbound_pool_nr_running[idx];
-}
-
-static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
-                                           struct workqueue_struct *wq)
+static struct pool_workqueue *get_pwq(unsigned int cpu,
+                                     struct workqueue_struct *wq)
 {
        if (!(wq->flags & WQ_UNBOUND)) {
                if (likely(cpu < nr_cpu_ids))
-                       return per_cpu_ptr(wq->cpu_wq.pcpu, cpu);
+                       return per_cpu_ptr(wq->pool_wq.pcpu, cpu);
        } else if (likely(cpu == WORK_CPU_UNBOUND))
-               return wq->cpu_wq.single;
+               return wq->pool_wq.single;
        return NULL;
 }
 
@@ -532,18 +508,18 @@ static int work_next_color(int color)
 }
 
 /*
- * While queued, %WORK_STRUCT_CWQ is set and non flag bits of a work's data
- * contain the pointer to the queued cwq.  Once execution starts, the flag
+ * While queued, %WORK_STRUCT_PWQ is set and non flag bits of a work's data
+ * contain the pointer to the queued pwq.  Once execution starts, the flag
  * is cleared and the high bits contain OFFQ flags and pool ID.
  *
- * set_work_cwq(), set_work_pool_and_clear_pending(), mark_work_canceling()
- * and clear_work_data() can be used to set the cwq, pool or clear
+ * set_work_pwq(), set_work_pool_and_clear_pending(), mark_work_canceling()
+ * and clear_work_data() can be used to set the pwq, pool or clear
  * work->data.  These functions should only be called while the work is
  * owned - ie. while the PENDING bit is set.
  *
- * get_work_pool() and get_work_cwq() can be used to obtain the pool or cwq
+ * get_work_pool() and get_work_pwq() can be used to obtain the pool or pwq
  * corresponding to a work.  Pool is available once the work has been
- * queued anywhere after initialization until it is sync canceled.  cwq is
+ * queued anywhere after initialization until it is sync canceled.  pwq is
  * available only while the work item is queued.
  *
  * %WORK_OFFQ_CANCELING is used to mark a work item which is being
@@ -558,12 +534,18 @@ static inline void set_work_data(struct work_struct *work, unsigned long data,
        atomic_long_set(&work->data, data | flags | work_static(work));
 }
 
-static void set_work_cwq(struct work_struct *work,
-                        struct cpu_workqueue_struct *cwq,
+static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq,
                         unsigned long extra_flags)
 {
-       set_work_data(work, (unsigned long)cwq,
-                     WORK_STRUCT_PENDING | WORK_STRUCT_CWQ | extra_flags);
+       set_work_data(work, (unsigned long)pwq,
+                     WORK_STRUCT_PENDING | WORK_STRUCT_PWQ | extra_flags);
+}
+
+static void set_work_pool_and_keep_pending(struct work_struct *work,
+                                          int pool_id)
+{
+       set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT,
+                     WORK_STRUCT_PENDING);
 }
 
 static void set_work_pool_and_clear_pending(struct work_struct *work,
@@ -585,11 +567,11 @@ static void clear_work_data(struct work_struct *work)
        set_work_data(work, WORK_STRUCT_NO_POOL, 0);
 }
 
-static struct cpu_workqueue_struct *get_work_cwq(struct work_struct *work)
+static struct pool_workqueue *get_work_pwq(struct work_struct *work)
 {
        unsigned long data = atomic_long_read(&work->data);
 
-       if (data & WORK_STRUCT_CWQ)
+       if (data & WORK_STRUCT_PWQ)
                return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
        else
                return NULL;
@@ -607,8 +589,8 @@ static struct worker_pool *get_work_pool(struct work_struct *work)
        struct worker_pool *pool;
        int pool_id;
 
-       if (data & WORK_STRUCT_CWQ)
-               return ((struct cpu_workqueue_struct *)
+       if (data & WORK_STRUCT_PWQ)
+               return ((struct pool_workqueue *)
                        (data & WORK_STRUCT_WQ_DATA_MASK))->pool;
 
        pool_id = data >> WORK_OFFQ_POOL_SHIFT;
@@ -629,9 +611,13 @@ static struct worker_pool *get_work_pool(struct work_struct *work)
  */
 static int get_work_pool_id(struct work_struct *work)
 {
-       struct worker_pool *pool = get_work_pool(work);
+       unsigned long data = atomic_long_read(&work->data);
 
-       return pool ? pool->id : WORK_OFFQ_POOL_NONE;
+       if (data & WORK_STRUCT_PWQ)
+               return ((struct pool_workqueue *)
+                       (data & WORK_STRUCT_WQ_DATA_MASK))->pool->id;
+
+       return data >> WORK_OFFQ_POOL_SHIFT;
 }
 
 static void mark_work_canceling(struct work_struct *work)
@@ -646,7 +632,7 @@ static bool work_is_canceling(struct work_struct *work)
 {
        unsigned long data = atomic_long_read(&work->data);
 
-       return !(data & WORK_STRUCT_CWQ) && (data & WORK_OFFQ_CANCELING);
+       return !(data & WORK_STRUCT_PWQ) && (data & WORK_OFFQ_CANCELING);
 }
 
 /*
@@ -657,7 +643,7 @@ static bool work_is_canceling(struct work_struct *work)
 
 static bool __need_more_worker(struct worker_pool *pool)
 {
-       return !atomic_read(get_pool_nr_running(pool));
+       return !atomic_read(&pool->nr_running);
 }
 
 /*
@@ -665,7 +651,7 @@ static bool __need_more_worker(struct worker_pool *pool)
  * running workers.
  *
  * Note that, because unbound workers never contribute to nr_running, this
- * function will always return %true for unbound gcwq as long as the
+ * function will always return %true for unbound pools as long as the
  * worklist isn't empty.
  */
 static bool need_more_worker(struct worker_pool *pool)
@@ -682,9 +668,8 @@ static bool may_start_working(struct worker_pool *pool)
 /* Do I need to keep working?  Called from currently running workers. */
 static bool keep_working(struct worker_pool *pool)
 {
-       atomic_t *nr_running = get_pool_nr_running(pool);
-
-       return !list_empty(&pool->worklist) && atomic_read(nr_running) <= 1;
+       return !list_empty(&pool->worklist) &&
+               atomic_read(&pool->nr_running) <= 1;
 }
 
 /* Do we need a new worker?  Called from manager. */
@@ -764,7 +749,7 @@ void wq_worker_waking_up(struct task_struct *task, unsigned int cpu)
 
        if (!(worker->flags & WORKER_NOT_RUNNING)) {
                WARN_ON_ONCE(worker->pool->cpu != cpu);
-               atomic_inc(get_pool_nr_running(worker->pool));
+               atomic_inc(&worker->pool->nr_running);
        }
 }
 
@@ -788,7 +773,6 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task,
 {
        struct worker *worker = kthread_data(task), *to_wakeup = NULL;
        struct worker_pool *pool;
-       atomic_t *nr_running;
 
        /*
         * Rescuers, which may not have all the fields set up like normal
@@ -799,7 +783,6 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task,
                return NULL;
 
        pool = worker->pool;
-       nr_running = get_pool_nr_running(pool);
 
        /* this can only happen on the local cpu */
        BUG_ON(cpu != raw_smp_processor_id());
@@ -815,7 +798,8 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task,
         * manipulating idle_list, so dereferencing idle_list without pool
         * lock is safe.
         */
-       if (atomic_dec_and_test(nr_running) && !list_empty(&pool->worklist))
+       if (atomic_dec_and_test(&pool->nr_running) &&
+           !list_empty(&pool->worklist))
                to_wakeup = first_worker(pool);
        return to_wakeup ? to_wakeup->task : NULL;
 }
@@ -847,14 +831,12 @@ static inline void worker_set_flags(struct worker *worker, unsigned int flags,
         */
        if ((flags & WORKER_NOT_RUNNING) &&
            !(worker->flags & WORKER_NOT_RUNNING)) {
-               atomic_t *nr_running = get_pool_nr_running(pool);
-
                if (wakeup) {
-                       if (atomic_dec_and_test(nr_running) &&
+                       if (atomic_dec_and_test(&pool->nr_running) &&
                            !list_empty(&pool->worklist))
                                wake_up_worker(pool);
                } else
-                       atomic_dec(nr_running);
+                       atomic_dec(&pool->nr_running);
        }
 
        worker->flags |= flags;
@@ -886,7 +868,7 @@ static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
         */
        if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
                if (!(worker->flags & WORKER_NOT_RUNNING))
-                       atomic_inc(get_pool_nr_running(pool));
+                       atomic_inc(&pool->nr_running);
 }
 
 /**
@@ -979,67 +961,67 @@ static void move_linked_works(struct work_struct *work, struct list_head *head,
                *nextp = n;
 }
 
-static void cwq_activate_delayed_work(struct work_struct *work)
+static void pwq_activate_delayed_work(struct work_struct *work)
 {
-       struct cpu_workqueue_struct *cwq = get_work_cwq(work);
+       struct pool_workqueue *pwq = get_work_pwq(work);
 
        trace_workqueue_activate_work(work);
-       move_linked_works(work, &cwq->pool->worklist, NULL);
+       move_linked_works(work, &pwq->pool->worklist, NULL);
        __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
-       cwq->nr_active++;
+       pwq->nr_active++;
 }
 
-static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
+static void pwq_activate_first_delayed(struct pool_workqueue *pwq)
 {
-       struct work_struct *work = list_first_entry(&cwq->delayed_works,
+       struct work_struct *work = list_first_entry(&pwq->delayed_works,
                                                    struct work_struct, entry);
 
-       cwq_activate_delayed_work(work);
+       pwq_activate_delayed_work(work);
 }
 
 /**
- * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
- * @cwq: cwq of interest
+ * pwq_dec_nr_in_flight - decrement pwq's nr_in_flight
+ * @pwq: pwq of interest
  * @color: color of work which left the queue
  *
  * A work either has completed or is removed from pending queue,
- * decrement nr_in_flight of its cwq and handle workqueue flushing.
+ * decrement nr_in_flight of its pwq and handle workqueue flushing.
  *
  * CONTEXT:
  * spin_lock_irq(pool->lock).
  */
-static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
+static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, int color)
 {
        /* ignore uncolored works */
        if (color == WORK_NO_COLOR)
                return;
 
-       cwq->nr_in_flight[color]--;
+       pwq->nr_in_flight[color]--;
 
-       cwq->nr_active--;
-       if (!list_empty(&cwq->delayed_works)) {
+       pwq->nr_active--;
+       if (!list_empty(&pwq->delayed_works)) {
                /* one down, submit a delayed one */
-               if (cwq->nr_active < cwq->max_active)
-                       cwq_activate_first_delayed(cwq);
+               if (pwq->nr_active < pwq->max_active)
+                       pwq_activate_first_delayed(pwq);
        }
 
        /* is flush in progress and are we at the flushing tip? */
-       if (likely(cwq->flush_color != color))
+       if (likely(pwq->flush_color != color))
                return;
 
        /* are there still in-flight works? */
-       if (cwq->nr_in_flight[color])
+       if (pwq->nr_in_flight[color])
                return;
 
-       /* this cwq is done, clear flush_color */
-       cwq->flush_color = -1;
+       /* this pwq is done, clear flush_color */
+       pwq->flush_color = -1;
 
        /*
-        * If this was the last cwq, wake up the first flusher.  It
+        * If this was the last pwq, wake up the first flusher.  It
         * will handle the rest.
         */
-       if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush))
-               complete(&cwq->wq->first_flusher->done);
+       if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush))
+               complete(&pwq->wq->first_flusher->done);
 }
 
 /**
@@ -1071,6 +1053,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
                               unsigned long *flags)
 {
        struct worker_pool *pool;
+       struct pool_workqueue *pwq;
 
        local_irq_save(*flags);
 
@@ -1100,34 +1083,36 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
                goto fail;
 
        spin_lock(&pool->lock);
-       if (!list_empty(&work->entry)) {
+       /*
+        * work->data is guaranteed to point to pwq only while the work
+        * item is queued on pwq->wq, and both updating work->data to point
+        * to pwq on queueing and to pool on dequeueing are done under
+        * pwq->pool->lock.  This in turn guarantees that, if work->data
+        * points to pwq which is associated with a locked pool, the work
+        * item is currently queued on that pool.
+        */
+       pwq = get_work_pwq(work);
+       if (pwq && pwq->pool == pool) {
+               debug_work_deactivate(work);
+
                /*
-                * This work is queued, but perhaps we locked the wrong
-                * pool.  In that case we must see the new value after
-                * rmb(), see insert_work()->wmb().
+                * A delayed work item cannot be grabbed directly because
+                * it might have linked NO_COLOR work items which, if left
+                * on the delayed_list, will confuse pwq->nr_active
+                * management later on and cause stall.  Make sure the work
+                * item is activated before grabbing.
                 */
-               smp_rmb();
-               if (pool == get_work_pool(work)) {
-                       debug_work_deactivate(work);
+               if (*work_data_bits(work) & WORK_STRUCT_DELAYED)
+                       pwq_activate_delayed_work(work);
 
-                       /*
-                        * A delayed work item cannot be grabbed directly
-                        * because it might have linked NO_COLOR work items
-                        * which, if left on the delayed_list, will confuse
-                        * cwq->nr_active management later on and cause
-                        * stall.  Make sure the work item is activated
-                        * before grabbing.
-                        */
-                       if (*work_data_bits(work) & WORK_STRUCT_DELAYED)
-                               cwq_activate_delayed_work(work);
+               list_del_init(&work->entry);
+               pwq_dec_nr_in_flight(get_work_pwq(work), get_work_color(work));
 
-                       list_del_init(&work->entry);
-                       cwq_dec_nr_in_flight(get_work_cwq(work),
-                               get_work_color(work));
+               /* work->data points to pwq iff queued, point to pool */
+               set_work_pool_and_keep_pending(work, pool->id);
 
-                       spin_unlock(&pool->lock);
-                       return 1;
-               }
+               spin_unlock(&pool->lock);
+               return 1;
        }
        spin_unlock(&pool->lock);
 fail:
@@ -1139,33 +1124,25 @@ fail:
 }
 
 /**
- * insert_work - insert a work into gcwq
- * @cwq: cwq @work belongs to
+ * insert_work - insert a work into a pool
+ * @pwq: pwq @work belongs to
  * @work: work to insert
  * @head: insertion point
  * @extra_flags: extra WORK_STRUCT_* flags to set
  *
- * Insert @work which belongs to @cwq into @gcwq after @head.
- * @extra_flags is or'd to work_struct flags.
+ * Insert @work which belongs to @pwq after @head.  @extra_flags is or'd to
+ * work_struct flags.
  *
  * CONTEXT:
  * spin_lock_irq(pool->lock).
  */
-static void insert_work(struct cpu_workqueue_struct *cwq,
-                       struct work_struct *work, struct list_head *head,
-                       unsigned int extra_flags)
+static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
+                       struct list_head *head, unsigned int extra_flags)
 {
-       struct worker_pool *pool = cwq->pool;
+       struct worker_pool *pool = pwq->pool;
 
        /* we own @work, set data and link */
-       set_work_cwq(work, cwq, extra_flags);
-
-       /*
-        * Ensure that we get the right work->data if we see the
-        * result of list_add() below, see try_to_grab_pending().
-        */
-       smp_wmb();
-
+       set_work_pwq(work, pwq, extra_flags);
        list_add_tail(&work->entry, head);
 
        /*
@@ -1181,43 +1158,24 @@ static void insert_work(struct cpu_workqueue_struct *cwq,
 
 /*
  * Test whether @work is being queued from another work executing on the
- * same workqueue.  This is rather expensive and should only be used from
- * cold paths.
+ * same workqueue.
  */
 static bool is_chained_work(struct workqueue_struct *wq)
 {
-       unsigned long flags;
-       unsigned int cpu;
-
-       for_each_gcwq_cpu(cpu) {
-               struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
-               struct worker_pool *pool = cwq->pool;
-               struct worker *worker;
-               struct hlist_node *pos;
-               int i;
+       struct worker *worker;
 
-               spin_lock_irqsave(&pool->lock, flags);
-               for_each_busy_worker(worker, i, pos, pool) {
-                       if (worker->task != current)
-                               continue;
-                       spin_unlock_irqrestore(&pool->lock, flags);
-                       /*
-                        * I'm @worker, no locking necessary.  See if @work
-                        * is headed to the same workqueue.
-                        */
-                       return worker->current_cwq->wq == wq;
-               }
-               spin_unlock_irqrestore(&pool->lock, flags);
-       }
-       return false;
+       worker = current_wq_worker();
+       /*
+        * Return %true iff I'm a worker executing a work item on @wq.  If
+        * I'm @worker, it's safe to dereference it without locking.
+        */
+       return worker && worker->current_pwq->wq == wq;
 }
 
 static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
                         struct work_struct *work)
 {
-       bool highpri = wq->flags & WQ_HIGHPRI;
-       struct worker_pool *pool;
-       struct cpu_workqueue_struct *cwq;
+       struct pool_workqueue *pwq;
        struct list_head *worklist;
        unsigned int work_flags;
        unsigned int req_cpu = cpu;
@@ -1237,7 +1195,7 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
            WARN_ON_ONCE(!is_chained_work(wq)))
                return;
 
-       /* determine pool to use */
+       /* determine the pwq to use */
        if (!(wq->flags & WQ_UNBOUND)) {
                struct worker_pool *last_pool;
 
@@ -1250,55 +1208,54 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
                 * work needs to be queued on that cpu to guarantee
                 * non-reentrancy.
                 */
-               pool = get_std_worker_pool(cpu, highpri);
+               pwq = get_pwq(cpu, wq);
                last_pool = get_work_pool(work);
 
-               if (last_pool && last_pool != pool) {
+               if (last_pool && last_pool != pwq->pool) {
                        struct worker *worker;
 
                        spin_lock(&last_pool->lock);
 
                        worker = find_worker_executing_work(last_pool, work);
 
-                       if (worker && worker->current_cwq->wq == wq)
-                               pool = last_pool;
-                       else {
+                       if (worker && worker->current_pwq->wq == wq) {
+                               pwq = get_pwq(last_pool->cpu, wq);
+                       } else {
                                /* meh... not running there, queue here */
                                spin_unlock(&last_pool->lock);
-                               spin_lock(&pool->lock);
+                               spin_lock(&pwq->pool->lock);
                        }
                } else {
-                       spin_lock(&pool->lock);
+                       spin_lock(&pwq->pool->lock);
                }
        } else {
-               pool = get_std_worker_pool(WORK_CPU_UNBOUND, highpri);
-               spin_lock(&pool->lock);
+               pwq = get_pwq(WORK_CPU_UNBOUND, wq);
+               spin_lock(&pwq->pool->lock);
        }
 
-       /* pool determined, get cwq and queue */
-       cwq = get_cwq(pool->cpu, wq);
-       trace_workqueue_queue_work(req_cpu, cwq, work);
+       /* pwq determined, queue */
+       trace_workqueue_queue_work(req_cpu, pwq, work);
 
        if (WARN_ON(!list_empty(&work->entry))) {
-               spin_unlock(&pool->lock);
+               spin_unlock(&pwq->pool->lock);
                return;
        }
 
-       cwq->nr_in_flight[cwq->work_color]++;
-       work_flags = work_color_to_flags(cwq->work_color);
+       pwq->nr_in_flight[pwq->work_color]++;
+       work_flags = work_color_to_flags(pwq->work_color);
 
-       if (likely(cwq->nr_active < cwq->max_active)) {
+       if (likely(pwq->nr_active < pwq->max_active)) {
                trace_workqueue_activate_work(work);
-               cwq->nr_active++;
-               worklist = &cwq->pool->worklist;
+               pwq->nr_active++;
+               worklist = &pwq->pool->worklist;
        } else {
                work_flags |= WORK_STRUCT_DELAYED;
-               worklist = &cwq->delayed_works;
+               worklist = &pwq->delayed_works;
        }
 
-       insert_work(cwq, work, worklist, work_flags);
+       insert_work(pwq, work, worklist, work_flags);
 
-       spin_unlock(&pool->lock);
+       spin_unlock(&pwq->pool->lock);
 }
 
 /**
@@ -1349,19 +1306,17 @@ EXPORT_SYMBOL_GPL(queue_work);
 void delayed_work_timer_fn(unsigned long __data)
 {
        struct delayed_work *dwork = (struct delayed_work *)__data;
-       struct cpu_workqueue_struct *cwq = get_work_cwq(&dwork->work);
 
        /* should have been called from irqsafe timer with irq already off */
-       __queue_work(dwork->cpu, cwq->wq, &dwork->work);
+       __queue_work(dwork->cpu, dwork->wq, &dwork->work);
 }
-EXPORT_SYMBOL_GPL(delayed_work_timer_fn);
+EXPORT_SYMBOL(delayed_work_timer_fn);
 
 static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
                                struct delayed_work *dwork, unsigned long delay)
 {
        struct timer_list *timer = &dwork->timer;
        struct work_struct *work = &dwork->work;
-       unsigned int lcpu;
 
        WARN_ON_ONCE(timer->function != delayed_work_timer_fn ||
                     timer->data != (unsigned long)dwork);
@@ -1381,30 +1336,7 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
 
        timer_stats_timer_set_start_info(&dwork->timer);
 
-       /*
-        * This stores cwq for the moment, for the timer_fn.  Note that the
-        * work's pool is preserved to allow reentrance detection for
-        * delayed works.
-        */
-       if (!(wq->flags & WQ_UNBOUND)) {
-               struct worker_pool *pool = get_work_pool(work);
-
-               /*
-                * If we cannot get the last pool from @work directly,
-                * select the last CPU such that it avoids unnecessarily
-                * triggering non-reentrancy check in __queue_work().
-                */
-               lcpu = cpu;
-               if (pool)
-                       lcpu = pool->cpu;
-               if (lcpu == WORK_CPU_UNBOUND)
-                       lcpu = raw_smp_processor_id();
-       } else {
-               lcpu = WORK_CPU_UNBOUND;
-       }
-
-       set_work_cwq(work, get_cwq(lcpu, wq), 0);
-
+       dwork->wq = wq;
        dwork->cpu = cpu;
        timer->expires = jiffies + delay;
 
@@ -1543,14 +1475,14 @@ static void worker_enter_idle(struct worker *worker)
                mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
 
        /*
-        * Sanity check nr_running.  Because gcwq_unbind_fn() releases
+        * Sanity check nr_running.  Because wq_unbind_fn() releases
         * pool->lock between setting %WORKER_UNBOUND and zapping
         * nr_running, the warning may trigger spuriously.  Check iff
         * unbind is not in progress.
         */
        WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
                     pool->nr_workers == pool->nr_idle &&
-                    atomic_read(get_pool_nr_running(pool)));
+                    atomic_read(&pool->nr_running));
 }
 
 /**
@@ -1573,7 +1505,7 @@ static void worker_leave_idle(struct worker *worker)
 }
 
 /**
- * worker_maybe_bind_and_lock - bind worker to its cpu if possible and lock gcwq
+ * worker_maybe_bind_and_lock - bind worker to its cpu if possible and lock pool
  * @worker: self
  *
  * Works which are scheduled while the cpu is online must at least be
@@ -1585,10 +1517,10 @@ static void worker_leave_idle(struct worker *worker)
  * themselves to the target cpu and may race with cpu going down or
  * coming online.  kthread_bind() can't be used because it may put the
  * worker to already dead cpu and set_cpus_allowed_ptr() can't be used
- * verbatim as it's best effort and blocking and gcwq may be
+ * verbatim as it's best effort and blocking and pool may be
  * [dis]associated in the meantime.
  *
- * This function tries set_cpus_allowed() and locks gcwq and verifies the
+ * This function tries set_cpus_allowed() and locks pool and verifies the
  * binding against %POOL_DISASSOCIATED which is set during
  * %CPU_DOWN_PREPARE and cleared during %CPU_ONLINE, so if the worker
  * enters idle state or fetches works without dropping lock, it can
@@ -1599,7 +1531,7 @@ static void worker_leave_idle(struct worker *worker)
  * held.
  *
  * RETURNS:
- * %true if the associated gcwq is online (@worker is successfully
+ * %true if the associated pool is online (@worker is successfully
  * bound), %false if offline.
  */
 static bool worker_maybe_bind_and_lock(struct worker *worker)
@@ -1728,14 +1660,14 @@ static void rebind_workers(struct worker_pool *pool)
 
                /*
                 * wq doesn't really matter but let's keep @worker->pool
-                * and @cwq->pool consistent for sanity.
+                * and @pwq->pool consistent for sanity.
                 */
                if (std_worker_pool_pri(worker->pool))
                        wq = system_highpri_wq;
                else
                        wq = system_wq;
 
-               insert_work(get_cwq(pool->cpu, wq), rebind_work,
+               insert_work(get_pwq(pool->cpu, wq), rebind_work,
                            worker->scheduled.next,
                            work_color_to_flags(WORK_NO_COLOR));
        }
@@ -1836,7 +1768,7 @@ fail:
  * start_worker - start a newly created worker
  * @worker: worker to start
  *
- * Make the gcwq aware of @worker and start it.
+ * Make the pool aware of @worker and start it.
  *
  * CONTEXT:
  * spin_lock_irq(pool->lock).
@@ -1853,7 +1785,7 @@ static void start_worker(struct worker *worker)
  * destroy_worker - destroy a workqueue worker
  * @worker: worker to be destroyed
  *
- * Destroy @worker and adjust @gcwq stats accordingly.
+ * Destroy @worker and adjust @pool stats accordingly.
  *
  * CONTEXT:
  * spin_lock_irq(pool->lock) which is released and regrabbed.
@@ -1912,15 +1844,15 @@ static void idle_worker_timeout(unsigned long __pool)
 
 static bool send_mayday(struct work_struct *work)
 {
-       struct cpu_workqueue_struct *cwq = get_work_cwq(work);
-       struct workqueue_struct *wq = cwq->wq;
+       struct pool_workqueue *pwq = get_work_pwq(work);
+       struct workqueue_struct *wq = pwq->wq;
        unsigned int cpu;
 
        if (!(wq->flags & WQ_RESCUER))
                return false;
 
        /* mayday mayday mayday */
-       cpu = cwq->pool->cpu;
+       cpu = pwq->pool->cpu;
        /* WORK_CPU_UNBOUND can't be set in cpumask, use cpu 0 instead */
        if (cpu == WORK_CPU_UNBOUND)
                cpu = 0;
@@ -1929,7 +1861,7 @@ static bool send_mayday(struct work_struct *work)
        return true;
 }
 
-static void gcwq_mayday_timeout(unsigned long __pool)
+static void pool_mayday_timeout(unsigned long __pool)
 {
        struct worker_pool *pool = (void *)__pool;
        struct work_struct *work;
@@ -2057,9 +1989,9 @@ static bool maybe_destroy_workers(struct worker_pool *pool)
  * manage_workers - manage worker pool
  * @worker: self
  *
- * Assume the manager role and manage gcwq worker pool @worker belongs
+ * Assume the manager role and manage the worker pool @worker belongs
  * to.  At any given time, there can be only zero or one manager per
- * gcwq.  The exclusion is handled automatically by this function.
+ * pool.  The exclusion is handled automatically by this function.
  *
  * The caller can safely start processing works on false return.  On
  * true return, it's guaranteed that need_to_create_worker() is false
@@ -2102,11 +2034,11 @@ static bool manage_workers(struct worker *worker)
                 * CPU hotplug could have happened while we were waiting
                 * for assoc_mutex.  Hotplug itself can't handle us
                 * because manager isn't either on idle or busy list, and
-                * @gcwq's state and ours could have deviated.
+                * @pool's state and ours could have deviated.
                 *
                 * As hotplug is now excluded via assoc_mutex, we can
                 * simply try to bind.  It will succeed or fail depending
-                * on @gcwq's current state.  Try it and adjust
+                * on @pool's current state.  Try it and adjust
                 * %WORKER_UNBOUND accordingly.
                 */
                if (worker_maybe_bind_and_lock(worker))
@@ -2149,9 +2081,9 @@ static void process_one_work(struct worker *worker, struct work_struct *work)
 __releases(&pool->lock)
 __acquires(&pool->lock)
 {
-       struct cpu_workqueue_struct *cwq = get_work_cwq(work);
+       struct pool_workqueue *pwq = get_work_pwq(work);
        struct worker_pool *pool = worker->pool;
-       bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE;
+       bool cpu_intensive = pwq->wq->flags & WQ_CPU_INTENSIVE;
        int work_color;
        struct worker *collision;
 #ifdef CONFIG_LOCKDEP
@@ -2192,7 +2124,7 @@ __acquires(&pool->lock)
        hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work);
        worker->current_work = work;
        worker->current_func = work->func;
-       worker->current_cwq = cwq;
+       worker->current_pwq = pwq;
        work_color = get_work_color(work);
 
        list_del_init(&work->entry);
@@ -2221,7 +2153,7 @@ __acquires(&pool->lock)
 
        spin_unlock_irq(&pool->lock);
 
-       lock_map_acquire_read(&cwq->wq->lockdep_map);
+       lock_map_acquire_read(&pwq->wq->lockdep_map);
        lock_map_acquire(&lockdep_map);
        trace_workqueue_execute_start(work);
        worker->current_func(work);
@@ -2231,7 +2163,7 @@ __acquires(&pool->lock)
         */
        trace_workqueue_execute_end(work);
        lock_map_release(&lockdep_map);
-       lock_map_release(&cwq->wq->lockdep_map);
+       lock_map_release(&pwq->wq->lockdep_map);
 
        if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
                pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n"
@@ -2252,8 +2184,8 @@ __acquires(&pool->lock)
        hash_del(&worker->hentry);
        worker->current_work = NULL;
        worker->current_func = NULL;
-       worker->current_cwq = NULL;
-       cwq_dec_nr_in_flight(cwq, work_color);
+       worker->current_pwq = NULL;
+       pwq_dec_nr_in_flight(pwq, work_color);
 }
 
 /**
@@ -2281,8 +2213,8 @@ static void process_scheduled_works(struct worker *worker)
  * worker_thread - the worker thread function
  * @__worker: self
  *
- * The gcwq worker thread function.  There's a single dynamic pool of
- * these per each cpu.  These workers process all works regardless of
+ * The worker thread function.  There are NR_STD_WORKER_POOLS dynamic pools
+ * of these per each cpu.  These workers process all works regardless of
  * their specific target workqueue.  The only exception is works which
  * belong to workqueues with a rescuer which will be explained in
  * rescuer_thread().
@@ -2378,14 +2310,14 @@ sleep:
  * Workqueue rescuer thread function.  There's one rescuer for each
  * workqueue which has WQ_RESCUER set.
  *
- * Regular work processing on a gcwq may block trying to create a new
+ * Regular work processing on a pool may block trying to create a new
  * worker which uses GFP_KERNEL allocation which has slight chance of
  * developing into deadlock if some works currently on the same queue
  * need to be processed to satisfy the GFP_KERNEL allocation.  This is
  * the problem rescuer solves.
  *
- * When such condition is possible, the gcwq summons rescuers of all
- * workqueues which have works queued on the gcwq and let them process
+ * When such condition is possible, the pool summons rescuers of all
+ * workqueues which have works queued on the pool and let them process
  * those works so that forward progress can be guaranteed.
  *
  * This should happen rarely.
@@ -2420,8 +2352,8 @@ repeat:
         */
        for_each_mayday_cpu(cpu, wq->mayday_mask) {
                unsigned int tcpu = is_unbound ? WORK_CPU_UNBOUND : cpu;
-               struct cpu_workqueue_struct *cwq = get_cwq(tcpu, wq);
-               struct worker_pool *pool = cwq->pool;
+               struct pool_workqueue *pwq = get_pwq(tcpu, wq);
+               struct worker_pool *pool = pwq->pool;
                struct work_struct *work, *n;
 
                __set_current_state(TASK_RUNNING);
@@ -2437,7 +2369,7 @@ repeat:
                 */
                BUG_ON(!list_empty(&rescuer->scheduled));
                list_for_each_entry_safe(work, n, &pool->worklist, entry)
-                       if (get_work_cwq(work) == cwq)
+                       if (get_work_pwq(work) == pwq)
                                move_linked_works(work, scheduled, &n);
 
                process_scheduled_works(rescuer);
@@ -2472,7 +2404,7 @@ static void wq_barrier_func(struct work_struct *work)
 
 /**
  * insert_wq_barrier - insert a barrier work
- * @cwq: cwq to insert barrier into
+ * @pwq: pwq to insert barrier into
  * @barr: wq_barrier to insert
  * @target: target work to attach @barr to
  * @worker: worker currently executing @target, NULL if @target is not executing
@@ -2489,12 +2421,12 @@ static void wq_barrier_func(struct work_struct *work)
  * after a work with LINKED flag set.
  *
  * Note that when @worker is non-NULL, @target may be modified
- * underneath us, so we can't reliably determine cwq from @target.
+ * underneath us, so we can't reliably determine pwq from @target.
  *
  * CONTEXT:
  * spin_lock_irq(pool->lock).
  */
-static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
+static void insert_wq_barrier(struct pool_workqueue *pwq,
                              struct wq_barrier *barr,
                              struct work_struct *target, struct worker *worker)
 {
@@ -2527,23 +2459,23 @@ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
        }
 
        debug_work_activate(&barr->work);
-       insert_work(cwq, &barr->work, head,
+       insert_work(pwq, &barr->work, head,
                    work_color_to_flags(WORK_NO_COLOR) | linked);
 }
 
 /**
- * flush_workqueue_prep_cwqs - prepare cwqs for workqueue flushing
+ * flush_workqueue_prep_pwqs - prepare pwqs for workqueue flushing
  * @wq: workqueue being flushed
  * @flush_color: new flush color, < 0 for no-op
  * @work_color: new work color, < 0 for no-op
  *
- * Prepare cwqs for workqueue flushing.
+ * Prepare pwqs for workqueue flushing.
  *
- * If @flush_color is non-negative, flush_color on all cwqs should be
- * -1.  If no cwq has in-flight commands at the specified color, all
- * cwq->flush_color's stay at -1 and %false is returned.  If any cwq
- * has in flight commands, its cwq->flush_color is set to
- * @flush_color, @wq->nr_cwqs_to_flush is updated accordingly, cwq
+ * If @flush_color is non-negative, flush_color on all pwqs should be
+ * -1.  If no pwq has in-flight commands at the specified color, all
+ * pwq->flush_color's stay at -1 and %false is returned.  If any pwq
+ * has in flight commands, its pwq->flush_color is set to
+ * @flush_color, @wq->nr_pwqs_to_flush is updated accordingly, pwq
  * wakeup logic is armed and %true is returned.
  *
  * The caller should have initialized @wq->first_flusher prior to
@@ -2551,7 +2483,7 @@ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
  * @flush_color is negative, no flush color update is done and %false
  * is returned.
  *
- * If @work_color is non-negative, all cwqs should have the same
+ * If @work_color is non-negative, all pwqs should have the same
  * work_color which is previous to @work_color and all will be
  * advanced to @work_color.
  *
@@ -2562,42 +2494,42 @@ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
  * %true if @flush_color >= 0 and there's something to flush.  %false
  * otherwise.
  */
-static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq,
+static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
                                      int flush_color, int work_color)
 {
        bool wait = false;
        unsigned int cpu;
 
        if (flush_color >= 0) {
-               BUG_ON(atomic_read(&wq->nr_cwqs_to_flush));
-               atomic_set(&wq->nr_cwqs_to_flush, 1);
+               BUG_ON(atomic_read(&wq->nr_pwqs_to_flush));
+               atomic_set(&wq->nr_pwqs_to_flush, 1);
        }
 
-       for_each_cwq_cpu(cpu, wq) {
-               struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
-               struct worker_pool *pool = cwq->pool;
+       for_each_pwq_cpu(cpu, wq) {
+               struct pool_workqueue *pwq = get_pwq(cpu, wq);
+               struct worker_pool *pool = pwq->pool;
 
                spin_lock_irq(&pool->lock);
 
                if (flush_color >= 0) {
-                       BUG_ON(cwq->flush_color != -1);
+                       BUG_ON(pwq->flush_color != -1);
 
-                       if (cwq->nr_in_flight[flush_color]) {
-                               cwq->flush_color = flush_color;
-                               atomic_inc(&wq->nr_cwqs_to_flush);
+                       if (pwq->nr_in_flight[flush_color]) {
+                               pwq->flush_color = flush_color;
+                               atomic_inc(&wq->nr_pwqs_to_flush);
                                wait = true;
                        }
                }
 
                if (work_color >= 0) {
-                       BUG_ON(work_color != work_next_color(cwq->work_color));
-                       cwq->work_color = work_color;
+                       BUG_ON(work_color != work_next_color(pwq->work_color));
+                       pwq->work_color = work_color;
                }
 
                spin_unlock_irq(&pool->lock);
        }
 
-       if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_cwqs_to_flush))
+       if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush))
                complete(&wq->first_flusher->done);
 
        return wait;
@@ -2648,7 +2580,7 @@ void flush_workqueue(struct workqueue_struct *wq)
 
                        wq->first_flusher = &this_flusher;
 
-                       if (!flush_workqueue_prep_cwqs(wq, wq->flush_color,
+                       if (!flush_workqueue_prep_pwqs(wq, wq->flush_color,
                                                       wq->work_color)) {
                                /* nothing to flush, done */
                                wq->flush_color = next_color;
@@ -2659,7 +2591,7 @@ void flush_workqueue(struct workqueue_struct *wq)
                        /* wait in queue */
                        BUG_ON(wq->flush_color == this_flusher.flush_color);
                        list_add_tail(&this_flusher.list, &wq->flusher_queue);
-                       flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
+                       flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
                }
        } else {
                /*
@@ -2726,7 +2658,7 @@ void flush_workqueue(struct workqueue_struct *wq)
 
                        list_splice_tail_init(&wq->flusher_overflow,
                                              &wq->flusher_queue);
-                       flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
+                       flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
                }
 
                if (list_empty(&wq->flusher_queue)) {
@@ -2736,7 +2668,7 @@ void flush_workqueue(struct workqueue_struct *wq)
 
                /*
                 * Need to flush more colors.  Make the next flusher
-                * the new first flusher and arm cwqs.
+                * the new first flusher and arm pwqs.
                 */
                BUG_ON(wq->flush_color == wq->work_color);
                BUG_ON(wq->flush_color != next->flush_color);
@@ -2744,7 +2676,7 @@ void flush_workqueue(struct workqueue_struct *wq)
                list_del_init(&next->list);
                wq->first_flusher = next;
 
-               if (flush_workqueue_prep_cwqs(wq, wq->flush_color, -1))
+               if (flush_workqueue_prep_pwqs(wq, wq->flush_color, -1))
                        break;
 
                /*
@@ -2787,13 +2719,13 @@ void drain_workqueue(struct workqueue_struct *wq)
 reflush:
        flush_workqueue(wq);
 
-       for_each_cwq_cpu(cpu, wq) {
-               struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
+       for_each_pwq_cpu(cpu, wq) {
+               struct pool_workqueue *pwq = get_pwq(cpu, wq);
                bool drained;
 
-               spin_lock_irq(&cwq->pool->lock);
-               drained = !cwq->nr_active && list_empty(&cwq->delayed_works);
-               spin_unlock_irq(&cwq->pool->lock);
+               spin_lock_irq(&pwq->pool->lock);
+               drained = !pwq->nr_active && list_empty(&pwq->delayed_works);
+               spin_unlock_irq(&pwq->pool->lock);
 
                if (drained)
                        continue;
@@ -2816,7 +2748,7 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
 {
        struct worker *worker = NULL;
        struct worker_pool *pool;
-       struct cpu_workqueue_struct *cwq;
+       struct pool_workqueue *pwq;
 
        might_sleep();
        pool = get_work_pool(work);
@@ -2824,24 +2756,19 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
                return false;
 
        spin_lock_irq(&pool->lock);
-       if (!list_empty(&work->entry)) {
-               /*
-                * See the comment near try_to_grab_pending()->smp_rmb().
-                * If it was re-queued to a different pool under us, we
-                * are not going to wait.
-                */
-               smp_rmb();
-               cwq = get_work_cwq(work);
-               if (unlikely(!cwq || pool != cwq->pool))
+       /* see the comment in try_to_grab_pending() with the same code */
+       pwq = get_work_pwq(work);
+       if (pwq) {
+               if (unlikely(pwq->pool != pool))
                        goto already_gone;
        } else {
                worker = find_worker_executing_work(pool, work);
                if (!worker)
                        goto already_gone;
-               cwq = worker->current_cwq;
+               pwq = worker->current_pwq;
        }
 
-       insert_wq_barrier(cwq, barr, work, worker);
+       insert_wq_barrier(pwq, barr, work, worker);
        spin_unlock_irq(&pool->lock);
 
        /*
@@ -2850,11 +2777,11 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
         * flusher is not running on the same workqueue by verifying write
         * access.
         */
-       if (cwq->wq->saved_max_active == 1 || cwq->wq->flags & WQ_RESCUER)
-               lock_map_acquire(&cwq->wq->lockdep_map);
+       if (pwq->wq->saved_max_active == 1 || pwq->wq->flags & WQ_RESCUER)
+               lock_map_acquire(&pwq->wq->lockdep_map);
        else
-               lock_map_acquire_read(&cwq->wq->lockdep_map);
-       lock_map_release(&cwq->wq->lockdep_map);
+               lock_map_acquire_read(&pwq->wq->lockdep_map);
+       lock_map_release(&pwq->wq->lockdep_map);
 
        return true;
 already_gone:
@@ -2954,8 +2881,7 @@ bool flush_delayed_work(struct delayed_work *dwork)
 {
        local_irq_disable();
        if (del_timer_sync(&dwork->timer))
-               __queue_work(dwork->cpu,
-                            get_work_cwq(&dwork->work)->wq, &dwork->work);
+               __queue_work(dwork->cpu, dwork->wq, &dwork->work);
        local_irq_enable();
        return flush_work(&dwork->work);
 }
@@ -3165,46 +3091,46 @@ int keventd_up(void)
        return system_wq != NULL;
 }
 
-static int alloc_cwqs(struct workqueue_struct *wq)
+static int alloc_pwqs(struct workqueue_struct *wq)
 {
        /*
-        * cwqs are forced aligned according to WORK_STRUCT_FLAG_BITS.
+        * pwqs are forced aligned according to WORK_STRUCT_FLAG_BITS.
         * Make sure that the alignment isn't lower than that of
         * unsigned long long.
         */
-       const size_t size = sizeof(struct cpu_workqueue_struct);
+       const size_t size = sizeof(struct pool_workqueue);
        const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS,
                                   __alignof__(unsigned long long));
 
        if (!(wq->flags & WQ_UNBOUND))
-               wq->cpu_wq.pcpu = __alloc_percpu(size, align);
+               wq->pool_wq.pcpu = __alloc_percpu(size, align);
        else {
                void *ptr;
 
                /*
-                * Allocate enough room to align cwq and put an extra
+                * Allocate enough room to align pwq and put an extra
                 * pointer at the end pointing back to the originally
                 * allocated pointer which will be used for free.
                 */
                ptr = kzalloc(size + align + sizeof(void *), GFP_KERNEL);
                if (ptr) {
-                       wq->cpu_wq.single = PTR_ALIGN(ptr, align);
-                       *(void **)(wq->cpu_wq.single + 1) = ptr;
+                       wq->pool_wq.single = PTR_ALIGN(ptr, align);
+                       *(void **)(wq->pool_wq.single + 1) = ptr;
                }
        }
 
        /* just in case, make sure it's actually aligned */
-       BUG_ON(!IS_ALIGNED(wq->cpu_wq.v, align));
-       return wq->cpu_wq.v ? 0 : -ENOMEM;
+       BUG_ON(!IS_ALIGNED(wq->pool_wq.v, align));
+       return wq->pool_wq.v ? 0 : -ENOMEM;
 }
 
-static void free_cwqs(struct workqueue_struct *wq)
+static void free_pwqs(struct workqueue_struct *wq)
 {
        if (!(wq->flags & WQ_UNBOUND))
-               free_percpu(wq->cpu_wq.pcpu);
-       else if (wq->cpu_wq.single) {
-               /* the pointer to free is stored right after the cwq */
-               kfree(*(void **)(wq->cpu_wq.single + 1));
+               free_percpu(wq->pool_wq.pcpu);
+       else if (wq->pool_wq.single) {
+               /* the pointer to free is stored right after the pwq */
+               kfree(*(void **)(wq->pool_wq.single + 1));
        }
 }
 
@@ -3258,27 +3184,25 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
        wq->flags = flags;
        wq->saved_max_active = max_active;
        mutex_init(&wq->flush_mutex);
-       atomic_set(&wq->nr_cwqs_to_flush, 0);
+       atomic_set(&wq->nr_pwqs_to_flush, 0);
        INIT_LIST_HEAD(&wq->flusher_queue);
        INIT_LIST_HEAD(&wq->flusher_overflow);
 
        lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
        INIT_LIST_HEAD(&wq->list);
 
-       if (alloc_cwqs(wq) < 0)
+       if (alloc_pwqs(wq) < 0)
                goto err;
 
-       for_each_cwq_cpu(cpu, wq) {
-               struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
-               struct global_cwq *gcwq = get_gcwq(cpu);
-               int pool_idx = (bool)(flags & WQ_HIGHPRI);
-
-               BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK);
-               cwq->pool = &gcwq->pools[pool_idx];
-               cwq->wq = wq;
-               cwq->flush_color = -1;
-               cwq->max_active = max_active;
-               INIT_LIST_HEAD(&cwq->delayed_works);
+       for_each_pwq_cpu(cpu, wq) {
+               struct pool_workqueue *pwq = get_pwq(cpu, wq);
+
+               BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK);
+               pwq->pool = get_std_worker_pool(cpu, flags & WQ_HIGHPRI);
+               pwq->wq = wq;
+               pwq->flush_color = -1;
+               pwq->max_active = max_active;
+               INIT_LIST_HEAD(&pwq->delayed_works);
        }
 
        if (flags & WQ_RESCUER) {
@@ -3309,8 +3233,8 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
        spin_lock(&workqueue_lock);
 
        if (workqueue_freezing && wq->flags & WQ_FREEZABLE)
-               for_each_cwq_cpu(cpu, wq)
-                       get_cwq(cpu, wq)->max_active = 0;
+               for_each_pwq_cpu(cpu, wq)
+                       get_pwq(cpu, wq)->max_active = 0;
 
        list_add(&wq->list, &workqueues);
 
@@ -3319,7 +3243,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
        return wq;
 err:
        if (wq) {
-               free_cwqs(wq);
+               free_pwqs(wq);
                free_mayday_mask(wq->mayday_mask);
                kfree(wq->rescuer);
                kfree(wq);
@@ -3350,14 +3274,14 @@ void destroy_workqueue(struct workqueue_struct *wq)
        spin_unlock(&workqueue_lock);
 
        /* sanity check */
-       for_each_cwq_cpu(cpu, wq) {
-               struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
+       for_each_pwq_cpu(cpu, wq) {
+               struct pool_workqueue *pwq = get_pwq(cpu, wq);
                int i;
 
                for (i = 0; i < WORK_NR_COLORS; i++)
-                       BUG_ON(cwq->nr_in_flight[i]);
-               BUG_ON(cwq->nr_active);
-               BUG_ON(!list_empty(&cwq->delayed_works));
+                       BUG_ON(pwq->nr_in_flight[i]);
+               BUG_ON(pwq->nr_active);
+               BUG_ON(!list_empty(&pwq->delayed_works));
        }
 
        if (wq->flags & WQ_RESCUER) {
@@ -3366,29 +3290,29 @@ void destroy_workqueue(struct workqueue_struct *wq)
                kfree(wq->rescuer);
        }
 
-       free_cwqs(wq);
+       free_pwqs(wq);
        kfree(wq);
 }
 EXPORT_SYMBOL_GPL(destroy_workqueue);
 
 /**
- * cwq_set_max_active - adjust max_active of a cwq
- * @cwq: target cpu_workqueue_struct
+ * pwq_set_max_active - adjust max_active of a pwq
+ * @pwq: target pool_workqueue
  * @max_active: new max_active value.
  *
- * Set @cwq->max_active to @max_active and activate delayed works if
+ * Set @pwq->max_active to @max_active and activate delayed works if
  * increased.
  *
  * CONTEXT:
  * spin_lock_irq(pool->lock).
  */
-static void cwq_set_max_active(struct cpu_workqueue_struct *cwq, int max_active)
+static void pwq_set_max_active(struct pool_workqueue *pwq, int max_active)
 {
-       cwq->max_active = max_active;
+       pwq->max_active = max_active;
 
-       while (!list_empty(&cwq->delayed_works) &&
-              cwq->nr_active < cwq->max_active)
-               cwq_activate_first_delayed(cwq);
+       while (!list_empty(&pwq->delayed_works) &&
+              pwq->nr_active < pwq->max_active)
+               pwq_activate_first_delayed(pwq);
 }
 
 /**
@@ -3411,15 +3335,15 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
 
        wq->saved_max_active = max_active;
 
-       for_each_cwq_cpu(cpu, wq) {
-               struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
-               struct worker_pool *pool = cwq->pool;
+       for_each_pwq_cpu(cpu, wq) {
+               struct pool_workqueue *pwq = get_pwq(cpu, wq);
+               struct worker_pool *pool = pwq->pool;
 
                spin_lock_irq(&pool->lock);
 
                if (!(wq->flags & WQ_FREEZABLE) ||
                    !(pool->flags & POOL_FREEZING))
-                       cwq_set_max_active(cwq, max_active);
+                       pwq_set_max_active(pwq, max_active);
 
                spin_unlock_irq(&pool->lock);
        }
@@ -3442,9 +3366,9 @@ EXPORT_SYMBOL_GPL(workqueue_set_max_active);
  */
 bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq)
 {
-       struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
+       struct pool_workqueue *pwq = get_pwq(cpu, wq);
 
-       return !list_empty(&cwq->delayed_works);
+       return !list_empty(&pwq->delayed_works);
 }
 EXPORT_SYMBOL_GPL(workqueue_congested);
 
@@ -3455,8 +3379,6 @@ EXPORT_SYMBOL_GPL(workqueue_congested);
  * Test whether @work is currently pending or running.  There is no
  * synchronization around this function and the test result is
  * unreliable and only useful as advisory hints or for debugging.
- * Especially for reentrant wqs, the pending state might hide the
- * running state.
  *
  * RETURNS:
  * OR'd bitmask of WORK_BUSY_* bits.
@@ -3467,17 +3389,15 @@ unsigned int work_busy(struct work_struct *work)
        unsigned long flags;
        unsigned int ret = 0;
 
-       if (!pool)
-               return 0;
-
-       spin_lock_irqsave(&pool->lock, flags);
-
        if (work_pending(work))
                ret |= WORK_BUSY_PENDING;
-       if (find_worker_executing_work(pool, work))
-               ret |= WORK_BUSY_RUNNING;
 
-       spin_unlock_irqrestore(&pool->lock, flags);
+       if (pool) {
+               spin_lock_irqsave(&pool->lock, flags);
+               if (find_worker_executing_work(pool, work))
+                       ret |= WORK_BUSY_RUNNING;
+               spin_unlock_irqrestore(&pool->lock, flags);
+       }
 
        return ret;
 }
@@ -3487,8 +3407,8 @@ EXPORT_SYMBOL_GPL(work_busy);
  * CPU hotplug.
  *
  * There are two challenges in supporting CPU hotplug.  Firstly, there
- * are a lot of assumptions on strong associations among work, cwq and
- * gcwq which make migrating pending and scheduled works very
+ * are a lot of assumptions on strong associations among work, pwq and
+ * pool which make migrating pending and scheduled works very
  * difficult to implement without impacting hot paths.  Secondly,
  * worker pools serve mix of short, long and very long running works making
  * blocked draining impractical.
@@ -3498,16 +3418,16 @@ EXPORT_SYMBOL_GPL(work_busy);
  * cpu comes back online.
  */
 
-static void gcwq_unbind_fn(struct work_struct *work)
+static void wq_unbind_fn(struct work_struct *work)
 {
-       struct global_cwq *gcwq = get_gcwq(smp_processor_id());
+       int cpu = smp_processor_id();
        struct worker_pool *pool;
        struct worker *worker;
        struct hlist_node *pos;
        int i;
 
-       for_each_worker_pool(pool, gcwq) {
-               BUG_ON(pool->cpu != smp_processor_id());
+       for_each_std_worker_pool(pool, cpu) {
+               BUG_ON(cpu != smp_processor_id());
 
                mutex_lock(&pool->assoc_mutex);
                spin_lock_irq(&pool->lock);
@@ -3541,16 +3461,16 @@ static void gcwq_unbind_fn(struct work_struct *work)
        /*
         * Sched callbacks are disabled now.  Zap nr_running.  After this,
         * nr_running stays zero and need_more_worker() and keep_working()
-        * are always true as long as the worklist is not empty.  @gcwq now
-        * behaves as unbound (in terms of concurrency management) gcwq
-        * which is served by workers tied to the CPU.
+        * are always true as long as the worklist is not empty.  Pools on
+        * @cpu now behave as unbound (in terms of concurrency management)
+        * pools which are served by workers tied to the CPU.
         *
         * On return from this function, the current worker would trigger
         * unbound chain execution of pending work items if other workers
         * didn't already.
         */
-       for_each_worker_pool(pool, gcwq)
-               atomic_set(get_pool_nr_running(pool), 0);
+       for_each_std_worker_pool(pool, cpu)
+               atomic_set(&pool->nr_running, 0);
 }
 
 /*
@@ -3562,12 +3482,11 @@ static int __cpuinit workqueue_cpu_up_callback(struct notifier_block *nfb,
                                               void *hcpu)
 {
        unsigned int cpu = (unsigned long)hcpu;
-       struct global_cwq *gcwq = get_gcwq(cpu);
        struct worker_pool *pool;
 
        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_UP_PREPARE:
-               for_each_worker_pool(pool, gcwq) {
+               for_each_std_worker_pool(pool, cpu) {
                        struct worker *worker;
 
                        if (pool->nr_workers)
@@ -3585,7 +3504,7 @@ static int __cpuinit workqueue_cpu_up_callback(struct notifier_block *nfb,
 
        case CPU_DOWN_FAILED:
        case CPU_ONLINE:
-               for_each_worker_pool(pool, gcwq) {
+               for_each_std_worker_pool(pool, cpu) {
                        mutex_lock(&pool->assoc_mutex);
                        spin_lock_irq(&pool->lock);
 
@@ -3614,7 +3533,7 @@ static int __cpuinit workqueue_cpu_down_callback(struct notifier_block *nfb,
        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_DOWN_PREPARE:
                /* unbinding should happen on the local CPU */
-               INIT_WORK_ONSTACK(&unbind_work, gcwq_unbind_fn);
+               INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn);
                queue_work_on(cpu, system_highpri_wq, &unbind_work);
                flush_work(&unbind_work);
                break;
@@ -3667,7 +3586,7 @@ EXPORT_SYMBOL_GPL(work_on_cpu);
  *
  * Start freezing workqueues.  After this function returns, all freezable
  * workqueues will queue new works to their frozen_works list instead of
- * gcwq->worklist.
+ * pool->worklist.
  *
  * CONTEXT:
  * Grabs and releases workqueue_lock and pool->lock's.
@@ -3681,30 +3600,26 @@ void freeze_workqueues_begin(void)
        BUG_ON(workqueue_freezing);
        workqueue_freezing = true;
 
-       for_each_gcwq_cpu(cpu) {
-               struct global_cwq *gcwq = get_gcwq(cpu);
+       for_each_wq_cpu(cpu) {
                struct worker_pool *pool;
                struct workqueue_struct *wq;
 
-               local_irq_disable();
-
-               for_each_worker_pool(pool, gcwq) {
-                       spin_lock_nested(&pool->lock, pool - gcwq->pools);
+               for_each_std_worker_pool(pool, cpu) {
+                       spin_lock_irq(&pool->lock);
 
                        WARN_ON_ONCE(pool->flags & POOL_FREEZING);
                        pool->flags |= POOL_FREEZING;
-               }
 
-               list_for_each_entry(wq, &workqueues, list) {
-                       struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
+                       list_for_each_entry(wq, &workqueues, list) {
+                               struct pool_workqueue *pwq = get_pwq(cpu, wq);
 
-                       if (cwq && wq->flags & WQ_FREEZABLE)
-                               cwq->max_active = 0;
-               }
+                               if (pwq && pwq->pool == pool &&
+                                   (wq->flags & WQ_FREEZABLE))
+                                       pwq->max_active = 0;
+                       }
 
-               for_each_worker_pool(pool, gcwq)
-                       spin_unlock(&pool->lock);
-               local_irq_enable();
+                       spin_unlock_irq(&pool->lock);
+               }
        }
 
        spin_unlock(&workqueue_lock);
@@ -3732,20 +3647,20 @@ bool freeze_workqueues_busy(void)
 
        BUG_ON(!workqueue_freezing);
 
-       for_each_gcwq_cpu(cpu) {
+       for_each_wq_cpu(cpu) {
                struct workqueue_struct *wq;
                /*
                 * nr_active is monotonically decreasing.  It's safe
                 * to peek without lock.
                 */
                list_for_each_entry(wq, &workqueues, list) {
-                       struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
+                       struct pool_workqueue *pwq = get_pwq(cpu, wq);
 
-                       if (!cwq || !(wq->flags & WQ_FREEZABLE))
+                       if (!pwq || !(wq->flags & WQ_FREEZABLE))
                                continue;
 
-                       BUG_ON(cwq->nr_active < 0);
-                       if (cwq->nr_active) {
+                       BUG_ON(pwq->nr_active < 0);
+                       if (pwq->nr_active) {
                                busy = true;
                                goto out_unlock;
                        }
@@ -3760,7 +3675,7 @@ out_unlock:
  * thaw_workqueues - thaw workqueues
  *
  * Thaw workqueues.  Normal queueing is restored and all collected
- * frozen works are transferred to their respective gcwq worklists.
+ * frozen works are transferred to their respective pool worklists.
  *
  * CONTEXT:
  * Grabs and releases workqueue_lock and pool->lock's.
@@ -3774,35 +3689,31 @@ void thaw_workqueues(void)
        if (!workqueue_freezing)
                goto out_unlock;
 
-       for_each_gcwq_cpu(cpu) {
-               struct global_cwq *gcwq = get_gcwq(cpu);
+       for_each_wq_cpu(cpu) {
                struct worker_pool *pool;
                struct workqueue_struct *wq;
 
-               local_irq_disable();
-
-               for_each_worker_pool(pool, gcwq) {
-                       spin_lock_nested(&pool->lock, pool - gcwq->pools);
+               for_each_std_worker_pool(pool, cpu) {
+                       spin_lock_irq(&pool->lock);
 
                        WARN_ON_ONCE(!(pool->flags & POOL_FREEZING));
                        pool->flags &= ~POOL_FREEZING;
-               }
 
-               list_for_each_entry(wq, &workqueues, list) {
-                       struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
+                       list_for_each_entry(wq, &workqueues, list) {
+                               struct pool_workqueue *pwq = get_pwq(cpu, wq);
 
-                       if (!cwq || !(wq->flags & WQ_FREEZABLE))
-                               continue;
+                               if (!pwq || pwq->pool != pool ||
+                                   !(wq->flags & WQ_FREEZABLE))
+                                       continue;
 
-                       /* restore max_active and repopulate worklist */
-                       cwq_set_max_active(cwq, wq->saved_max_active);
-               }
+                               /* restore max_active and repopulate worklist */
+                               pwq_set_max_active(pwq, wq->saved_max_active);
+                       }
 
-               for_each_worker_pool(pool, gcwq) {
                        wake_up_worker(pool);
-                       spin_unlock(&pool->lock);
+
+                       spin_unlock_irq(&pool->lock);
                }
-               local_irq_enable();
        }
 
        workqueue_freezing = false;
@@ -3817,18 +3728,16 @@ static int __init init_workqueues(void)
 
        /* make sure we have enough bits for OFFQ pool ID */
        BUILD_BUG_ON((1LU << (BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT)) <
-                    WORK_CPU_LAST * NR_STD_WORKER_POOLS);
+                    WORK_CPU_END * NR_STD_WORKER_POOLS);
 
        cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP);
        hotcpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN);
 
-       /* initialize gcwqs */
-       for_each_gcwq_cpu(cpu) {
-               struct global_cwq *gcwq = get_gcwq(cpu);
+       /* initialize CPU pools */
+       for_each_wq_cpu(cpu) {
                struct worker_pool *pool;
 
-               for_each_worker_pool(pool, gcwq) {
-                       pool->gcwq = gcwq;
+               for_each_std_worker_pool(pool, cpu) {
                        spin_lock_init(&pool->lock);
                        pool->cpu = cpu;
                        pool->flags |= POOL_DISASSOCIATED;
@@ -3840,7 +3749,7 @@ static int __init init_workqueues(void)
                        pool->idle_timer.function = idle_worker_timeout;
                        pool->idle_timer.data = (unsigned long)pool;
 
-                       setup_timer(&pool->mayday_timer, gcwq_mayday_timeout,
+                       setup_timer(&pool->mayday_timer, pool_mayday_timeout,
                                    (unsigned long)pool);
 
                        mutex_init(&pool->assoc_mutex);
@@ -3852,11 +3761,10 @@ static int __init init_workqueues(void)
        }
 
        /* create the initial worker */
-       for_each_online_gcwq_cpu(cpu) {
-               struct global_cwq *gcwq = get_gcwq(cpu);
+       for_each_online_wq_cpu(cpu) {
                struct worker_pool *pool;
 
-               for_each_worker_pool(pool, gcwq) {
+               for_each_std_worker_pool(pool, cpu) {
                        struct worker *worker;
 
                        if (cpu != WORK_CPU_UNBOUND)