WORKER_PREP = 1 << 3, /* preparing to run works */
WORKER_CPU_INTENSIVE = 1 << 6, /* cpu intensive */
WORKER_UNBOUND = 1 << 7, /* worker is unbound */
+ WORKER_REBOUND = 1 << 8, /* worker was rebound */
- WORKER_NOT_RUNNING = WORKER_PREP | WORKER_UNBOUND |
- WORKER_CPU_INTENSIVE,
+ WORKER_NOT_RUNNING = WORKER_PREP | WORKER_CPU_INTENSIVE |
+ WORKER_UNBOUND | WORKER_REBOUND,
NR_STD_WORKER_POOLS = 2, /* # standard pools per cpu */
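
Because WORKER_REBOUND is included in WORKER_NOT_RUNNING, replacing UNBOUND with REBOUND never makes a worker look runnable in between, and nr_running is only bumped once the worker clears the flag itself. A minimal userspace model of that accounting (illustrative only; the constant names mirror the enum above, clr_flags() loosely models worker_clr_flags(), nothing here is kernel code):

#include <assert.h>
#include <stdio.h>

enum {
	PREP          = 1 << 3,
	CPU_INTENSIVE = 1 << 6,
	UNBOUND       = 1 << 7,
	REBOUND       = 1 << 8,
	NOT_RUNNING   = PREP | CPU_INTENSIVE | UNBOUND | REBOUND,
};

/* bump nr_running when the last NOT_RUNNING bit goes away */
static void clr_flags(unsigned int *flags, int *nr_running, unsigned int clr)
{
	unsigned int oflags = *flags;

	*flags &= ~clr;
	if ((oflags & NOT_RUNNING) && !(*flags & NOT_RUNNING))
		(*nr_running)++;
}

int main(void)
{
	unsigned int flags = UNBOUND;	/* worker from a DISASSOCIATED pool */
	int nr_running = 0;

	/* rebind: swap UNBOUND for REBOUND in one step, still NOT_RUNNING */
	flags = (flags | REBOUND) & ~UNBOUND;
	assert((flags & NOT_RUNNING) && nr_running == 0);

	/* the worker clears REBOUND itself on its next execution cycle and
	 * only then starts counting toward concurrency management */
	clr_flags(&flags, &nr_running, PREP | REBOUND);
	assert(nr_running == 1);

	printf("ok\n");
	return 0;
}
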
* MG: pool->manager_mutex and pool->lock protected. Writes require both
* locks. Reads can happen under either lock.
*
- * WQ: wq_mutex protected.
+ * PL: wq_pool_mutex protected.
*
- * WR: wq_mutex protected for writes. Sched-RCU protected for reads.
+ * PR: wq_pool_mutex protected for writes. Sched-RCU protected for reads.
*
* PW: pwq_lock protected.
*
struct idr worker_idr; /* MG: worker IDs and iteration */
struct workqueue_attrs *attrs; /* I: worker attributes */
- struct hlist_node hash_node; /* WQ: unbound_pool_hash node */
- int refcnt; /* WQ: refcnt for unbound pools */
+ struct hlist_node hash_node; /* PL: unbound_pool_hash node */
+ int refcnt; /* PL: refcnt for unbound pools */
/*
* The current concurrency level. As it's likely to be accessed
* the appropriate worker_pool through its pool_workqueues.
*/
struct workqueue_struct {
- unsigned int flags; /* WQ: WQ_* flags */
+ unsigned int flags; /* PL: WQ_* flags */
struct pool_workqueue __percpu *cpu_pwqs; /* I: per-cpu pwq's */
struct list_head pwqs; /* FR: all pwqs of this wq */
- struct list_head list; /* WQ: list of all workqueues */
+ struct list_head list; /* PL: list of all workqueues */
struct mutex flush_mutex; /* protects wq flushing */
int work_color; /* F: current work color */
struct list_head maydays; /* MD: pwqs requesting rescue */
struct worker *rescuer; /* I: rescue worker */
- int nr_drainers; /* WQ: drain in progress */
+ int nr_drainers; /* PL: drain in progress */
int saved_max_active; /* PW: saved pwq max_active */
#ifdef CONFIG_SYSFS
static struct kmem_cache *pwq_cache;
-static DEFINE_MUTEX(wq_mutex); /* protects workqueues and pools */
+static DEFINE_MUTEX(wq_pool_mutex); /* protects pools and workqueues list */
static DEFINE_SPINLOCK(pwq_lock); /* protects pool_workqueues */
static DEFINE_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */
-static LIST_HEAD(workqueues); /* WQ: list of all workqueues */
-static bool workqueue_freezing; /* WQ: have wqs started freezing? */
+static LIST_HEAD(workqueues); /* PL: list of all workqueues */
+static bool workqueue_freezing; /* PL: have wqs started freezing? */
/* the per-cpu worker pools */
static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS],
cpu_worker_pools);
-static DEFINE_IDR(worker_pool_idr); /* WR: idr of all pools */
+static DEFINE_IDR(worker_pool_idr); /* PR: idr of all pools */
-/* WQ: hash of all unbound pools keyed by pool->attrs */
+/* PL: hash of all unbound pools keyed by pool->attrs */
static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);
/* I: attributes used when instantiating standard unbound pools on demand */
#define CREATE_TRACE_POINTS
#include <trace/events/workqueue.h>
-#define assert_rcu_or_wq_mutex() \
+#define assert_rcu_or_pool_mutex() \
rcu_lockdep_assert(rcu_read_lock_sched_held() || \
- lockdep_is_held(&wq_mutex), \
- "sched RCU or wq_mutex should be held")
+ lockdep_is_held(&wq_pool_mutex), \
+ "sched RCU or wq_pool_mutex should be held")
#define assert_rcu_or_pwq_lock() \
rcu_lockdep_assert(rcu_read_lock_sched_held() || \
#ifdef CONFIG_LOCKDEP
#define assert_manager_or_pool_lock(pool) \
- WARN_ONCE(!lockdep_is_held(&(pool)->manager_mutex) && \
+ WARN_ONCE(debug_locks && \
+ !lockdep_is_held(&(pool)->manager_mutex) && \
!lockdep_is_held(&(pool)->lock), \
"pool->manager_mutex or ->lock should be held")
#else
(pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
(pool)++)
-#define for_each_busy_worker(worker, i, pool) \
- hash_for_each(pool->busy_hash, i, worker, hentry)
-
/**
* for_each_pool - iterate through all worker_pools in the system
* @pool: iteration cursor
* @pi: integer used for iteration
*
- * This must be called either with wq_mutex held or sched RCU read locked.
- * If the pool needs to be used beyond the locking in effect, the caller is
- * responsible for guaranteeing that the pool stays online.
+ * This must be called either with wq_pool_mutex held or sched RCU read
+ * locked. If the pool needs to be used beyond the locking in effect, the
+ * caller is responsible for guaranteeing that the pool stays online.
*
* The if/else clause exists only for the lockdep assertion and can be
* ignored.
*/
#define for_each_pool(pool, pi) \
idr_for_each_entry(&worker_pool_idr, pool, pi) \
- if (({ assert_rcu_or_wq_mutex(); false; })) { } \
+ if (({ assert_rcu_or_pool_mutex(); false; })) { } \
else
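
For reference, a hypothetical caller (not part of this patch; the function name is made up) showing the intended usage where wq_pool_mutex isn't held: the sched-RCU read lock satisfies assert_rcu_or_pool_mutex(), and the pool pointers are only valid inside the read section.

static int count_worker_pools(void)
{
	struct worker_pool *pool;
	int pi, nr = 0;

	rcu_read_lock_sched();		/* satisfies assert_rcu_or_pool_mutex() */
	for_each_pool(pool, pi)
		nr++;			/* @pool must not be used past the unlock */
	rcu_read_unlock_sched();

	return nr;
}
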
/**
{
int ret;
- lockdep_assert_held(&wq_mutex);
+ lockdep_assert_held(&wq_pool_mutex);
do {
if (!idr_pre_get(&worker_pool_idr, GFP_KERNEL))
*
* Return the worker_pool @work was last associated with. %NULL if none.
*
- * Pools are created and destroyed under wq_mutex, and allows read access
- * under sched-RCU read lock. As such, this function should be called
- * under wq_mutex or with preemption disabled.
+ * Pools are created and destroyed under wq_pool_mutex, and allow read
+ * access under sched-RCU read lock. As such, this function should be
+ * called under wq_pool_mutex or with preemption disabled.
*
* All fields of the returned pool are accessible as long as the above
* mentioned locking is in effect. If the returned pool needs to be used
unsigned long data = atomic_long_read(&work->data);
int pool_id;
- assert_rcu_or_wq_mutex();
+ assert_rcu_or_pool_mutex();
if (data & WORK_STRUCT_PWQ)
return ((struct pool_workqueue *)
}
}
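
A hypothetical usage sketch following the rules above (the helper name and logic are illustrative, not from the patch): resolve the pool with sched-RCU held, then take pool->lock before relying on anything beyond the pool's existence.

static bool work_pool_has_pending(struct work_struct *work)
{
	struct worker_pool *pool;
	bool pending = false;

	rcu_read_lock_sched();			/* keeps the pool from being freed */
	pool = get_work_pool(work);
	if (pool) {
		spin_lock_irq(&pool->lock);	/* needed to look past existence */
		pending = !list_empty(&pool->worklist);
		spin_unlock_irq(&pool->lock);
	}
	rcu_read_unlock_sched();

	return pending;
}
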
-/*
- * Rebind an idle @worker to its CPU. worker_thread() will test
- * list_empty(@worker->entry) before leaving idle and call this function.
- */
-static void idle_worker_rebind(struct worker *worker)
-{
- /* CPU may go down again inbetween, clear UNBOUND only on success */
- if (worker_maybe_bind_and_lock(worker->pool))
- worker_clr_flags(worker, WORKER_UNBOUND);
-
- /* rebind complete, become available again */
- list_add(&worker->entry, &worker->pool->idle_list);
- spin_unlock_irq(&worker->pool->lock);
-}
-
-/*
- * Function for @worker->rebind.work used to rebind unbound busy workers to
- * the associated cpu which is coming back online. This is scheduled by
- * cpu up but can race with other cpu hotplug operations and may be
- * executed twice without intervening cpu down.
- */
-static void busy_worker_rebind_fn(struct work_struct *work)
-{
- struct worker *worker = container_of(work, struct worker, rebind_work);
-
- if (worker_maybe_bind_and_lock(worker->pool))
- worker_clr_flags(worker, WORKER_UNBOUND);
-
- spin_unlock_irq(&worker->pool->lock);
-}
-
static struct worker *alloc_worker(void)
{
struct worker *worker;
if (worker) {
INIT_LIST_HEAD(&worker->entry);
INIT_LIST_HEAD(&worker->scheduled);
- INIT_WORK(&worker->rebind_work, busy_worker_rebind_fn);
/* on creation a worker is in !idle && prep state */
worker->flags = WORKER_PREP;
}
if (unlikely(!mutex_trylock(&pool->manager_mutex))) {
spin_unlock_irq(&pool->lock);
mutex_lock(&pool->manager_mutex);
- /*
- * CPU hotplug could have happened while we were waiting
- * for assoc_mutex. Hotplug itself can't handle us
- * because manager isn't either on idle or busy list, and
- * @pool's state and ours could have deviated.
- *
- * As hotplug is now excluded via manager_mutex, we can
- * simply try to bind. It will succeed or fail depending
- * on @pool's current state. Try it and adjust
- * %WORKER_UNBOUND accordingly.
- */
- if (worker_maybe_bind_and_lock(pool))
- worker->flags &= ~WORKER_UNBOUND;
- else
- worker->flags |= WORKER_UNBOUND;
-
ret = true;
}
woke_up:
spin_lock_irq(&pool->lock);
- /* we are off idle list if destruction or rebind is requested */
- if (unlikely(list_empty(&worker->entry))) {
+ /* am I supposed to die? */
+ if (unlikely(worker->flags & WORKER_DIE)) {
spin_unlock_irq(&pool->lock);
-
- /* if DIE is set, destruction is requested */
- if (worker->flags & WORKER_DIE) {
- worker->task->flags &= ~PF_WQ_WORKER;
- return 0;
- }
-
- /* otherwise, rebind */
- idle_worker_rebind(worker);
- goto woke_up;
+ WARN_ON_ONCE(!list_empty(&worker->entry));
+ worker->task->flags &= ~PF_WQ_WORKER;
+ return 0;
}
worker_leave_idle(worker);
WARN_ON_ONCE(!list_empty(&worker->scheduled));
/*
- * When control reaches this point, we're guaranteed to have
- * at least one idle worker or that someone else has already
- * assumed the manager role.
+ * Finish PREP stage. We're guaranteed to have at least one idle
+ * worker or that someone else has already assumed the manager
+ * role. This is where @worker starts participating in concurrency
+ * management if applicable and concurrency management is restored
+ * after being rebound. See rebind_workers() for details.
*/
- worker_clr_flags(worker, WORKER_PREP);
+ worker_clr_flags(worker, WORKER_PREP | WORKER_REBOUND);
do {
struct work_struct *work =
* hotter than drain_workqueue() and already looks at @wq->flags.
* Use __WQ_DRAINING so that queue doesn't have to check nr_drainers.
*/
- mutex_lock(&wq_mutex);
+ mutex_lock(&wq_pool_mutex);
if (!wq->nr_drainers++)
wq->flags |= __WQ_DRAINING;
- mutex_unlock(&wq_mutex);
+ mutex_unlock(&wq_pool_mutex);
reflush:
flush_workqueue(wq);
local_irq_enable();
- mutex_lock(&wq_mutex);
+ mutex_lock(&wq_pool_mutex);
if (!--wq->nr_drainers)
wq->flags &= ~__WQ_DRAINING;
- mutex_unlock(&wq_mutex);
+ mutex_unlock(&wq_pool_mutex);
}
EXPORT_SYMBOL_GPL(drain_workqueue);
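
The __WQ_DRAINING flag simply mirrors nr_drainers > 0 so the enqueue fast path tests a single flags word instead of the counter. A small userspace model of that pattern (a pthread mutex stands in for wq_pool_mutex; illustrative only, not kernel code):

#include <assert.h>
#include <pthread.h>

#define WQ_DRAINING	0x1

static pthread_mutex_t pool_mutex = PTHREAD_MUTEX_INITIALIZER;
static unsigned int wq_flags;
static int nr_drainers;

static void drain_begin(void)
{
	pthread_mutex_lock(&pool_mutex);
	if (!nr_drainers++)
		wq_flags |= WQ_DRAINING;	/* 0 -> 1 transition sets the flag */
	pthread_mutex_unlock(&pool_mutex);
}

static void drain_end(void)
{
	pthread_mutex_lock(&pool_mutex);
	if (!--nr_drainers)
		wq_flags &= ~WQ_DRAINING;	/* 1 -> 0 transition clears it */
	pthread_mutex_unlock(&pool_mutex);
}

int main(void)
{
	drain_begin();
	drain_begin();
	assert(wq_flags & WQ_DRAINING);		/* enqueue path would warn here */
	drain_end();
	assert(wq_flags & WQ_DRAINING);		/* still one drainer left */
	drain_end();
	assert(!(wq_flags & WQ_DRAINING));
	return 0;
}
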
{
struct worker *worker;
- mutex_lock(&wq_mutex);
+ mutex_lock(&wq_pool_mutex);
if (--pool->refcnt) {
- mutex_unlock(&wq_mutex);
+ mutex_unlock(&wq_pool_mutex);
return;
}
/* sanity checks */
if (WARN_ON(!(pool->flags & POOL_DISASSOCIATED)) ||
WARN_ON(!list_empty(&pool->worklist))) {
- mutex_unlock(&wq_mutex);
+ mutex_unlock(&wq_pool_mutex);
return;
}
idr_remove(&worker_pool_idr, pool->id);
hash_del(&pool->hash_node);
- mutex_unlock(&wq_mutex);
+ mutex_unlock(&wq_pool_mutex);
/*
* Become the manager and destroy all workers. Grabbing
u32 hash = wqattrs_hash(attrs);
struct worker_pool *pool;
- mutex_lock(&wq_mutex);
+ mutex_lock(&wq_pool_mutex);
/* do we already have a matching pool? */
hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) {
if (!pool || init_worker_pool(pool) < 0)
goto fail;
+ if (workqueue_freezing)
+ pool->flags |= POOL_FREEZING;
+
lockdep_set_subclass(&pool->lock, 1); /* see put_pwq() */
copy_workqueue_attrs(pool->attrs, attrs);
/* install */
hash_add(unbound_pool_hash, &pool->hash_node, hash);
out_unlock:
- mutex_unlock(&wq_mutex);
+ mutex_unlock(&wq_pool_mutex);
return pool;
fail:
- mutex_unlock(&wq_mutex);
+ mutex_unlock(&wq_pool_mutex);
if (pool)
put_unbound_pool(pool);
return NULL;
while (!list_empty(&pwq->delayed_works) &&
pwq->nr_active < pwq->max_active)
pwq_activate_first_delayed(pwq);
+
+ /*
+ * Need to kick a worker after a thaw or when an unbound wq's
+ * max_active is bumped. It's a slow path. Do it always.
+ */
+ wake_up_worker(pwq->pool);
} else {
pwq->max_active = 0;
}
goto err_destroy;
/*
- * wq_mutex protects global freeze state and workqueues list. Grab
- * it, adjust max_active and add the new @wq to workqueues list.
+ * wq_pool_mutex protects global freeze state and workqueues list.
+ * Grab it, adjust max_active and add the new @wq to workqueues
+ * list.
*/
- mutex_lock(&wq_mutex);
+ mutex_lock(&wq_pool_mutex);
spin_lock_irq(&pwq_lock);
for_each_pwq(pwq, wq)
list_add(&wq->list, &workqueues);
- mutex_unlock(&wq_mutex);
+ mutex_unlock(&wq_pool_mutex);
return wq;
* wq list is used to freeze wq, remove from list after
* flushing is complete in case freeze races us.
*/
- mutex_lock(&wq_mutex);
+ mutex_lock(&wq_pool_mutex);
list_del_init(&wq->list);
- mutex_unlock(&wq_mutex);
+ mutex_unlock(&wq_pool_mutex);
workqueue_sysfs_unregister(wq);
{
struct worker *worker = current_wq_worker();
- return worker && worker == worker->current_pwq->wq->rescuer;
+ return worker && worker->rescue_wq;
}
/**
struct pool_workqueue *pwq;
bool ret;
- preempt_disable();
+ rcu_read_lock_sched();
if (!(wq->flags & WQ_UNBOUND))
pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
pwq = first_pwq(wq);
ret = !list_empty(&pwq->delayed_works);
- preempt_enable();
+ rcu_read_unlock_sched();
return ret;
}
int cpu = smp_processor_id();
struct worker_pool *pool;
struct worker *worker;
- int i;
+ int wi;
for_each_cpu_worker_pool(pool, cpu) {
WARN_ON_ONCE(cpu != smp_processor_id());
* before the last CPU down must be on the cpu. After
* this, they may become diasporas.
*/
- list_for_each_entry(worker, &pool->idle_list, entry)
- worker->flags |= WORKER_UNBOUND;
-
- for_each_busy_worker(worker, i, pool)
+ for_each_pool_worker(worker, wi, pool)
worker->flags |= WORKER_UNBOUND;
pool->flags |= POOL_DISASSOCIATED;
* rebind_workers - rebind all workers of a pool to the associated CPU
* @pool: pool of interest
*
- * @pool->cpu is coming online. Rebind all workers to the CPU. Rebinding
- * is different for idle and busy ones.
- *
- * Idle ones will be removed from the idle_list and woken up. They will
- * add themselves back after completing rebind. This ensures that the
- * idle_list doesn't contain any unbound workers when re-bound busy workers
- * try to perform local wake-ups for concurrency management.
- *
- * Busy workers can rebind after they finish their current work items.
- * Queueing the rebind work item at the head of the scheduled list is
- * enough. Note that nr_running will be properly bumped as busy workers
- * rebind.
- *
- * On return, all non-manager workers are scheduled for rebind - see
- * manage_workers() for the manager special case. Any idle worker
- * including the manager will not appear on @idle_list until rebind is
- * complete, making local wake-ups safe.
+ * @pool->cpu is coming online. Rebind all workers to the CPU.
*/
static void rebind_workers(struct worker_pool *pool)
{
- struct worker *worker, *n;
- int i;
+ struct worker *worker;
+ int wi;
lockdep_assert_held(&pool->manager_mutex);
- lockdep_assert_held(&pool->lock);
- /* dequeue and kick idle ones */
- list_for_each_entry_safe(worker, n, &pool->idle_list, entry) {
+ /*
+ * Restore CPU affinity of all workers. As all idle workers should
+ * be on the run-queue of the associated CPU before any local
+ * wake-ups for concurrency management happen, restore CPU affinity
+ * of all workers first and then clear UNBOUND. As we're called
+ * from CPU_ONLINE, the following shouldn't fail.
+ */
+ for_each_pool_worker(worker, wi, pool)
+ WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
+ pool->attrs->cpumask) < 0);
+
+ spin_lock_irq(&pool->lock);
+
+ for_each_pool_worker(worker, wi, pool) {
+ unsigned int worker_flags = worker->flags;
+
/*
- * idle workers should be off @pool->idle_list until rebind
- * is complete to avoid receiving premature local wake-ups.
+ * A bound idle worker should actually be on the runqueue
+ * of the associated CPU for local wake-ups targeting it to
+ * work. Kick all idle workers so that they migrate to the
+ * associated CPU. Doing this in the same loop as
+ * replacing UNBOUND with REBOUND is safe as no worker will
+ * be bound before @pool->lock is released.
*/
- list_del_init(&worker->entry);
+ if (worker_flags & WORKER_IDLE)
+ wake_up_process(worker->task);
/*
- * worker_thread() will see the above dequeuing and call
- * idle_worker_rebind().
+ * We want to clear UNBOUND but can't directly call
+ * worker_clr_flags() or adjust nr_running. Atomically
+ * replace UNBOUND with another NOT_RUNNING flag REBOUND.
+ * @worker will clear REBOUND using worker_clr_flags() when
+ * it initiates the next execution cycle thus restoring
+ * concurrency management. Note that when or whether
+ * @worker clears REBOUND doesn't affect correctness.
+ *
+ * ACCESS_ONCE() is necessary because @worker->flags may be
+ * tested without holding any lock in
+ * wq_worker_waking_up(). Without it, NOT_RUNNING test may
+ * fail incorrectly leading to premature concurrency
+ * management operations.
*/
- wake_up_process(worker->task);
+ WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
+ worker_flags |= WORKER_REBOUND;
+ worker_flags &= ~WORKER_UNBOUND;
+ ACCESS_ONCE(worker->flags) = worker_flags;
}
- /* rebind busy workers */
- for_each_busy_worker(worker, i, pool) {
- struct work_struct *rebind_work = &worker->rebind_work;
- struct workqueue_struct *wq;
+ spin_unlock_irq(&pool->lock);
+}
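
The single ACCESS_ONCE() store above pairs with the lockless NOT_RUNNING test in wq_worker_waking_up(). A simplified userspace model of why the transition must be one store rather than a clear followed by a set (not the kernel code; C11 atomics stand in for the flag word and nr_running, and the reader is only a stand-in for the scheduler wake-up path):

#include <stdatomic.h>
#include <assert.h>

enum { UNBOUND = 1 << 7, REBOUND = 1 << 8, NOT_RUNNING = UNBOUND | REBOUND };

static _Atomic unsigned int worker_flags = UNBOUND;
static _Atomic int nr_running;

static void wakeup_reader(void)	/* lockless, may run at any time */
{
	if (!(atomic_load(&worker_flags) & NOT_RUNNING))
		atomic_fetch_add(&nr_running, 1);
}

static void rebind_writer(void)	/* holds pool->lock in the real code */
{
	unsigned int flags = atomic_load(&worker_flags);

	/*
	 * One store: the reader sees either UNBOUND or REBOUND, never
	 * neither.  Clearing UNBOUND and setting REBOUND in two separate
	 * stores would open a window where NOT_RUNNING tests false and
	 * nr_running gets bumped prematurely.
	 */
	atomic_store(&worker_flags, (flags | REBOUND) & ~UNBOUND);
}

int main(void)
{
	rebind_writer();
	wakeup_reader();
	assert(nr_running == 0);	/* NOT_RUNNING until the worker clears REBOUND */
	return 0;
}
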
- if (test_and_set_bit(WORK_STRUCT_PENDING_BIT,
- work_data_bits(rebind_work)))
- continue;
+/**
+ * restore_unbound_workers_cpumask - restore cpumask of unbound workers
+ * @pool: unbound pool of interest
+ * @cpu: the CPU which is coming up
+ *
+ * An unbound pool may end up with a cpumask which doesn't have any online
+ * CPUs. When a worker of such a pool gets scheduled, the scheduler resets
+ * its cpus_allowed. If @cpu is in @pool's cpumask which didn't have any
+ * online CPU before, cpus_allowed of all its workers should be restored.
+ */
+static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu)
+{
+ static cpumask_t cpumask;
+ struct worker *worker;
+ int wi;
- debug_work_activate(rebind_work);
+ lockdep_assert_held(&pool->manager_mutex);
- /*
- * wq doesn't really matter but let's keep @worker->pool
- * and @pwq->pool consistent for sanity.
- */
- if (worker->pool->attrs->nice < 0)
- wq = system_highpri_wq;
- else
- wq = system_wq;
+ /* is @cpu allowed for @pool? */
+ if (!cpumask_test_cpu(cpu, pool->attrs->cpumask))
+ return;
- insert_work(per_cpu_ptr(wq->cpu_pwqs, pool->cpu), rebind_work,
- worker->scheduled.next,
- work_color_to_flags(WORK_NO_COLOR));
- }
+ /* is @cpu the only online CPU? */
+ cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask);
+ if (cpumask_weight(&cpumask) != 1)
+ return;
+
+ /* as we're called from CPU_ONLINE, the following shouldn't fail */
+ for_each_pool_worker(worker, wi, pool)
+ WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
+ pool->attrs->cpumask) < 0);
}
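
A userspace model of the two checks above (plain bitmasks stand in for cpumask_t; the helper is illustrative only): affinity is restored only when @cpu is allowed for the pool and is the first of the pool's allowed CPUs to come online.

#include <assert.h>

static int should_restore(unsigned long pool_mask, unsigned long online_mask,
			  int cpu)
{
	if (!(pool_mask & (1UL << cpu)))	/* @cpu not allowed for @pool */
		return 0;
	/* restore only if @cpu is the sole online CPU in the pool's mask */
	return (pool_mask & online_mask) == (1UL << cpu);
}

int main(void)
{
	/* pool allowed on CPUs {2,3}; CPU 2 is the first of them to come up */
	assert(should_restore(0xc, 0x7, 2) == 1);
	/* CPU 3 comes up later: 2 and 3 are both online, nothing to do */
	assert(should_restore(0xc, 0xf, 3) == 0);
	/* CPU 1 isn't in the pool's mask at all */
	assert(should_restore(0xc, 0x3, 1) == 0);
	return 0;
}
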
/*
{
int cpu = (unsigned long)hcpu;
struct worker_pool *pool;
+ int pi;
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_UP_PREPARE:
case CPU_DOWN_FAILED:
case CPU_ONLINE:
- for_each_cpu_worker_pool(pool, cpu) {
+ mutex_lock(&wq_pool_mutex);
+
+ for_each_pool(pool, pi) {
mutex_lock(&pool->manager_mutex);
- spin_lock_irq(&pool->lock);
- pool->flags &= ~POOL_DISASSOCIATED;
- rebind_workers(pool);
+ if (pool->cpu == cpu) {
+ spin_lock_irq(&pool->lock);
+ pool->flags &= ~POOL_DISASSOCIATED;
+ spin_unlock_irq(&pool->lock);
+
+ rebind_workers(pool);
+ } else if (pool->cpu < 0) {
+ restore_unbound_workers_cpumask(pool, cpu);
+ }
- spin_unlock_irq(&pool->lock);
mutex_unlock(&pool->manager_mutex);
}
+
+ mutex_unlock(&wq_pool_mutex);
break;
}
return NOTIFY_OK;
* pool->worklist.
*
* CONTEXT:
- * Grabs and releases wq_mutex, pwq_lock and pool->lock's.
+ * Grabs and releases wq_pool_mutex, pwq_lock and pool->lock's.
*/
void freeze_workqueues_begin(void)
{
struct pool_workqueue *pwq;
int pi;
- mutex_lock(&wq_mutex);
+ mutex_lock(&wq_pool_mutex);
WARN_ON_ONCE(workqueue_freezing);
workqueue_freezing = true;
}
spin_unlock_irq(&pwq_lock);
- mutex_unlock(&wq_mutex);
+ mutex_unlock(&wq_pool_mutex);
}
/**
* between freeze_workqueues_begin() and thaw_workqueues().
*
* CONTEXT:
- * Grabs and releases wq_mutex.
+ * Grabs and releases wq_pool_mutex.
*
* RETURNS:
* %true if some freezable workqueues are still busy. %false if freezing
struct workqueue_struct *wq;
struct pool_workqueue *pwq;
- mutex_lock(&wq_mutex);
+ mutex_lock(&wq_pool_mutex);
WARN_ON_ONCE(!workqueue_freezing);
* nr_active is monotonically decreasing. It's safe
* to peek without lock.
*/
- preempt_disable();
+ rcu_read_lock_sched();
for_each_pwq(pwq, wq) {
WARN_ON_ONCE(pwq->nr_active < 0);
if (pwq->nr_active) {
busy = true;
- preempt_enable();
+ rcu_read_unlock_sched();
goto out_unlock;
}
}
- preempt_enable();
+ rcu_read_unlock_sched();
}
out_unlock:
- mutex_unlock(&wq_mutex);
+ mutex_unlock(&wq_pool_mutex);
return busy;
}
* frozen works are transferred to their respective pool worklists.
*
* CONTEXT:
- * Grabs and releases wq_mutex, pwq_lock and pool->lock's.
+ * Grabs and releases wq_pool_mutex, pwq_lock and pool->lock's.
*/
void thaw_workqueues(void)
{
struct worker_pool *pool;
int pi;
- mutex_lock(&wq_mutex);
+ mutex_lock(&wq_pool_mutex);
if (!workqueue_freezing)
goto out_unlock;
}
spin_unlock_irq(&pwq_lock);
- /* kick workers */
- for_each_pool(pool, pi) {
- spin_lock_irq(&pool->lock);
- wake_up_worker(pool);
- spin_unlock_irq(&pool->lock);
- }
-
workqueue_freezing = false;
out_unlock:
- mutex_unlock(&wq_mutex);
+ mutex_unlock(&wq_pool_mutex);
}
#endif /* CONFIG_FREEZER */
pool->attrs->nice = std_nice[i++];
/* alloc pool ID */
- mutex_lock(&wq_mutex);
+ mutex_lock(&wq_pool_mutex);
BUG_ON(worker_pool_assign_id(pool));
- mutex_unlock(&wq_mutex);
+ mutex_unlock(&wq_pool_mutex);
}
}