BUG_ON(!work_pending(work));
new = (unsigned long) wq | (1UL << WORK_STRUCT_PENDING);
- new |= work->management & WORK_STRUCT_FLAG_MASK;
- work->management = new;
+ new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
+ atomic_long_set(&work->data, new);
}
static inline void *get_wq_data(struct work_struct *work)
{
- return (void *) (work->management & WORK_STRUCT_WQ_DATA_MASK);
+ return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}
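
For context, a minimal sketch of the helpers these accessors rely on, assuming the packed-word layout this patch introduces: the low bits of work->data carry the flag bits and the remaining bits hold the cwq pointer. The canonical definitions live in include/linux/workqueue.h; treat the values here as illustrative.

	#define WORK_STRUCT_PENDING	0	/* work item is pending execution */
	#define WORK_STRUCT_NOAUTOREL	1	/* work item held pending until explicitly released */
	#define WORK_STRUCT_FLAG_MASK	(3UL)
	#define WORK_STRUCT_WQ_DATA_MASK	(~WORK_STRUCT_FLAG_MASK)

	/* Bit operations want an unsigned long *, so alias the atomic_long_t. */
	#define work_data_bits(work)	((unsigned long *)(&(work)->data))
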
static int __run_work(struct cpu_workqueue_struct *cwq, struct work_struct *work)
list_del_init(&work->entry);
spin_unlock_irqrestore(&cwq->lock, flags);
- if (!test_bit(WORK_STRUCT_NOAUTOREL, &work->management))
+ if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
work_release(work);
f(work);
{
int ret = 0, cpu = get_cpu();
- if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
+ if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
if (unlikely(is_single_threaded(wq)))
cpu = singlethread_cpu;
BUG_ON(!list_empty(&work->entry));
/**
* queue_delayed_work - queue work on a workqueue after delay
* @wq: workqueue to use
- * @work: delayable work to queue
+ * @dwork: delayable work to queue
* @delay: number of jiffies to wait before queueing
*
- * Returns 0 if @work was already on a queue, non-zero otherwise.
+ * Returns 0 if @dwork was already on a queue, non-zero otherwise.
if (delay == 0)
return queue_work(wq, work);
- if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
+ if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
BUG_ON(timer_pending(timer));
BUG_ON(!list_empty(&work->entry));
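
A hedged usage sketch of queue_delayed_work() as documented above; my_wq, my_dwork and my_handler are illustrative names, not part of this patch:

	static void my_handler(struct work_struct *work)
	{
		/* runs roughly one second after kick_off() */
	}
	static DECLARE_DELAYED_WORK(my_dwork, my_handler);

	static void kick_off(struct workqueue_struct *my_wq)
	{
		/* A non-zero return means the work was newly queued. */
		if (!queue_delayed_work(my_wq, &my_dwork, HZ))
			printk(KERN_DEBUG "my_dwork already pending\n");
	}
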
* queue_delayed_work_on - queue work on specific CPU after delay
* @cpu: CPU number to execute work on
* @wq: workqueue to use
- * @work: work to queue
+ * @dwork: work to queue
* @delay: number of jiffies to wait before queueing
*
- * Returns 0 if @work was already on a queue, non-zero otherwise.
+ * Returns 0 if @dwork was already on a queue, non-zero otherwise.
struct timer_list *timer = &dwork->timer;
struct work_struct *work = &dwork->work;
- if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
+ if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
BUG_ON(timer_pending(timer));
BUG_ON(!list_empty(&work->entry));
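
And the CPU-pinned variant, reusing the illustrative names from the sketch above; the delay is in jiffies:

	static int kick_off_on_cpu0(struct workqueue_struct *my_wq)
	{
		/* my_handler() will run in CPU 0's worker thread after ~10ms. */
		return queue_delayed_work_on(0, my_wq, &my_dwork,
					     msecs_to_jiffies(10));
	}
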
spin_unlock_irqrestore(&cwq->lock, flags);
BUG_ON(get_wq_data(work) != cwq);
- if (!test_bit(WORK_STRUCT_NOAUTOREL, &work->management))
+ if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
work_release(work);
f(work);
mutex_lock(&workqueue_mutex);
for_each_online_cpu(cpu) {
- INIT_WORK(per_cpu_ptr(works, cpu), func);
- __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu),
- per_cpu_ptr(works, cpu));
+ struct work_struct *work = per_cpu_ptr(works, cpu);
+
+ INIT_WORK(work, func);
+ set_bit(WORK_STRUCT_PENDING, work_data_bits(work));
+ __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
}
mutex_unlock(&workqueue_mutex);
flush_workqueue(keventd_wq);
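
Note why the pending bit is set by hand here: __queue_work() assumes its caller has already claimed the work item, which queue_work() normally does via test_and_set_bit(); since schedule_on_each_cpu() calls __queue_work() directly, it must set WORK_STRUCT_PENDING itself. A hedged caller sketch, with bump_counter and hits as illustrative names:

	static DEFINE_PER_CPU(unsigned long, hits);

	static void bump_counter(struct work_struct *unused)
	{
		__get_cpu_var(hits)++;	/* runs in each CPU's keventd thread */
	}

	static int count_everywhere(void)
	{
		/* Queues bump_counter() on every online CPU, then waits. */
		return schedule_on_each_cpu(bump_counter);
	}
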
EXPORT_SYMBOL(flush_scheduled_work);
/**
- * cancel_rearming_delayed_workqueue - reliably kill off a delayed
- * work whose handler rearms the delayed work.
+ * cancel_rearming_delayed_workqueue - reliably kill off a delayed work whose handler rearms the delayed work.
* @wq: the controlling workqueue structure
* @dwork: the delayed work struct
*/
EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);
/**
- * cancel_rearming_delayed_work - reliably kill off a delayed keventd
- * work whose handler rearms the delayed work.
+ * cancel_rearming_delayed_work - reliably kill off a delayed keventd work whose handler rearms the delayed work.
* @dwork: the delayed work struct
*/
void cancel_rearming_delayed_work(struct delayed_work *dwork)
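
A hedged sketch of the self-rearming pattern this helper exists to stop; poll_fn and poll_work are illustrative names:

	static void poll_fn(struct work_struct *work);
	static DECLARE_DELAYED_WORK(poll_work, poll_fn);

	static void poll_fn(struct work_struct *work)
	{
		/* ... periodic work ... */
		schedule_delayed_work(&poll_work, HZ);	/* rearms itself */
	}

	static void teardown(void)
	{
		/* Safe even though poll_fn() keeps requeueing poll_work. */
		cancel_rearming_delayed_work(&poll_work);
	}
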