{
clear_bit(WS_USED_B, &work->state);
smp_mb__after_clear_bit();
+ /*
+ * work can have disappeared at this point. bit waitq functions
+ * should be able to tolerate this, provided bdi_sched_wait does
+ * not dereference its pointer argument.
+ */
wake_up_bit(&work->state, WS_USED_B);
}
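/*
 * For reference only (not part of this patch): a minimal sketch of the
 * bit-wait callback the comment above refers to, assuming the
 * wait_on_bit() action signature of this era. It only schedules and
 * never dereferences the word pointer, so it stays safe even if the
 * bdi_work has already disappeared by the time the waiter runs.
 */
static int bdi_sched_wait(void *word)
{
	/* do not touch *word here: the bdi_work it points into may be gone */
	schedule();
	return 0;
}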
static void wb_work_complete(struct bdi_work *work)
{
const enum writeback_sync_modes sync_mode = work->args.sync_mode;
+ int onstack = bdi_work_on_stack(work);
/*
* For allocated work, we can clear the done/seen bit right here.
* For on-stack work, we need to postpone both the clear and free
* to after the RCU grace period, since the stack could be invalidated
* as soon as bdi_work_clear() has done the wakeup.
*/
- if (!bdi_work_on_stack(work))
+ if (!onstack)
bdi_work_clear(work);
- if (sync_mode == WB_SYNC_NONE || bdi_work_on_stack(work))
+ if (sync_mode == WB_SYNC_NONE || onstack)
call_rcu(&work->rcu_head, bdi_work_free);
}
BUG_ON(!bdi->wb_cnt);
/*
- * Make sure stores are seen before it appears on the list
+ * list_add_tail_rcu() contains the necessary barriers to
+ * make sure the above stores are seen before the item is
+ * noticed on the list
*/
- smp_mb();
-
spin_lock(&bdi->wb_lock);
list_add_tail_rcu(&work->list, &bdi->work_list);
spin_unlock(&bdi->wb_lock);
else {
struct bdi_writeback *wb = &bdi->wb;
- /*
- * End work now if this wb has no dirty IO pending. Otherwise
- * wakeup the handling thread
- */
- if (!wb_has_dirty_io(wb))
- wb_clear_pending(wb, work);
- else if (wb->task)
+ if (wb->task)
wake_up_process(wb->task);
}
}
rcu_read_lock();
list_for_each_entry_rcu(work, &bdi->work_list, list) {
- if (!test_and_clear_bit(wb->nr, &work->seen))
+ if (!test_bit(wb->nr, &work->seen))
continue;
+ clear_bit(wb->nr, &work->seen);
ret = work;
break;