Merge git://git.kernel.org/pub/scm/linux/kernel/git/wim/linux-2.6-watchdog
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 32b1091..58e5c15 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -47,7 +47,6 @@ struct cpu_workqueue_struct {
 
        struct workqueue_struct *wq;
        struct task_struct *thread;
-       int should_stop;
 
        int run_depth;          /* Detect run_workqueue() recursion depth */
 } ____cacheline_aligned;
@@ -71,7 +70,13 @@ static LIST_HEAD(workqueues);
 
 static int singlethread_cpu __read_mostly;
 static cpumask_t cpu_singlethread_map __read_mostly;
-/* optimization, we could use cpu_possible_map */
+/*
+ * _cpu_down() first removes CPU from cpu_online_map, then CPU_DEAD
+ * flushes cwq->worklist. This means that flush_workqueue/wait_on_work
+ * which come in between can't use for_each_online_cpu(). We could
+ * use cpu_possible_map; the cpumask below is more documentation
+ * than an optimization.
+ */
 static cpumask_t cpu_populated_map __read_mostly;
 
 /* If it's single threaded, it isn't in the list of workqueues. */
@@ -86,22 +91,32 @@ static const cpumask_t *wq_cpu_map(struct workqueue_struct *wq)
                ? &cpu_singlethread_map : &cpu_populated_map;
 }
 
+static
+struct cpu_workqueue_struct *wq_per_cpu(struct workqueue_struct *wq, int cpu)
+{
+       if (unlikely(is_single_threaded(wq)))
+               cpu = singlethread_cpu;
+       return per_cpu_ptr(wq->cpu_wq, cpu);
+}
+
 /*
  * Set the workqueue on which a work item is to be run
  * - Must *only* be called if the pending flag is set
  */
-static inline void set_wq_data(struct work_struct *work, void *wq)
+static inline void set_wq_data(struct work_struct *work,
+                               struct cpu_workqueue_struct *cwq)
 {
        unsigned long new;
 
        BUG_ON(!work_pending(work));
 
-       new = (unsigned long) wq | (1UL << WORK_STRUCT_PENDING);
+       new = (unsigned long) cwq | (1UL << WORK_STRUCT_PENDING);
        new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
        atomic_long_set(&work->data, new);
 }
 
-static inline void *get_wq_data(struct work_struct *work)
+static inline
+struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
 {
        return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
 }
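
A note on the encoding being changed above: work->data packs the cpu_workqueue_struct pointer together with status bits such as WORK_STRUCT_PENDING, relying on the pointer's alignment to keep the low bits free. A minimal userspace sketch of that pointer-plus-flags technique (all names below are stand-ins, not the kernel's symbols):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define FLAG_MASK       3UL             /* stand-in for WORK_STRUCT_FLAG_MASK */
#define PENDING_BIT     (1UL << 0)      /* stand-in for WORK_STRUCT_PENDING   */

struct cwq { int cpu; };                /* stand-in for cpu_workqueue_struct  */

static unsigned long pack(struct cwq *cwq, unsigned long flags)
{
        /* the struct is at least 4-byte aligned, so the low bits are zero */
        assert(((uintptr_t)cwq & FLAG_MASK) == 0);
        return (unsigned long)(uintptr_t)cwq | (flags & FLAG_MASK);
}

static struct cwq *unpack(unsigned long data)
{
        return (struct cwq *)(uintptr_t)(data & ~FLAG_MASK);
}

int main(void)
{
        static struct cwq queue = { .cpu = 3 };
        unsigned long data = pack(&queue, PENDING_BIT);

        printf("pending=%lu cpu=%d\n", data & PENDING_BIT, unpack(data)->cpu);
        return 0;
}
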
@@ -110,6 +125,11 @@ static void insert_work(struct cpu_workqueue_struct *cwq,
                                struct work_struct *work, int tail)
 {
        set_wq_data(work, cwq);
+       /*
+        * Ensure that we get the right work->data if we see the
+        * result of list_add() below, see try_to_grab_pending().
+        */
+       smp_wmb();
        if (tail)
                list_add_tail(&work->entry, &cwq->worklist);
        else
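
The smp_wmb() added here pairs with the smp_rmb() added in try_to_grab_pending() below: the new work->data must be visible before the work shows up on the list. A rough userspace analogue of that publish/observe ordering, using C11 fences rather than the kernel barriers (illustrative only, not kernel code):

#include <stdatomic.h>
#include <stdio.h>

static int payload;              /* plays the role of work->data        */
static atomic_int published;     /* plays the role of the list linkage  */

static void writer(void)
{
        payload = 42;
        atomic_thread_fence(memory_order_release);              /* ~ smp_wmb() */
        atomic_store_explicit(&published, 1, memory_order_relaxed);
}

static void reader(void)
{
        if (atomic_load_explicit(&published, memory_order_relaxed)) {
                atomic_thread_fence(memory_order_acquire);      /* ~ smp_rmb() */
                printf("payload=%d\n", payload); /* sees 42, not a stale value */
        }
}

int main(void)
{
        writer();       /* run these on separate threads to exercise the fences */
        reader();
        return 0;
}
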
@@ -140,16 +160,14 @@ static void __queue_work(struct cpu_workqueue_struct *cwq,
  */
 int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
 {
-       int ret = 0, cpu = get_cpu();
+       int ret = 0;
 
        if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
-               if (unlikely(is_single_threaded(wq)))
-                       cpu = singlethread_cpu;
                BUG_ON(!list_empty(&work->entry));
-               __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
+               __queue_work(wq_per_cpu(wq, get_cpu()), work);
+               put_cpu();
                ret = 1;
        }
-       put_cpu();
        return ret;
 }
 EXPORT_SYMBOL_GPL(queue_work);
@@ -157,13 +175,10 @@ EXPORT_SYMBOL_GPL(queue_work);
 void delayed_work_timer_fn(unsigned long __data)
 {
        struct delayed_work *dwork = (struct delayed_work *)__data;
-       struct workqueue_struct *wq = get_wq_data(&dwork->work);
-       int cpu = smp_processor_id();
-
-       if (unlikely(is_single_threaded(wq)))
-               cpu = singlethread_cpu;
+       struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);
+       struct workqueue_struct *wq = cwq->wq;
 
-       __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), &dwork->work);
+       __queue_work(wq_per_cpu(wq, smp_processor_id()), &dwork->work);
 }
 
 /**
@@ -177,27 +192,11 @@ void delayed_work_timer_fn(unsigned long __data)
 int fastcall queue_delayed_work(struct workqueue_struct *wq,
                        struct delayed_work *dwork, unsigned long delay)
 {
-       int ret = 0;
-       struct timer_list *timer = &dwork->timer;
-       struct work_struct *work = &dwork->work;
-
-       timer_stats_timer_set_start_info(timer);
+       timer_stats_timer_set_start_info(&dwork->timer);
        if (delay == 0)
-               return queue_work(wq, work);
+               return queue_work(wq, &dwork->work);
 
-       if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
-               BUG_ON(timer_pending(timer));
-               BUG_ON(!list_empty(&work->entry));
-
-               /* This stores wq for the moment, for the timer_fn */
-               set_wq_data(work, wq);
-               timer->expires = jiffies + delay;
-               timer->data = (unsigned long)dwork;
-               timer->function = delayed_work_timer_fn;
-               add_timer(timer);
-               ret = 1;
-       }
-       return ret;
+       return queue_delayed_work_on(-1, wq, dwork, delay);
 }
 EXPORT_SYMBOL_GPL(queue_delayed_work);
 
@@ -221,12 +220,16 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
                BUG_ON(timer_pending(timer));
                BUG_ON(!list_empty(&work->entry));
 
-               /* This stores wq for the moment, for the timer_fn */
-               set_wq_data(work, wq);
+               /* This stores cwq for the moment, for the timer_fn */
+               set_wq_data(work, wq_per_cpu(wq, raw_smp_processor_id()));
                timer->expires = jiffies + delay;
                timer->data = (unsigned long)dwork;
                timer->function = delayed_work_timer_fn;
-               add_timer_on(timer, cpu);
+
+               if (unlikely(cpu >= 0))
+                       add_timer_on(timer, cpu);
+               else
+                       add_timer(timer);
                ret = 1;
        }
        return ret;
@@ -253,8 +256,7 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
                spin_unlock_irq(&cwq->lock);
 
                BUG_ON(get_wq_data(work) != cwq);
-               if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
-                       work_release(work);
+               work_clear_pending(work);
                f(work);
 
                if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
@@ -275,63 +277,27 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
        spin_unlock_irq(&cwq->lock);
 }
 
-/*
- * NOTE: the caller must not touch *cwq if this func returns true
- */
-static int cwq_should_stop(struct cpu_workqueue_struct *cwq)
-{
-       int should_stop = cwq->should_stop;
-
-       if (unlikely(should_stop)) {
-               spin_lock_irq(&cwq->lock);
-               should_stop = cwq->should_stop && list_empty(&cwq->worklist);
-               if (should_stop)
-                       cwq->thread = NULL;
-               spin_unlock_irq(&cwq->lock);
-       }
-
-       return should_stop;
-}
-
 static int worker_thread(void *__cwq)
 {
        struct cpu_workqueue_struct *cwq = __cwq;
        DEFINE_WAIT(wait);
-       struct k_sigaction sa;
-       sigset_t blocked;
 
-       if (!cwq->wq->freezeable)
-               current->flags |= PF_NOFREEZE;
+       if (cwq->wq->freezeable)
+               set_freezable();
 
        set_user_nice(current, -5);
 
-       /* Block and flush all signals */
-       sigfillset(&blocked);
-       sigprocmask(SIG_BLOCK, &blocked, NULL);
-       flush_signals(current);
-
-       /*
-        * We inherited MPOL_INTERLEAVE from the booting kernel.
-        * Set MPOL_DEFAULT to insure node local allocations.
-        */
-       numa_default_policy();
-
-       /* SIG_IGN makes children autoreap: see do_notify_parent(). */
-       sa.sa.sa_handler = SIG_IGN;
-       sa.sa.sa_flags = 0;
-       siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
-       do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0);
-
        for (;;) {
-               if (cwq->wq->freezeable)
-                       try_to_freeze();
-
                prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
-               if (!cwq->should_stop && list_empty(&cwq->worklist))
+               if (!freezing(current) &&
+                   !kthread_should_stop() &&
+                   list_empty(&cwq->worklist))
                        schedule();
                finish_wait(&cwq->more_work, &wait);
 
-               if (cwq_should_stop(cwq))
+               try_to_freeze();
+
+               if (kthread_should_stop())
                        break;
 
                run_workqueue(cwq);
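
For context, the rewritten loop above is the standard kthread idiom: the stop/work condition is re-checked after prepare_to_wait(), so a wake-up arriving between the check and schedule() is never lost, and kthread_should_stop() becomes true once kthread_stop() is called (as cleanup_workqueue_thread() now does further down). A hypothetical stand-alone sketch of that lifecycle, with invented demo_* names:

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_more_work);
static int demo_have_work;
static struct task_struct *demo_task;

static int demo_thread(void *unused)
{
        DEFINE_WAIT(wait);

        for (;;) {
                prepare_to_wait(&demo_more_work, &wait, TASK_INTERRUPTIBLE);
                if (!kthread_should_stop() && !demo_have_work)
                        schedule();
                finish_wait(&demo_more_work, &wait);

                if (kthread_should_stop())
                        break;

                demo_have_work = 0;     /* ... process the pending work ... */
        }
        return 0;
}

static int demo_start(void)
{
        demo_task = kthread_run(demo_thread, NULL, "demo_thread");
        return IS_ERR(demo_task) ? PTR_ERR(demo_task) : 0;
}

static void demo_stop(void)
{
        kthread_stop(demo_task);        /* wakes the thread and waits for it to exit */
}
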
@@ -362,18 +328,21 @@ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
        insert_work(cwq, &barr->work, tail);
 }
 
-static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
+static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
 {
+       int active;
+
        if (cwq->thread == current) {
                /*
                 * Probably keventd trying to flush its own queue. So simply run
                 * it by hand rather than deadlocking.
                 */
                run_workqueue(cwq);
+               active = 1;
        } else {
                struct wq_barrier barr;
-               int active = 0;
 
+               active = 0;
                spin_lock_irq(&cwq->lock);
                if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
                        insert_wq_barrier(cwq, &barr, 1);
@@ -384,6 +353,8 @@ static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
                if (active)
                        wait_for_completion(&barr.done);
        }
+
+       return active;
 }
 
 /**
@@ -410,7 +381,46 @@ void fastcall flush_workqueue(struct workqueue_struct *wq)
 }
 EXPORT_SYMBOL_GPL(flush_workqueue);
 
-static void wait_on_work(struct cpu_workqueue_struct *cwq,
+/*
+ * Upon a successful return (>= 0), the caller "owns" the WORK_STRUCT_PENDING
+ * bit, so this work can't be re-armed in any way.
+ */
+static int try_to_grab_pending(struct work_struct *work)
+{
+       struct cpu_workqueue_struct *cwq;
+       int ret = -1;
+
+       if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work)))
+               return 0;
+
+       /*
+        * The queueing is in progress, or it is already queued. Try to
+        * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
+        */
+
+       cwq = get_wq_data(work);
+       if (!cwq)
+               return ret;
+
+       spin_lock_irq(&cwq->lock);
+       if (!list_empty(&work->entry)) {
+               /*
+                * This work is queued, but perhaps we locked the wrong cwq.
+                * In that case we must see the new value after rmb(), see
+                * insert_work()->wmb().
+                */
+               smp_rmb();
+               if (cwq == get_wq_data(work)) {
+                       list_del_init(&work->entry);
+                       ret = 1;
+               }
+       }
+       spin_unlock_irq(&cwq->lock);
+
+       return ret;
+}
+
+static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
                                struct work_struct *work)
 {
        struct wq_barrier barr;
@@ -427,49 +437,85 @@ static void wait_on_work(struct cpu_workqueue_struct *cwq,
                wait_for_completion(&barr.done);
 }
 
-/**
- * flush_work - block until a work_struct's callback has terminated
- * @wq: the workqueue on which the work is queued
- * @work: the work which is to be flushed
- *
- * flush_work() will attempt to cancel the work if it is queued.  If the work's
- * callback appears to be running, flush_work() will block until it has
- * completed.
- *
- * flush_work() is designed to be used when the caller is tearing down data
- * structures which the callback function operates upon.  It is expected that,
- * prior to calling flush_work(), the caller has arranged for the work to not
- * be requeued.
- */
-void flush_work(struct workqueue_struct *wq, struct work_struct *work)
+static void wait_on_work(struct work_struct *work)
 {
-       const cpumask_t *cpu_map = wq_cpu_map(wq);
        struct cpu_workqueue_struct *cwq;
+       struct workqueue_struct *wq;
+       const cpumask_t *cpu_map;
        int cpu;
 
        might_sleep();
 
        cwq = get_wq_data(work);
-       /* Was it ever queued ? */
        if (!cwq)
                return;
 
-       /*
-        * This work can't be re-queued, no need to re-check that
-        * get_wq_data() is still the same when we take cwq->lock.
-        */
-       spin_lock_irq(&cwq->lock);
-       list_del_init(&work->entry);
-       work_release(work);
-       spin_unlock_irq(&cwq->lock);
+       wq = cwq->wq;
+       cpu_map = wq_cpu_map(wq);
 
        for_each_cpu_mask(cpu, *cpu_map)
-               wait_on_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
+               wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
+}
+
+static int __cancel_work_timer(struct work_struct *work,
+                               struct timer_list* timer)
+{
+       int ret;
+
+       do {
+               ret = (timer && likely(del_timer(timer)));
+               if (!ret)
+                       ret = try_to_grab_pending(work);
+               wait_on_work(work);
+       } while (unlikely(ret < 0));
+
+       work_clear_pending(work);
+       return ret;
+}
+
+/**
+ * cancel_work_sync - block until a work_struct's callback has terminated
+ * @work: the work which is to be flushed
+ *
+ * Returns true if @work was pending.
+ *
+ * cancel_work_sync() will cancel the work if it is queued. If the work's
+ * callback appears to be running, cancel_work_sync() will block until it
+ * has completed.
+ *
+ * It is possible to use this function if the work re-queues itself. It can
+ * cancel the work even if it migrates to another workqueue, however in that
+ * case it only guarantees that work->func() has completed on the last queued
+ * workqueue.
+ *
+ * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
+ * pending, otherwise it goes into a busy-wait loop until the timer expires.
+ *
+ * The caller must ensure that workqueue_struct on which this work was last
+ * queued can't be destroyed before this function returns.
+ */
+int cancel_work_sync(struct work_struct *work)
+{
+       return __cancel_work_timer(work, NULL);
 }
-EXPORT_SYMBOL_GPL(flush_work);
+EXPORT_SYMBOL_GPL(cancel_work_sync);
 
+/**
+ * cancel_delayed_work_sync - reliably kill off a delayed work.
+ * @dwork: the delayed work struct
+ *
+ * Returns true if @dwork was pending.
+ *
+ * It is possible to use this function if @dwork rearms itself via queue_work()
+ * or queue_delayed_work(). See also the comment for cancel_work_sync().
+ */
+int cancel_delayed_work_sync(struct delayed_work *dwork)
+{
+       return __cancel_work_timer(&dwork->work, &dwork->timer);
+}
+EXPORT_SYMBOL(cancel_delayed_work_sync);
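
A hypothetical driver-teardown sketch (not part of this patch; all example_* names are invented) showing how a self-rearming delayed work is meant to be shut down with the new cancel_delayed_work_sync():

#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/workqueue.h>

static struct delayed_work example_dwork;

static void example_handler(struct work_struct *work)
{
        struct delayed_work *dwork =
                container_of(work, struct delayed_work, work);

        /* ... periodic processing ... */

        schedule_delayed_work(dwork, HZ);       /* re-arm ourselves */
}

static int __init example_init(void)
{
        INIT_DELAYED_WORK(&example_dwork, example_handler);
        schedule_delayed_work(&example_dwork, HZ);
        return 0;
}

static void __exit example_exit(void)
{
        /*
         * Deletes the pending timer or steals the queued work, then waits
         * for a running handler; afterwards the work cannot re-arm itself.
         */
        cancel_delayed_work_sync(&example_dwork);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
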
 
-static struct workqueue_struct *keventd_wq;
+static struct workqueue_struct *keventd_wq __read_mostly;
 
 /**
  * schedule_work - put work task in global workqueue
@@ -555,39 +601,6 @@ void flush_scheduled_work(void)
 }
 EXPORT_SYMBOL(flush_scheduled_work);
 
-void flush_work_keventd(struct work_struct *work)
-{
-       flush_work(keventd_wq, work);
-}
-EXPORT_SYMBOL(flush_work_keventd);
-
-/**
- * cancel_rearming_delayed_workqueue - reliably kill off a delayed work whose handler rearms the delayed work.
- * @wq:   the controlling workqueue structure
- * @dwork: the delayed work struct
- */
-void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
-                                      struct delayed_work *dwork)
-{
-       /* Was it ever queued ? */
-       if (!get_wq_data(&dwork->work))
-               return;
-
-       while (!cancel_delayed_work(dwork))
-               flush_workqueue(wq);
-}
-EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);
-
-/**
- * cancel_rearming_delayed_work - reliably kill off a delayed keventd work whose handler rearms the delayed work.
- * @dwork: the delayed work struct
- */
-void cancel_rearming_delayed_work(struct delayed_work *dwork)
-{
-       cancel_rearming_delayed_workqueue(keventd_wq, dwork);
-}
-EXPORT_SYMBOL(cancel_rearming_delayed_work);
-
 /**
  * execute_in_process_context - reliably execute the routine with user context
  * @fn:                the function to execute
@@ -667,16 +680,21 @@ static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
                return PTR_ERR(p);
 
        cwq->thread = p;
-       cwq->should_stop = 0;
-       if (!is_single_threaded(wq))
-               kthread_bind(p, cpu);
-
-       if (is_single_threaded(wq) || cpu_online(cpu))
-               wake_up_process(p);
 
        return 0;
 }
 
+static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
+{
+       struct task_struct *p = cwq->thread;
+
+       if (p != NULL) {
+               if (cpu >= 0)
+                       kthread_bind(p, cpu);
+               wake_up_process(p);
+       }
+}
+
 struct workqueue_struct *__create_workqueue(const char *name,
                                            int singlethread, int freezeable)
 {
@@ -702,6 +720,7 @@ struct workqueue_struct *__create_workqueue(const char *name,
        if (singlethread) {
                cwq = init_cpu_workqueue(wq, singlethread_cpu);
                err = create_workqueue_thread(cwq, singlethread_cpu);
+               start_workqueue_thread(cwq, -1);
        } else {
                mutex_lock(&workqueue_mutex);
                list_add(&wq->list, &workqueues);
@@ -711,6 +730,7 @@ struct workqueue_struct *__create_workqueue(const char *name,
                        if (err || !cpu_online(cpu))
                                continue;
                        err = create_workqueue_thread(cwq, cpu);
+                       start_workqueue_thread(cwq, cpu);
                }
                mutex_unlock(&workqueue_mutex);
        }
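
For reference, a hypothetical caller-side sketch of the lifecycle these helpers serve (my_* names are invented; create_workqueue() and create_singlethread_workqueue() are the usual wrappers around __create_workqueue()):

#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;
static struct work_struct my_work;

static void my_work_fn(struct work_struct *work)
{
        /* runs in process context on my_wq's worker thread */
}

static int my_setup(void)
{
        my_wq = create_singlethread_workqueue("my_wq");
        if (!my_wq)
                return -ENOMEM;

        INIT_WORK(&my_work, my_work_fn);
        queue_work(my_wq, &my_work);
        return 0;
}

static void my_teardown(void)
{
        cancel_work_sync(&my_work);     /* or flush_workqueue(my_wq) */
        destroy_workqueue(my_wq);       /* stops the worker thread(s) */
}
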
@@ -725,29 +745,26 @@ EXPORT_SYMBOL_GPL(__create_workqueue);
 
 static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
 {
-       struct wq_barrier barr;
-       int alive = 0;
-
-       spin_lock_irq(&cwq->lock);
-       if (cwq->thread != NULL) {
-               insert_wq_barrier(cwq, &barr, 1);
-               cwq->should_stop = 1;
-               alive = 1;
-       }
-       spin_unlock_irq(&cwq->lock);
-
-       if (alive) {
-               wait_for_completion(&barr.done);
+       /*
+        * Our caller is either destroy_workqueue() or CPU_DEAD,
+        * workqueue_mutex protects cwq->thread
+        */
+       if (cwq->thread == NULL)
+               return;
 
-               while (unlikely(cwq->thread != NULL))
-                       cpu_relax();
-               /*
-                * Wait until cwq->thread unlocks cwq->lock,
-                * it won't touch *cwq after that.
-                */
-               smp_rmb();
-               spin_unlock_wait(&cwq->lock);
-       }
+       flush_cpu_workqueue(cwq);
+       /*
+        * If the caller is CPU_DEAD and cwq->worklist was not empty,
+        * a concurrent flush_workqueue() can insert a barrier after us.
+        * However, in that case run_workqueue() won't return and check
+        * kthread_should_stop() until it flushes all work_struct's.
+        * When ->worklist becomes empty it is safe to exit because no
+        * more work_structs can be queued on this cwq: flush_workqueue
+        * checks list_empty(), and a "normal" queue_work() can't use
+        * a dead CPU.
+        */
+       kthread_stop(cwq->thread);
+       cwq->thread = NULL;
 }
 
 /**
@@ -784,6 +801,8 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
        struct cpu_workqueue_struct *cwq;
        struct workqueue_struct *wq;
 
+       action &= ~CPU_TASKS_FROZEN;
+
        switch (action) {
        case CPU_LOCK_ACQUIRE:
                mutex_lock(&workqueue_mutex);
@@ -808,12 +827,11 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
                        return NOTIFY_BAD;
 
                case CPU_ONLINE:
-                       wake_up_process(cwq->thread);
+                       start_workqueue_thread(cwq, cpu);
                        break;
 
                case CPU_UP_CANCELED:
-                       if (cwq->thread)
-                               wake_up_process(cwq->thread);
+                       start_workqueue_thread(cwq, -1);
                case CPU_DEAD:
                        cleanup_workqueue_thread(cwq, cpu);
                        break;
@@ -823,7 +841,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
        return NOTIFY_OK;
 }
 
-void init_workqueues(void)
+void __init init_workqueues(void)
 {
        cpu_populated_map = cpu_online_map;
        singlethread_cpu = first_cpu(cpu_possible_map);