workqueue: implement WQ_NON_REENTRANT
author     Tejun Heo <tj@kernel.org>
           Tue, 29 Jun 2010 08:07:13 +0000 (10:07 +0200)
committer  Tejun Heo <tj@kernel.org>
           Tue, 29 Jun 2010 08:07:13 +0000 (10:07 +0200)
With the gcwq managing all workers and work->data pointing to the last
gcwq a work item ran on, non-reentrance can be implemented easily: on
queueing, check whether the work is still running on its previous gcwq
and, if it is, queue it there.  Implement this as the new
WQ_NON_REENTRANT workqueue flag.

Signed-off-by: Tejun Heo <tj@kernel.org>
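
For context, a minimal and entirely hypothetical driver-side sketch of the
reentrancy hazard this flag addresses: if a work item is requeued from
another CPU while its callback is still running, a reentrant (default)
workqueue may start a second instance of the callback on that CPU, so the
callback races with itself.  All identifiers below are made up; only the
core workqueue API is real.

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/delay.h>

/* Hypothetical example; only the workqueue API itself is real. */
static struct work_struct demo_work;
static unsigned long demo_count;

static void demo_fn(struct work_struct *work)
{
        /*
         * If demo_work is requeued from another CPU while demo_fn() is
         * still running here, an ordinary workqueue may start a second
         * demo_fn() on that CPU, and demo_count is then incremented
         * concurrently without any locking.
         */
        demo_count++;
        msleep(10);             /* pretend to do slow work */
}

static int __init demo_init(void)
{
        INIT_WORK(&demo_work, demo_fn);
        schedule_work(&demo_work);      /* system workqueue: reentrant */
        return 0;
}
module_init(demo_init);
MODULE_LICENSE("GPL");
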
include/linux/workqueue.h
kernel/workqueue.c

diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 0a78141..07cf5e5 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -225,6 +225,7 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
 enum {
        WQ_FREEZEABLE           = 1 << 0, /* freeze during suspend */
        WQ_SINGLE_CPU           = 1 << 1, /* only single cpu at a time */
+       WQ_NON_REENTRANT        = 1 << 2, /* guarantee non-reentrance */
 };
 
 extern struct workqueue_struct *
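
Continuing the hypothetical example from the commit message above, a hedged
sketch of how a driver would opt into the new guarantee.  It reuses the
made-up demo_wq/demo_work/demo_fn names and assumes the tree's
__create_workqueue(name, flags, max_active) interface (later kernels expose
the same flag through alloc_workqueue()); this is an illustration, not code
from the patch.

static struct workqueue_struct *demo_wq;        /* hypothetical */

static int __init demo_wq_init(void)
{
        /* assumed interface: __create_workqueue(name, flags, max_active) */
        demo_wq = __create_workqueue("demo_wq", WQ_NON_REENTRANT, 1);
        if (!demo_wq)
                return -ENOMEM;

        INIT_WORK(&demo_work, demo_fn);

        /*
         * Unlike schedule_work() in the earlier sketch, requeueing
         * demo_work while demo_fn() is still running now queues it back
         * on the CPU where it is executing, so demo_fn() never runs
         * concurrently with itself on this workqueue.
         */
        queue_work(demo_wq, &demo_work);
        return 0;
}
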
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index c68277c..bce1074 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -534,11 +534,37 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
 
        debug_work_activate(work);
 
-       /* determine gcwq to use */
+       /*
+        * Determine gcwq to use.  SINGLE_CPU is inherently
+        * NON_REENTRANT, so test it first.
+        */
        if (!(wq->flags & WQ_SINGLE_CPU)) {
-               /* just use the requested cpu for multicpu workqueues */
+               struct global_cwq *last_gcwq;
+
+               /*
+                * It's multi cpu.  If @wq is non-reentrant and @work
+                * was previously on a different cpu, it might still
+                * be running there, in which case the work needs to
+                * be queued on that cpu to guarantee non-reentrance.
+                */
                gcwq = get_gcwq(cpu);
-               spin_lock_irqsave(&gcwq->lock, flags);
+               if (wq->flags & WQ_NON_REENTRANT &&
+                   (last_gcwq = get_work_gcwq(work)) && last_gcwq != gcwq) {
+                       struct worker *worker;
+
+                       spin_lock_irqsave(&last_gcwq->lock, flags);
+
+                       worker = find_worker_executing_work(last_gcwq, work);
+
+                       if (worker && worker->current_cwq->wq == wq)
+                               gcwq = last_gcwq;
+                       else {
+                               /* meh... not running there, queue here */
+                               spin_unlock_irqrestore(&last_gcwq->lock, flags);
+                               spin_lock_irqsave(&gcwq->lock, flags);
+                       }
+               } else
+                       spin_lock_irqsave(&gcwq->lock, flags);
        } else {
                unsigned int req_cpu = cpu;
 
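
To summarize the new control flow, here is a simplified restatement of the
gcwq selection above for the multi-cpu (!WQ_SINGLE_CPU) path, written as a
standalone helper.  It is only an illustrative sketch: the real
__queue_work() performs the find_worker_executing_work() check under
last_gcwq->lock and juggles the irqsave locks shown in the diff.

static struct global_cwq *select_gcwq_sketch(struct workqueue_struct *wq,
                                             struct work_struct *work,
                                             unsigned int cpu)
{
        struct global_cwq *gcwq = get_gcwq(cpu);
        struct global_cwq *last_gcwq = get_work_gcwq(work);
        struct worker *worker;

        /* reentrant (default) behaviour: always use the requested cpu */
        if (!(wq->flags & WQ_NON_REENTRANT))
                return gcwq;

        /* work never ran before, or last ran on the requested cpu anyway */
        if (!last_gcwq || last_gcwq == gcwq)
                return gcwq;

        /*
         * Still executing on its previous gcwq for this workqueue?  Then
         * queue it there so the callback never runs on two CPUs at once.
         * (The real code holds last_gcwq->lock across this check.)
         */
        worker = find_worker_executing_work(last_gcwq, work);
        if (worker && worker->current_cwq->wq == wq)
                return last_gcwq;

        return gcwq;
}
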