/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
#define CREATE_TRACE_POINTS
#include <trace/events/workqueue.h>

/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 */
struct cpu_workqueue_struct {

        spinlock_t lock;

        struct list_head worklist;
        wait_queue_head_t more_work;
        struct work_struct *current_work;

        struct workqueue_struct *wq;
        struct task_struct *thread;
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
        struct cpu_workqueue_struct *cpu_wq;
        struct list_head list;
        const char *name;
        int singlethread;
        int freezeable;         /* Freeze threads during suspend */
        int rt;
#ifdef CONFIG_LOCKDEP
        struct lockdep_map lockdep_map;
#endif
};

/* Serializes the accesses to the list of workqueues. */
static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);

static int singlethread_cpu __read_mostly;
static const struct cpumask *cpu_singlethread_map __read_mostly;
/*
 * _cpu_down() first removes CPU from cpu_online_map, then CPU_DEAD
 * flushes cwq->worklist. This means that a flush_workqueue() or
 * wait_on_work() which comes in between can't use for_each_online_cpu().
 * We could use cpu_possible_map instead; the cpumask below is more
 * documentation than optimization.
 */
static cpumask_var_t cpu_populated_map __read_mostly;

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_wq_single_threaded(struct workqueue_struct *wq)
{
        return wq->singlethread;
}

static const struct cpumask *wq_cpu_map(struct workqueue_struct *wq)
{
        return is_wq_single_threaded(wq)
                ? cpu_singlethread_map : cpu_populated_map;
}

static
struct cpu_workqueue_struct *wq_per_cpu(struct workqueue_struct *wq, int cpu)
{
        if (unlikely(is_wq_single_threaded(wq)))
                cpu = singlethread_cpu;
        return per_cpu_ptr(wq->cpu_wq, cpu);
}

/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work,
                                struct cpu_workqueue_struct *cwq)
{
        unsigned long new;

        BUG_ON(!work_pending(work));

        new = (unsigned long) cwq | (1UL << WORK_STRUCT_PENDING);
        new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
        atomic_long_set(&work->data, new);
}

static inline
struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
{
        return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}
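
/*
 * Layout note (illustration only, derived from the masks used above):
 * work->data packs the cwq pointer and the flag bits into one word.
 * cpu_workqueue_struct is ____cacheline_aligned, so the low bits of
 * its address are zero and are free to carry the flags:
 *
 *	work->data: [ cwq pointer (high bits) | flags (low bits) ]
 *
 * get_wq_data() masks the flags off with WORK_STRUCT_WQ_DATA_MASK to
 * recover the pointer; set_wq_data() re-ORs the preserved flag bits.
 */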

static void insert_work(struct cpu_workqueue_struct *cwq,
                        struct work_struct *work, struct list_head *head)
{
        trace_workqueue_insertion(cwq->thread, work);

        set_wq_data(work, cwq);
        /*
         * Ensure that we get the right work->data if we see the
         * result of list_add() below, see try_to_grab_pending().
         */
        smp_wmb();
        list_add_tail(&work->entry, head);
        wake_up(&cwq->more_work);
}

static void __queue_work(struct cpu_workqueue_struct *cwq,
                         struct work_struct *work)
{
        unsigned long flags;

        spin_lock_irqsave(&cwq->lock, flags);
        insert_work(cwq, work, &cwq->worklist);
        spin_unlock_irqrestore(&cwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */
int queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
        int ret;

        ret = queue_work_on(get_cpu(), wq, work);
        put_cpu();

        return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
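
/*
 * Usage sketch (illustrative only; the "my_" names are hypothetical,
 * not part of this file):
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		printk(KERN_INFO "runs in process context\n");
 *	}
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	queue_work(my_wq, &my_work);
 *
 * where my_wq came from create_workqueue(). A second queue_work() while
 * the work is still pending returns 0 and queues nothing.
 */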

/**
 * queue_work_on - queue work on specific cpu
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to a specific CPU; the caller must ensure it
 * can't go away.
 */
int
queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
{
        int ret = 0;

        if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
                BUG_ON(!list_empty(&work->entry));
                __queue_work(wq_per_cpu(wq, cpu), work);
                ret = 1;
        }
        return ret;
}
EXPORT_SYMBOL_GPL(queue_work_on);

static void delayed_work_timer_fn(unsigned long __data)
{
        struct delayed_work *dwork = (struct delayed_work *)__data;
        struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);
        struct workqueue_struct *wq = cwq->wq;

        __queue_work(wq_per_cpu(wq, smp_processor_id()), &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work(struct workqueue_struct *wq,
                        struct delayed_work *dwork, unsigned long delay)
{
        if (delay == 0)
                return queue_work(wq, &dwork->work);

        return queue_delayed_work_on(-1, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);

/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
                        struct delayed_work *dwork, unsigned long delay)
{
        int ret = 0;
        struct timer_list *timer = &dwork->timer;
        struct work_struct *work = &dwork->work;

        if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
                BUG_ON(timer_pending(timer));
                BUG_ON(!list_empty(&work->entry));

                timer_stats_timer_set_start_info(&dwork->timer);

                /* This stores cwq for the moment, for the timer_fn */
                set_wq_data(work, wq_per_cpu(wq, raw_smp_processor_id()));
                timer->expires = jiffies + delay;
                timer->data = (unsigned long)dwork;
                timer->function = delayed_work_timer_fn;

                if (unlikely(cpu >= 0))
                        add_timer_on(timer, cpu);
                else
                        add_timer(timer);
                ret = 1;
        }
        return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
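
/*
 * Usage sketch (illustrative; my_wq/my_dwork are hypothetical):
 *
 *	static DECLARE_DELAYED_WORK(my_dwork, my_work_fn);
 *
 *	queue_delayed_work(my_wq, &my_dwork, HZ);
 *	queue_delayed_work_on(2, my_wq, &my_dwork, 5 * HZ);
 *
 * The first call runs my_work_fn() about a second later on the
 * submitting CPU's queue; the second targets CPU 2. The timer only
 * queues the work when it fires, so the PENDING bit is owned from
 * submission until the handler has run.
 */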

static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
        spin_lock_irq(&cwq->lock);
        while (!list_empty(&cwq->worklist)) {
                struct work_struct *work = list_entry(cwq->worklist.next,
                                                struct work_struct, entry);
                work_func_t f = work->func;
#ifdef CONFIG_LOCKDEP
                /*
                 * It is permissible to free the struct work_struct from
                 * inside the function that is called from it; we need to
                 * take that into account for lockdep too.  To avoid bogus
                 * "held lock freed" warnings as well as problems when
                 * looking into work->lockdep_map, make a copy and use
                 * that here.
                 */
                struct lockdep_map lockdep_map = work->lockdep_map;
#endif
                trace_workqueue_execution(cwq->thread, work);
                cwq->current_work = work;
                list_del_init(cwq->worklist.next);
                spin_unlock_irq(&cwq->lock);

                BUG_ON(get_wq_data(work) != cwq);
                work_clear_pending(work);
                lock_map_acquire(&cwq->wq->lockdep_map);
                lock_map_acquire(&lockdep_map);
                f(work);
                lock_map_release(&lockdep_map);
                lock_map_release(&cwq->wq->lockdep_map);

                if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
                        printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
                                        "%s/0x%08x/%d\n",
                                        current->comm, preempt_count(),
                                        task_pid_nr(current));
                        printk(KERN_ERR "    last function: ");
                        print_symbol("%s\n", (unsigned long)f);
                        debug_show_held_locks(current);
                        dump_stack();
                }

                spin_lock_irq(&cwq->lock);
                cwq->current_work = NULL;
        }
        spin_unlock_irq(&cwq->lock);
}

static int worker_thread(void *__cwq)
{
        struct cpu_workqueue_struct *cwq = __cwq;
        DEFINE_WAIT(wait);

        if (cwq->wq->freezeable)
                set_freezable();

        set_user_nice(current, -5);

        for (;;) {
                prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
                if (!freezing(current) &&
                    !kthread_should_stop() &&
                    list_empty(&cwq->worklist))
                        schedule();
                finish_wait(&cwq->more_work, &wait);

                try_to_freeze();

                if (kthread_should_stop())
                        break;

                run_workqueue(cwq);
        }

        return 0;
}

struct wq_barrier {
        struct work_struct      work;
        struct completion       done;
};

static void wq_barrier_func(struct work_struct *work)
{
        struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
        complete(&barr->done);
}

static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
                        struct wq_barrier *barr, struct list_head *head)
{
        INIT_WORK(&barr->work, wq_barrier_func);
        __set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));

        init_completion(&barr->done);

        insert_work(cwq, &barr->work, head);
}

static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
        int active = 0;
        struct wq_barrier barr;

        WARN_ON(cwq->thread == current);

        spin_lock_irq(&cwq->lock);
        if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
                insert_wq_barrier(cwq, &barr, &cwq->worklist);
                active = 1;
        }
        spin_unlock_irq(&cwq->lock);

        if (active)
                wait_for_completion(&barr.done);

        return active;
}

/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all works which were queued on entry have been handled,
 * but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself.  Now we just wait for the
 * helper threads to do it.
 */
void flush_workqueue(struct workqueue_struct *wq)
{
        const struct cpumask *cpu_map = wq_cpu_map(wq);
        int cpu;

        might_sleep();
        lock_map_acquire(&wq->lockdep_map);
        lock_map_release(&wq->lockdep_map);
        for_each_cpu(cpu, cpu_map)
                flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
}
EXPORT_SYMBOL_GPL(flush_workqueue);

/**
 * flush_work - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns false if @work has already terminated.
 *
 * It is expected that, prior to calling flush_work(), the caller has
 * arranged for the work to not be requeued, otherwise it doesn't make
 * sense to use this function.
 */
int flush_work(struct work_struct *work)
{
        struct cpu_workqueue_struct *cwq;
        struct list_head *prev;
        struct wq_barrier barr;

        might_sleep();
        cwq = get_wq_data(work);
        if (!cwq)
                return 0;

        lock_map_acquire(&cwq->wq->lockdep_map);
        lock_map_release(&cwq->wq->lockdep_map);

        prev = NULL;
        spin_lock_irq(&cwq->lock);
        if (!list_empty(&work->entry)) {
                /*
                 * See the comment near try_to_grab_pending()->smp_rmb().
                 * If it was re-queued under us we are not going to wait.
                 */
                smp_rmb();
                if (unlikely(cwq != get_wq_data(work)))
                        goto out;
                prev = &work->entry;
        } else {
                if (cwq->current_work != work)
                        goto out;
                prev = &cwq->worklist;
        }
        insert_wq_barrier(cwq, &barr, prev->next);
out:
        spin_unlock_irq(&cwq->lock);
        if (!prev)
                return 0;

        wait_for_completion(&barr.done);
        return 1;
}
EXPORT_SYMBOL_GPL(flush_work);
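
/*
 * Usage sketch (illustrative): flush_work() waits for one specific work
 * item rather than draining the whole queue. A typical teardown,
 * assuming the (hypothetical) caller has already stopped requeueing:
 *
 *	disable_my_irq(dev);
 *	flush_work(&dev->work);
 *
 * Unlike flush_workqueue(), this does not wait for other work items
 * sharing the same workqueue.
 */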

/*
 * Upon a successful return (>= 0), the caller "owns" the
 * WORK_STRUCT_PENDING bit, so this work can't be re-armed in any way.
 */
static int try_to_grab_pending(struct work_struct *work)
{
        struct cpu_workqueue_struct *cwq;
        int ret = -1;

        if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work)))
                return 0;

        /*
         * The queueing is in progress, or it is already queued. Try to
         * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
         */

        cwq = get_wq_data(work);
        if (!cwq)
                return ret;

        spin_lock_irq(&cwq->lock);
        if (!list_empty(&work->entry)) {
                /*
                 * This work is queued, but perhaps we locked the wrong cwq.
                 * In that case we must see the new value after rmb(), see
                 * insert_work()->wmb().
                 */
                smp_rmb();
                if (cwq == get_wq_data(work)) {
                        list_del_init(&work->entry);
                        ret = 1;
                }
        }
        spin_unlock_irq(&cwq->lock);

        return ret;
}

static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
                                struct work_struct *work)
{
        struct wq_barrier barr;
        int running = 0;

        spin_lock_irq(&cwq->lock);
        if (unlikely(cwq->current_work == work)) {
                insert_wq_barrier(cwq, &barr, cwq->worklist.next);
                running = 1;
        }
        spin_unlock_irq(&cwq->lock);

        if (unlikely(running))
                wait_for_completion(&barr.done);
}

static void wait_on_work(struct work_struct *work)
{
        struct cpu_workqueue_struct *cwq;
        struct workqueue_struct *wq;
        const struct cpumask *cpu_map;
        int cpu;

        might_sleep();

        lock_map_acquire(&work->lockdep_map);
        lock_map_release(&work->lockdep_map);

        cwq = get_wq_data(work);
        if (!cwq)
                return;

        wq = cwq->wq;
        cpu_map = wq_cpu_map(wq);

        for_each_cpu(cpu, cpu_map)
                wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}

static int __cancel_work_timer(struct work_struct *work,
                                struct timer_list *timer)
{
        int ret;

        do {
                ret = (timer && likely(del_timer(timer)));
                if (!ret)
                        ret = try_to_grab_pending(work);
                wait_on_work(work);
        } while (unlikely(ret < 0));

        work_clear_pending(work);
        return ret;
}

/**
 * cancel_work_sync - block until a work_struct's callback has terminated
 * @work: the work which is to be canceled
 *
 * Returns true if @work was pending.
 *
 * cancel_work_sync() will cancel the work if it is queued. If the work's
 * callback appears to be running, cancel_work_sync() will block until it
 * has completed.
 *
 * It is possible to use this function if the work re-queues itself. It can
 * cancel the work even if it migrates to another workqueue, however in that
 * case it only guarantees that work->func() has completed on the last queued
 * workqueue.
 *
 * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
 * pending, otherwise it goes into a busy-wait loop until the timer expires.
 *
 * The caller must ensure that the workqueue_struct on which this work was
 * last queued can't be destroyed before this function returns.
 */
int cancel_work_sync(struct work_struct *work)
{
        return __cancel_work_timer(work, NULL);
}
EXPORT_SYMBOL_GPL(cancel_work_sync);
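
/*
 * Usage sketch (illustrative): the common shutdown pattern is to cancel
 * synchronously and only then free the object embedding the work:
 *
 *	cancel_work_sync(&dev->reset_work);
 *	kfree(dev);
 *
 * Since this may block in wait_on_work(), it must be called from
 * process context and never from the work function itself.
 */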

/**
 * cancel_delayed_work_sync - reliably kill off a delayed work.
 * @dwork: the delayed work struct
 *
 * Returns true if @dwork was pending.
 *
 * It is possible to use this function if @dwork rearms itself via queue_work()
 * or queue_delayed_work(). See also the comment for cancel_work_sync().
 */
int cancel_delayed_work_sync(struct delayed_work *dwork)
{
        return __cancel_work_timer(&dwork->work, &dwork->timer);
}
EXPORT_SYMBOL(cancel_delayed_work_sync);

static struct workqueue_struct *keventd_wq __read_mostly;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * Returns zero if @work was already on the kernel-global workqueue and
 * non-zero otherwise.
 *
 * This puts a job in the kernel-global workqueue if it was not already
 * queued and leaves it in the same position on the kernel-global
 * workqueue otherwise.
 */
int schedule_work(struct work_struct *work)
{
        return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);

/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu.
 */
int schedule_work_on(int cpu, struct work_struct *work)
{
        return queue_work_on(cpu, keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work_on);

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int schedule_delayed_work(struct delayed_work *dwork,
                                        unsigned long delay)
{
        return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);
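
/*
 * Usage sketch (illustrative; poll_fn/do_poll are hypothetical): a
 * classic pattern is periodic work that rearms itself from its own
 * handler:
 *
 *	static void poll_fn(struct work_struct *work)
 *	{
 *		struct delayed_work *dwork =
 *			container_of(work, struct delayed_work, work);
 *		do_poll();
 *		schedule_delayed_work(dwork, HZ);
 *	}
 *	static DECLARE_DELAYED_WORK(poll_dwork, poll_fn);
 *
 * Stop it with cancel_delayed_work_sync(), which copes with rearming.
 */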

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
                        struct delayed_work *dwork, unsigned long delay)
{
        return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);

/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
        int cpu;
        struct work_struct *works;

        works = alloc_percpu(struct work_struct);
        if (!works)
                return -ENOMEM;

        get_online_cpus();
        for_each_online_cpu(cpu) {
                struct work_struct *work = per_cpu_ptr(works, cpu);

                INIT_WORK(work, func);
                schedule_work_on(cpu, work);
        }
        for_each_online_cpu(cpu)
                flush_work(per_cpu_ptr(works, cpu));
        put_online_cpus();
        free_percpu(works);
        return 0;
}
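
/*
 * Usage sketch (illustrative; drain_fn/drain_local_cache are
 * hypothetical): run a function on every online CPU and return once
 * they have all finished:
 *
 *	static void drain_fn(struct work_struct *unused)
 *	{
 *		drain_local_cache();
 *	}
 *
 *	err = schedule_on_each_cpu(drain_fn);
 *
 * Each CPU's keventd thread runs drain_fn locally, so per-cpu data can
 * be touched without cross-CPU locking.
 */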

void flush_scheduled_work(void)
{
        flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:         the function to execute
 * @ew:         guaranteed storage for the execute work structure (must
 *              be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:     0 - function was executed
 *              1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
        if (!in_interrupt()) {
                fn(&ew->work);
                return 0;
        }

        INIT_WORK(&ew->work, fn);
        schedule_work(&ew->work);

        return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
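
/*
 * Usage sketch (illustrative; my_release_fn is hypothetical): callers
 * that may run in either context embed a struct execute_work in an
 * object that outlives the call:
 *
 *	execute_in_process_context(my_release_fn, &dev->ew);
 *
 * From process context my_release_fn() runs before this returns (0);
 * from interrupt context it is deferred to keventd and 1 is returned.
 */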

int keventd_up(void)
{
        return keventd_wq != NULL;
}

int current_is_keventd(void)
{
        struct cpu_workqueue_struct *cwq;
        int cpu = raw_smp_processor_id(); /* preempt-safe: keventd is per-cpu */
        int ret = 0;

        BUG_ON(!keventd_wq);

        cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
        if (current == cwq->thread)
                ret = 1;

        return ret;
}

static struct cpu_workqueue_struct *
init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
{
        struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);

        cwq->wq = wq;
        spin_lock_init(&cwq->lock);
        INIT_LIST_HEAD(&cwq->worklist);
        init_waitqueue_head(&cwq->more_work);

        return cwq;
}

static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
        struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
        struct workqueue_struct *wq = cwq->wq;
        const char *fmt = is_wq_single_threaded(wq) ? "%s" : "%s/%d";
        struct task_struct *p;

        p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
        /*
         * Nobody can add the work_struct to this cwq,
         *      if (caller is __create_workqueue)
         *              nobody should see this wq
         *      else // caller is CPU_UP_PREPARE
         *              cpu is not on cpu_online_map
         * so we can abort safely.
         */
        if (IS_ERR(p))
                return PTR_ERR(p);
        if (cwq->wq->rt)
                sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
        cwq->thread = p;

        trace_workqueue_creation(cwq->thread, cpu);

        return 0;
}

static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
        struct task_struct *p = cwq->thread;

        if (p != NULL) {
                if (cpu >= 0)
                        kthread_bind(p, cpu);
                wake_up_process(p);
        }
}

struct workqueue_struct *__create_workqueue_key(const char *name,
                                                int singlethread,
                                                int freezeable,
                                                int rt,
                                                struct lock_class_key *key,
                                                const char *lock_name)
{
        struct workqueue_struct *wq;
        struct cpu_workqueue_struct *cwq;
        int err = 0, cpu;

        wq = kzalloc(sizeof(*wq), GFP_KERNEL);
        if (!wq)
                return NULL;

        wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
        if (!wq->cpu_wq) {
                kfree(wq);
                return NULL;
        }

        wq->name = name;
        lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
        wq->singlethread = singlethread;
        wq->freezeable = freezeable;
        wq->rt = rt;
        INIT_LIST_HEAD(&wq->list);

        if (singlethread) {
                cwq = init_cpu_workqueue(wq, singlethread_cpu);
                err = create_workqueue_thread(cwq, singlethread_cpu);
                start_workqueue_thread(cwq, -1);
        } else {
                cpu_maps_update_begin();
                /*
                 * We must place this wq on the list even if the code below
                 * fails. Otherwise cpu_down(cpu) could remove cpu from
                 * cpu_populated_map before destroy_workqueue() takes the
                 * lock, and we would leak cwq[cpu]->thread.
                 */
                spin_lock(&workqueue_lock);
                list_add(&wq->list, &workqueues);
                spin_unlock(&workqueue_lock);
                /*
                 * We must initialize cwqs for each possible cpu even if we
                 * are going to call destroy_workqueue() finally. Otherwise
                 * cpu_up() can hit the uninitialized cwq once we drop the
                 * lock.
                 */
                for_each_possible_cpu(cpu) {
                        cwq = init_cpu_workqueue(wq, cpu);
                        if (err || !cpu_online(cpu))
                                continue;
                        err = create_workqueue_thread(cwq, cpu);
                        start_workqueue_thread(cwq, cpu);
                }
                cpu_maps_update_done();
        }

        if (err) {
                destroy_workqueue(wq);
                wq = NULL;
        }
        return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue_key);
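
/*
 * Usage sketch (illustrative): drivers normally reach this through the
 * wrappers in workqueue.h rather than calling it directly:
 *
 *	wq = create_workqueue("my_wq");
 *	wq = create_singlethread_workqueue("my_wq");
 *	...
 *	destroy_workqueue(wq);
 *
 * The wrappers supply the lock_class_key so lockdep can tell workqueues
 * apart by their allocation site.
 */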

static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
{
        /*
         * Our caller is either destroy_workqueue() or CPU_POST_DEAD,
         * cpu_add_remove_lock protects cwq->thread.
         */
        if (cwq->thread == NULL)
                return;

        lock_map_acquire(&cwq->wq->lockdep_map);
        lock_map_release(&cwq->wq->lockdep_map);

        flush_cpu_workqueue(cwq);
        /*
         * If the caller is CPU_POST_DEAD and cwq->worklist was not empty,
         * a concurrent flush_workqueue() can insert a barrier after us.
         * However, in that case run_workqueue() won't return and check
         * kthread_should_stop() until it flushes all work_struct's.
         * When ->worklist becomes empty it is safe to exit because no
         * more work_structs can be queued on this cwq: flush_workqueue
         * checks list_empty(), and a "normal" queue_work() can't use
         * a dead CPU.
         */
        trace_workqueue_destruction(cwq->thread);
        kthread_stop(cwq->thread);
        cwq->thread = NULL;
}

/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
        const struct cpumask *cpu_map = wq_cpu_map(wq);
        int cpu;

        cpu_maps_update_begin();
        spin_lock(&workqueue_lock);
        list_del(&wq->list);
        spin_unlock(&workqueue_lock);

        for_each_cpu(cpu, cpu_map)
                cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
        cpu_maps_update_done();

        free_percpu(wq->cpu_wq);
        kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);

static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
                                                unsigned long action,
                                                void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;
        struct cpu_workqueue_struct *cwq;
        struct workqueue_struct *wq;
        int ret = NOTIFY_OK;

        action &= ~CPU_TASKS_FROZEN;

        switch (action) {
        case CPU_UP_PREPARE:
                cpumask_set_cpu(cpu, cpu_populated_map);
        }
undo:
        list_for_each_entry(wq, &workqueues, list) {
                cwq = per_cpu_ptr(wq->cpu_wq, cpu);

                switch (action) {
                case CPU_UP_PREPARE:
                        if (!create_workqueue_thread(cwq, cpu))
                                break;
                        printk(KERN_ERR "workqueue [%s] for %i failed\n",
                                wq->name, cpu);
                        action = CPU_UP_CANCELED;
                        ret = NOTIFY_BAD;
                        goto undo;

                case CPU_ONLINE:
                        start_workqueue_thread(cwq, cpu);
                        break;

                case CPU_UP_CANCELED:
                        start_workqueue_thread(cwq, -1);
                        /* fall through: the half-created thread must die */
                case CPU_POST_DEAD:
                        cleanup_workqueue_thread(cwq);
                        break;
                }
        }

        switch (action) {
        case CPU_UP_CANCELED:
        case CPU_POST_DEAD:
                cpumask_clear_cpu(cpu, cpu_populated_map);
        }

        return ret;
}

#ifdef CONFIG_SMP

struct work_for_cpu {
        struct completion completion;
        long (*fn)(void *);
        void *arg;
        long ret;
};

static int do_work_for_cpu(void *_wfc)
{
        struct work_for_cpu *wfc = _wfc;
        wfc->ret = wfc->fn(wfc->arg);
        complete(&wfc->completion);
        return 0;
}

/**
 * work_on_cpu - run a function in user context on a particular cpu
 * @cpu: the cpu to run on
 * @fn: the function to run
 * @arg: the function arg
 *
 * This will return the value @fn returns.
 * It is up to the caller to ensure that the cpu doesn't go offline.
 * The caller must not hold any locks which would prevent @fn from completing.
 */
long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
{
        struct task_struct *sub_thread;
        struct work_for_cpu wfc = {
                .completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
                .fn = fn,
                .arg = arg,
        };

        sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
        if (IS_ERR(sub_thread))
                return PTR_ERR(sub_thread);
        kthread_bind(sub_thread, cpu);
        wake_up_process(sub_thread);
        wait_for_completion(&wfc.completion);
        return wfc.ret;
}
EXPORT_SYMBOL_GPL(work_on_cpu);
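
/*
 * Usage sketch (illustrative; my_fn/arg are hypothetical): pin the cpu
 * with the hotplug lock, then run the function there and collect its
 * return value:
 *
 *	get_online_cpus();
 *	ret = work_on_cpu(3, my_fn, arg);
 *	put_online_cpus();
 *
 * A fresh kthread is created and bound for every call, so this is a
 * slow path and must only be used from contexts that can sleep.
 */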
#endif /* CONFIG_SMP */

void __init init_workqueues(void)
{
        alloc_cpumask_var(&cpu_populated_map, GFP_KERNEL);

        cpumask_copy(cpu_populated_map, cpu_online_mask);
        singlethread_cpu = cpumask_first(cpu_possible_mask);
        cpu_singlethread_map = cpumask_of(singlethread_cpu);
        hotcpu_notifier(workqueue_cpu_callback, 0);
        keventd_wq = create_workqueue("events");
        BUG_ON(!keventd_wq);
}