/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */

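/*
 * Usage sketch (illustrative, not part of the original file; my_work and
 * my_func are hypothetical names): callers embed a struct irq_work,
 * initialize it with init_irq_work() from <linux/irq_work.h>, and queue
 * it from any context, including NMI:
 *
 *	static void my_func(struct irq_work *entry)
 *	{
 *		pr_info("irq_work callback in hardirq context\n");
 *	}
 *
 *	static struct irq_work my_work;
 *
 *	init_irq_work(&my_work, my_func);
 *	irq_work_queue(&my_work);
 */
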
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <asm/processor.h>

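/*
 * Per-CPU state: irq_work_list is the lock-less list of pending entries
 * for this CPU; irq_work_raised records whether the irq work interrupt
 * has already been raised, so that queueing several works triggers at
 * most one arch_irq_work_raise() call between two runs.
 */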
static DEFINE_PER_CPU(struct llist_head, irq_work_list);
static DEFINE_PER_CPU(int, irq_work_raised);

/*
 * Claim the entry so that no one else will poke at it.
 */
static bool irq_work_claim(struct irq_work *work)
{
	unsigned long flags, oflags, nflags;

	/*
	 * Start with our best wish as a premise but only trust any
	 * flag value after cmpxchg() result.
	 */
	flags = work->flags & ~IRQ_WORK_PENDING;
	for (;;) {
		nflags = flags | IRQ_WORK_FLAGS;
		oflags = cmpxchg(&work->flags, flags, nflags);
		if (oflags == flags)
			break;
		if (oflags & IRQ_WORK_PENDING)
			return false;
		flags = oflags;
		cpu_relax();
	}

	return true;
}

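/*
 * Flag lifecycle implemented by irq_work_claim() and __irq_work_run()
 * (IRQ_WORK_FLAGS is IRQ_WORK_PENDING | IRQ_WORK_BUSY, per
 * <linux/irq_work.h>):
 *
 *	0			free, claimable
 *	PENDING | BUSY		claimed and queued
 *	BUSY			callback running, re-claimable
 *	0			callback finished, nobody re-claimed it
 *
 * A claim fails only while PENDING is set; a work whose callback is
 * still running (BUSY alone) can be claimed and queued again.
 */
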
void __weak arch_irq_work_raise(void)
{
	/*
	 * Lame architectures will get the timer tick callback
	 */
}

/*
 * Enqueue the irq_work @entry unless it's already pending
 * somewhere.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue(struct irq_work *work)
{
	/* Only queue if not already pending */
	if (!irq_work_claim(work))
		return false;

	/* Queue the entry and raise the IPI if needed. */
	preempt_disable();

	llist_add(&work->llnode, &__get_cpu_var(irq_work_list));

	/*
	 * If the work is not "lazy" or the tick is stopped, raise the irq
	 * work interrupt (if supported by the arch), otherwise, just wait
	 * for the next tick.
	 */
	if (!(work->flags & IRQ_WORK_LAZY) || tick_nohz_tick_stopped()) {
		if (!this_cpu_cmpxchg(irq_work_raised, 0, 1))
			arch_irq_work_raise();
	}

	preempt_enable();

	return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);

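/*
 * Illustrative sketch (lazy_func is a hypothetical callback): a caller
 * that tolerates up to a tick of latency can mark its work IRQ_WORK_LAZY
 * at init time, so queueing only raises the interrupt when the tick is
 * stopped:
 *
 *	static struct irq_work lazy_work = {
 *		.flags	= IRQ_WORK_LAZY,
 *		.func	= lazy_func,
 *	};
 */
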
bool irq_work_needs_cpu(void)
{
	struct llist_head *this_list;

	this_list = &__get_cpu_var(irq_work_list);
	if (llist_empty(this_list))
		return false;

	/* All work should have been flushed before going offline */
	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));

	return true;
}

static void __irq_work_run(void)
{
	unsigned long flags;
	struct irq_work *work;
	struct llist_head *this_list;
	struct llist_node *llnode;

	/*
	 * Reset the "raised" state right before we check the list because
	 * an NMI may enqueue after we find the list empty from the runner.
	 */
	__this_cpu_write(irq_work_raised, 0);
	barrier();

	this_list = &__get_cpu_var(irq_work_list);
	if (llist_empty(this_list))
		return;

	BUG_ON(!irqs_disabled());

	llnode = llist_del_all(this_list);
	while (llnode != NULL) {
		work = llist_entry(llnode, struct irq_work, llnode);

		llnode = llist_next(llnode);

		/*
		 * Clear the PENDING bit, after this point the @work
		 * can be re-used.
		 * Make it immediately visible so that other CPUs trying
		 * to claim that work don't rely on us to handle their data
		 * while we are in the middle of the func.
		 */
		flags = work->flags & ~IRQ_WORK_PENDING;
		xchg(&work->flags, flags);

		work->func(work);
		/*
		 * Clear the BUSY bit and return to the free state if
		 * no-one else claimed it meanwhile.
		 */
		(void)cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
	}
}

/*
 * Run the irq_work entries on this cpu. Must be called from hardirq
 * context with local IRQs disabled.
 */
void irq_work_run(void)
{
	BUG_ON(!in_irq());
	__irq_work_run();
}
EXPORT_SYMBOL_GPL(irq_work_run);

/*
 * Synchronize against the irq_work @entry, ensures the entry is not
 * currently in use.
 */
void irq_work_sync(struct irq_work *work)
{
	WARN_ON_ONCE(irqs_disabled());

	while (work->flags & IRQ_WORK_BUSY)
		cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);

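/*
 * Illustrative sketch (dyn_work is a hypothetical dynamically allocated
 * object embedding an irq_work): synchronize before freeing so the
 * callback cannot run on freed memory:
 *
 *	irq_work_sync(&dyn_work->work);
 *	kfree(dyn_work);
 */
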
#ifdef CONFIG_HOTPLUG_CPU
static int irq_work_cpu_notify(struct notifier_block *self,
			       unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_DYING:
		/* Called from stop_machine */
		if (WARN_ON_ONCE(cpu != smp_processor_id()))
			break;
		__irq_work_run();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block cpu_notify;

static __init int irq_work_init_cpu_notifier(void)
{
	cpu_notify.notifier_call = irq_work_cpu_notify;
	cpu_notify.priority = 0;
	register_cpu_notifier(&cpu_notify);
	return 0;
}
device_initcall(irq_work_init_cpu_notifier);

#endif /* CONFIG_HOTPLUG_CPU */