/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 *	Distribute under GPLv2.
 *
 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 *
 *	Remote softirq infrastructure is by Jens Axboe.
 */

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/tick.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>

/*
   - No shared variables; all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if a softirq is serialized, only the local cpu is marked for
     execution, so we get a sort of weak cpu binding. It is still not
     clear whether this results in better locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt itself.
 */

#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

char *softirq_to_name[NR_SOFTIRQS] = {
	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
	"TASKLET", "SCHED", "HRTIMER", "RCU"
};

/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}

/*
 * preempt_count and SOFTIRQ_OFFSET usage:
 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
 *   softirq processing.
 * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 *   on local_bh_disable or local_bh_enable.
 * This lets us distinguish between whether we are currently processing
 * a softirq and whether we just have bh disabled.
 */
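
/*
 * Illustrative sketch (assumes the generic preempt_count layout, where
 * SOFTIRQ_OFFSET == 0x100 and hence SOFTIRQ_DISABLE_OFFSET == 0x200):
 *
 *	local_bh_disable();
 *		softirq_count() == 0x200: bh disabled, not serving softirq
 *	local_bh_enable();
 *		softirq_count() == 0 again
 *
 * While __do_softirq() runs, softirq_count() carries SOFTIRQ_OFFSET
 * instead, which is what in_serving_softirq() tests for; in_softirq()
 * tests the whole mask and is true in both situations.
 */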

/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
static void __local_bh_disable(unsigned long ip, unsigned int cnt)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	/*
	 * The preempt tracer hooks into add_preempt_count and will break
	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
	 * is set and before current->softirq_enabled is cleared.
	 * We must manually increment preempt_count here and manually
	 * call the trace_preempt_off later.
	 */
	preempt_count() += cnt;

	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == cnt)
		trace_softirqs_off(ip);
	raw_local_irq_restore(flags);

	if (preempt_count() == cnt)
		trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
}
#else /* !CONFIG_TRACE_IRQFLAGS */
static inline void __local_bh_disable(unsigned long ip, unsigned int cnt)
{
	add_preempt_count(cnt);
	barrier();
}
#endif /* CONFIG_TRACE_IRQFLAGS */

void local_bh_disable(void)
{
	__local_bh_disable((unsigned long)__builtin_return_address(0),
				SOFTIRQ_DISABLE_OFFSET);
}

EXPORT_SYMBOL(local_bh_disable);

static void __local_bh_enable(unsigned int cnt)
{
	WARN_ON_ONCE(in_irq());
	WARN_ON_ONCE(!irqs_disabled());

	if (softirq_count() == cnt)
		trace_softirqs_on((unsigned long)__builtin_return_address(0));
	sub_preempt_count(cnt);
}

/*
 * Special-case - softirqs can safely be enabled in
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}

EXPORT_SYMBOL(_local_bh_enable);

static inline void _local_bh_enable_ip(unsigned long ip)
{
	WARN_ON_ONCE(in_irq() || irqs_disabled());
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
		trace_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	sub_preempt_count(SOFTIRQ_DISABLE_OFFSET - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending()))
		do_softirq();

	dec_preempt_count();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}

void local_bh_enable(void)
{
	_local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
EXPORT_SYMBOL(local_bh_enable);

void local_bh_enable_ip(unsigned long ip)
{
	_local_bh_enable_ip(ip);
}
EXPORT_SYMBOL(local_bh_enable_ip);
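
/*
 * Sketch of typical use (the protected state is illustrative): code
 * that shares data with a tasklet or other softirq handler on this
 * cpu brackets the access with a bh disable/enable pair:
 *
 *	local_bh_disable();
 *	... touch state that the local softirq handler also touches ...
 *	local_bh_enable();
 *
 * local_bh_enable() also runs any softirqs that were raised while they
 * were masked, via the do_softirq() call in _local_bh_enable_ip().
 */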

/*
 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
 * but break the loop if need_resched() is set or after 2 ms.
 * MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
 * certain cases, such as stop_machine(), jiffies may cease to
 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
 * well to make sure we eventually return from this method.
 *
 * These limits have been established via experimentation.
 * The two things to balance are latency against fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_TIME	msecs_to_jiffies(2)
#define MAX_SOFTIRQ_RESTART	10
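
/*
 * Worked example (HZ-dependent): with HZ=1000, MAX_SOFTIRQ_TIME is
 * msecs_to_jiffies(2) == 2 jiffies, so __do_softirq() gives up after
 * roughly 2 ms, after MAX_SOFTIRQ_RESTART (10) passes over the pending
 * mask, or as soon as need_resched() is set, whichever comes first,
 * and defers the remaining work to ksoftirqd.
 */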

asmlinkage void __do_softirq(void)
{
	struct softirq_action *h;
	__u32 pending;
	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
	int cpu;
	int max_restart = MAX_SOFTIRQ_RESTART;

	pending = local_softirq_pending();
	account_system_vtime(current);

	__local_bh_disable((unsigned long)__builtin_return_address(0),
				SOFTIRQ_OFFSET);
	lockdep_softirq_enter();

	cpu = smp_processor_id();
restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	do {
		if (pending & 1) {
			unsigned int vec_nr = h - softirq_vec;
			int prev_count = preempt_count();

			kstat_incr_softirqs_this_cpu(vec_nr);

			trace_softirq_entry(vec_nr);
			h->action(h);
			trace_softirq_exit(vec_nr);
			if (unlikely(prev_count != preempt_count())) {
				printk(KERN_ERR "huh, entered softirq %u %s %p "
				       "with preempt_count %08x, "
				       "exited with %08x?\n", vec_nr,
				       softirq_to_name[vec_nr], h->action,
				       prev_count, preempt_count());
				preempt_count() = prev_count;
			}

			rcu_bh_qs(cpu);
		}
		h++;
		pending >>= 1;
	} while (pending);

	local_irq_disable();

	pending = local_softirq_pending();
	if (pending) {
		if (time_before(jiffies, end) && !need_resched() &&
		    --max_restart)
			goto restart;
		wakeup_softirqd();
	}

	lockdep_softirq_exit();

	account_system_vtime(current);
	__local_bh_enable(SOFTIRQ_OFFSET);
}

#ifndef __ARCH_HAS_DO_SOFTIRQ

asmlinkage void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending)
		__do_softirq();

	local_irq_restore(flags);
}

#endif

/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
	int cpu = smp_processor_id();

	rcu_irq_enter();
	if (idle_cpu(cpu) && !in_interrupt()) {
		/*
		 * Prevent raise_softirq from needlessly waking up ksoftirqd
		 * here, as softirq will be serviced on return from interrupt.
		 */
		local_bh_disable();
		tick_check_idle(cpu);
		_local_bh_enable();
	}

	__irq_enter();
}

#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
static inline void invoke_softirq(void)
{
	if (!force_irqthreads)
		__do_softirq();
	else {
		__local_bh_disable((unsigned long)__builtin_return_address(0),
				SOFTIRQ_OFFSET);
		wakeup_softirqd();
		__local_bh_enable(SOFTIRQ_OFFSET);
	}
}
#else
static inline void invoke_softirq(void)
{
	if (!force_irqthreads)
		do_softirq();
	else {
		__local_bh_disable((unsigned long)__builtin_return_address(0),
				SOFTIRQ_OFFSET);
		wakeup_softirqd();
		__local_bh_enable(SOFTIRQ_OFFSET);
	}
}
#endif

/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
	account_system_vtime(current);
	trace_hardirq_exit();
	sub_preempt_count(IRQ_EXIT_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

	rcu_irq_exit();
#ifdef CONFIG_NO_HZ
	/* Make sure that timer wheel updates are propagated */
	if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
		tick_nohz_stop_sched_tick(0);
#endif
	preempt_enable_no_resched();
}

/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt())
		wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}

void open_softirq(int nr, void (*action)(struct softirq_action *))
{
	softirq_vec[nr].action = action;
}
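
/*
 * Illustration of the registration pattern (net_tx_action is the real
 * networking handler, shown here only as an example; the call below is
 * actually made from net/core/dev.c, not from this file):
 *
 *	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
 *
 * and later, from code that wants the handler to run:
 *
 *	raise_softirq(NET_TX_SOFTIRQ);
 */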

/*
 * Tasklets
 */
struct tasklet_head
{
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);

void __tasklet_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__this_cpu_read(tasklet_vec.tail) = t;
	__this_cpu_write(tasklet_vec.tail, &(t->next));
	raise_softirq_irqoff(TASKLET_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__this_cpu_read(tasklet_hi_vec.tail) = t;
	__this_cpu_write(tasklet_hi_vec.tail, &(t->next));
	raise_softirq_irqoff(HI_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_hi_schedule);

void __tasklet_hi_schedule_first(struct tasklet_struct *t)
{
	BUG_ON(!irqs_disabled());

	t->next = __this_cpu_read(tasklet_hi_vec.head);
	__this_cpu_write(tasklet_hi_vec.head, t);
	__raise_softirq_irqoff(HI_SOFTIRQ);
}

EXPORT_SYMBOL(__tasklet_hi_schedule_first);

static void tasklet_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __this_cpu_read(tasklet_vec.head);
	__this_cpu_write(tasklet_vec.head, NULL);
	__this_cpu_write(tasklet_vec.tail, &__get_cpu_var(tasklet_vec).head);
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__this_cpu_read(tasklet_vec.tail) = t;
		__this_cpu_write(tasklet_vec.tail, &(t->next));
		__raise_softirq_irqoff(TASKLET_SOFTIRQ);
		local_irq_enable();
	}
}

static void tasklet_hi_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __this_cpu_read(tasklet_hi_vec.head);
	__this_cpu_write(tasklet_hi_vec.head, NULL);
	__this_cpu_write(tasklet_hi_vec.tail, &__get_cpu_var(tasklet_hi_vec).head);
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__this_cpu_read(tasklet_hi_vec.tail) = t;
		__this_cpu_write(tasklet_hi_vec.tail, &(t->next));
		__raise_softirq_irqoff(HI_SOFTIRQ);
		local_irq_enable();
	}
}

void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->data = data;
}

EXPORT_SYMBOL(tasklet_init);
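
/*
 * Sketch of typical driver usage (my_dev and my_tasklet_fn are
 * hypothetical names, not part of this file):
 *
 *	static void my_tasklet_fn(unsigned long data)
 *	{
 *		struct my_dev *dev = (struct my_dev *)data;
 *		... deferred work, runs in softirq context ...
 *	}
 *
 *	tasklet_init(&dev->tasklet, my_tasklet_fn, (unsigned long)dev);
 *
 * The hard irq handler then calls tasklet_schedule(&dev->tasklet),
 * and teardown pairs tasklet_kill() (below) with freeing the device.
 */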

void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		printk("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do {
			yield();
		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}

EXPORT_SYMBOL(tasklet_kill);

/*
 * tasklet_hrtimer
 */

/*
 * The trampoline is called when the hrtimer expires. It schedules a tasklet
 * to run __tasklet_hrtimer_trampoline() which in turn will call the intended
 * hrtimer callback, but from softirq context.
 */
static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
{
	struct tasklet_hrtimer *ttimer =
		container_of(timer, struct tasklet_hrtimer, timer);

	tasklet_hi_schedule(&ttimer->tasklet);
	return HRTIMER_NORESTART;
}

/*
 * Helper function which calls the hrtimer callback from
 * tasklet/softirq context
 */
static void __tasklet_hrtimer_trampoline(unsigned long data)
{
	struct tasklet_hrtimer *ttimer = (void *)data;
	enum hrtimer_restart restart;

	restart = ttimer->function(&ttimer->timer);
	if (restart != HRTIMER_NORESTART)
		hrtimer_restart(&ttimer->timer);
}

/**
 * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks
 * @ttimer:	 tasklet_hrtimer which is initialized
 * @function:	 hrtimer callback function which gets called from softirq context
 * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME)
 * @mode:	 hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL)
 */
void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
			  enum hrtimer_restart (*function)(struct hrtimer *),
			  clockid_t which_clock, enum hrtimer_mode mode)
{
	hrtimer_init(&ttimer->timer, which_clock, mode);
	ttimer->timer.function = __hrtimer_tasklet_trampoline;
	tasklet_init(&ttimer->tasklet, __tasklet_hrtimer_trampoline,
		     (unsigned long)ttimer);
	ttimer->function = function;
}
EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);
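
/*
 * Sketch of use (my_cb and th are hypothetical): the callback is
 * driven by an hrtimer but runs from HI_SOFTIRQ context:
 *
 *	static enum hrtimer_restart my_cb(struct hrtimer *t)
 *	{
 *		... softirq-context work ...
 *		return HRTIMER_NORESTART;
 *	}
 *
 *	tasklet_hrtimer_init(&th, my_cb, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	tasklet_hrtimer_start(&th, ktime_set(0, NSEC_PER_MSEC),
 *			      HRTIMER_MODE_REL);
 */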

/*
 * Remote softirq bits
 */

DEFINE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
EXPORT_PER_CPU_SYMBOL(softirq_work_list);

static void __local_trigger(struct call_single_data *cp, int softirq)
{
	struct list_head *head = &__get_cpu_var(softirq_work_list[softirq]);

	list_add_tail(&cp->list, head);

	/* Trigger the softirq only if the list was previously empty. */
	if (head->next == &cp->list)
		raise_softirq_irqoff(softirq);
}

#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
static void remote_softirq_receive(void *data)
{
	struct call_single_data *cp = data;
	unsigned long flags;
	int softirq;

	softirq = cp->priv;

	local_irq_save(flags);
	__local_trigger(cp, softirq);
	local_irq_restore(flags);
}

static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	if (cpu_online(cpu)) {
		cp->func = remote_softirq_receive;
		cp->info = cp;
		cp->flags = 0;
		cp->priv = softirq;

		__smp_call_function_single(cpu, cp, 0);
		return 0;
	}

	return 1;
}
#else /* CONFIG_USE_GENERIC_SMP_HELPERS */
static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	return 1;
}
#endif

/**
 * __send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @this_cpu: the currently executing cpu
 * @softirq: the softirq for the work
 *
 * Attempt to schedule softirq work on a remote cpu. If this cannot be
 * done, the work is instead queued up on the local cpu.
 *
 * Interrupts must be disabled.
 */
void __send_remote_softirq(struct call_single_data *cp, int cpu, int this_cpu, int softirq)
{
	if (cpu == this_cpu || __try_remote_softirq(cp, cpu, softirq))
		__local_trigger(cp, softirq);
}
EXPORT_SYMBOL(__send_remote_softirq);

/**
 * send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @softirq: the softirq for the work
 *
 * Like __send_remote_softirq except that disabling interrupts and
 * computing the current cpu is done for the caller.
 */
void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	unsigned long flags;
	int this_cpu;

	local_irq_save(flags);
	this_cpu = smp_processor_id();
	__send_remote_softirq(cp, cpu, this_cpu, softirq);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(send_remote_softirq);
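
/*
 * Illustrative call (a hypothetical "req" object embedding a
 * call_single_data, e.g. for remote completion work):
 *
 *	send_remote_softirq(&req->csd, target_cpu, BLOCK_SOFTIRQ);
 *
 * If target_cpu is offline or is the local cpu, __send_remote_softirq()
 * falls back to queueing the work on the local softirq_work_list.
 */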

static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
					       unsigned long action, void *hcpu)
{
	/*
	 * If a CPU goes away, splice its entries to the current CPU
	 * and trigger a run of the softirq
	 */
	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
		int cpu = (unsigned long) hcpu;
		int i;

		local_irq_disable();
		for (i = 0; i < NR_SOFTIRQS; i++) {
			struct list_head *head = &per_cpu(softirq_work_list[i], cpu);
			struct list_head *local_head;

			if (list_empty(head))
				continue;

			local_head = &__get_cpu_var(softirq_work_list[i]);
			list_splice_init(head, local_head);
			raise_softirq_irqoff(i);
		}
		local_irq_enable();
	}

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
	.notifier_call	= remote_softirq_cpu_notify,
};

void __init softirq_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		int i;

		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
		for (i = 0; i < NR_SOFTIRQS; i++)
			INIT_LIST_HEAD(&per_cpu(softirq_work_list[i], cpu));
	}

	register_hotcpu_notifier(&remote_softirq_cpu_notifier);

	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

static int run_ksoftirqd(void * __bind_cpu)
{
	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {
		preempt_disable();
		if (!local_softirq_pending()) {
			preempt_enable_no_resched();
			schedule();
			preempt_disable();
		}

		__set_current_state(TASK_RUNNING);

		while (local_softirq_pending()) {
			/* Preempt disable stops cpu going offline.
			   If already offline, we'll be on wrong CPU:
			   don't process */
			if (cpu_is_offline((long)__bind_cpu))
				goto wait_to_die;
			local_irq_disable();
			if (local_softirq_pending())
				__do_softirq();
			local_irq_enable();
			preempt_enable_no_resched();
			cond_resched();
			preempt_disable();
			rcu_note_context_switch((long)__bind_cpu);
		}
		preempt_enable();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;

wait_to_die:
	preempt_enable();
	/* Wait for kthread_stop */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
	struct tasklet_struct **i;

	BUG_ON(cpu_online(cpu));
	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
		return;

	/* CPU is dead, so no lock needed. */
	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
		if (*i == t) {
			*i = t->next;
			/* If this was the tail element, move the tail ptr */
			if (*i == NULL)
				per_cpu(tasklet_vec, cpu).tail = i;
			return;
		}
	}
	BUG();
}

static void takeover_tasklets(unsigned int cpu)
{
	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
		this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
		__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
}
#endif /* CONFIG_HOTPLUG_CPU */

static int __cpuinit cpu_callback(struct notifier_block *nfb,
				  unsigned long action,
				  void *hcpu)
{
	int hotcpu = (unsigned long)hcpu;
	struct task_struct *p;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		p = kthread_create_on_node(run_ksoftirqd,
					   hcpu,
					   cpu_to_node(hotcpu),
					   "ksoftirqd/%d", hotcpu);
		if (IS_ERR(p)) {
			printk("ksoftirqd for %i failed\n", hotcpu);
			return notifier_from_errno(PTR_ERR(p));
		}
		kthread_bind(p, hotcpu);
		per_cpu(ksoftirqd, hotcpu) = p;
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		wake_up_process(per_cpu(ksoftirqd, hotcpu));
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		if (!per_cpu(ksoftirqd, hotcpu))
			break;
		/* Unbind so it can run. Fall thru. */
		kthread_bind(per_cpu(ksoftirqd, hotcpu),
			     cpumask_any(cpu_online_mask));
	case CPU_DEAD:
	case CPU_DEAD_FROZEN: {
		static const struct sched_param param = {
			.sched_priority = MAX_RT_PRIO-1
		};

		p = per_cpu(ksoftirqd, hotcpu);
		per_cpu(ksoftirqd, hotcpu) = NULL;
		sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
		kthread_stop(p);
		takeover_tasklets(hotcpu);
		break;
	}
#endif /* CONFIG_HOTPLUG_CPU */
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cpu_nfb = {
	.notifier_call = cpu_callback
};

static __init int spawn_ksoftirqd(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);

	BUG_ON(err != NOTIFY_OK);
	cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
	register_cpu_notifier(&cpu_nfb);
	return 0;
}
early_initcall(spawn_ksoftirqd);

/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
	return 0;
}

#ifdef CONFIG_GENERIC_HARDIRQS
int __init __weak arch_probe_nr_irqs(void)
{
	return NR_IRQS_LEGACY;
}

int __init __weak arch_early_irq_init(void)
{
	return 0;
}
#endif