/*
 * Common interrupt code for 32 and 64 bit
 */
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/smp.h>
#include <linux/ftrace.h>
#include <linux/delay.h>
#include <linux/export.h>

#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/idle.h>
#include <asm/irq.h>
#include <asm/mce.h>
#include <asm/hw_irq.h>
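
/* Incremented on spurious/erroneous interrupts; shown as "ERR" in /proc/interrupts. */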
atomic_t irq_err_count;

/* Function pointer for generic interrupt vector handling */
void (*x86_platform_ipi_callback)(void) = NULL;
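
/*
 * A platform hooks the vector by assigning this pointer during setup, e.g.
 * (illustrative sketch only; "example_platform_ipi" is a made-up name):
 *
 *        static void example_platform_ipi(void) { ... }
 *        x86_platform_ipi_callback = example_platform_ipi;
 *
 * The callback then runs in hard interrupt context from
 * smp_x86_platform_ipi() below.
 */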

/*
 * 'what should we do if we get a hw irq event on an illegal vector?'
 * Each architecture has to answer this itself.
 */
void ack_bad_irq(unsigned int irq)
{
        if (printk_ratelimit())
                pr_err("unexpected IRQ trap at vector %02x\n", irq);

        /*
         * Currently unexpected vectors happen only on SMP and APIC.
         * We _must_ ack these because every local APIC has only N
         * irq slots per priority level, and a 'hanging, unacked' IRQ
         * holds up an irq slot - in excessive cases (when multiple
         * unexpected vectors occur) that might lock up the APIC
         * completely.
         * But only ack when the APIC is enabled -AK
         */
        ack_APIC_irq();
}

#define irq_stats(x)                (&per_cpu(irq_stat, x))
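/*
 * irq_stat is the arch's per-cpu irq_cpustat_t; the counters read through
 * irq_stats() (e.g. irq_stats(cpu)->irq_resched_count) are the ones
 * printed and summed below.
 */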

/*
 * /proc/interrupts printing for arch specific interrupts
 */
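/*
 * Example rows (illustrative values only, two online CPUs; column width
 * depends on 'prec'):
 *
 *        NMI:          0          0   Non-maskable interrupts
 *        LOC:      81234      79801   Local timer interrupts
 */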
int arch_show_interrupts(struct seq_file *p, int prec)
{
        int j;

        seq_printf(p, "%*s: ", prec, "NMI");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->__nmi_count);
        seq_printf(p, "  Non-maskable interrupts\n");
#ifdef CONFIG_X86_LOCAL_APIC
        seq_printf(p, "%*s: ", prec, "LOC");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->apic_timer_irqs);
        seq_printf(p, "  Local timer interrupts\n");

        seq_printf(p, "%*s: ", prec, "SPU");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count);
        seq_printf(p, "  Spurious interrupts\n");
        seq_printf(p, "%*s: ", prec, "PMI");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->apic_perf_irqs);
        seq_printf(p, "  Performance monitoring interrupts\n");
        seq_printf(p, "%*s: ", prec, "IWI");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->apic_irq_work_irqs);
        seq_printf(p, "  IRQ work interrupts\n");
#endif
        if (x86_platform_ipi_callback) {
                seq_printf(p, "%*s: ", prec, "PLT");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", irq_stats(j)->x86_platform_ipis);
                seq_printf(p, "  Platform interrupts\n");
        }
#ifdef CONFIG_SMP
        seq_printf(p, "%*s: ", prec, "RES");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
        seq_printf(p, "  Rescheduling interrupts\n");
        seq_printf(p, "%*s: ", prec, "CAL");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
        seq_printf(p, "  Function call interrupts\n");
        seq_printf(p, "%*s: ", prec, "TLB");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
        seq_printf(p, "  TLB shootdowns\n");
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR
        seq_printf(p, "%*s: ", prec, "TRM");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_thermal_count);
        seq_printf(p, "  Thermal event interrupts\n");
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
        seq_printf(p, "%*s: ", prec, "THR");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count);
        seq_printf(p, "  Threshold APIC interrupts\n");
#endif
#ifdef CONFIG_X86_MCE
        seq_printf(p, "%*s: ", prec, "MCE");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(mce_exception_count, j));
        seq_printf(p, "  Machine check exceptions\n");
        seq_printf(p, "%*s: ", prec, "MCP");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
        seq_printf(p, "  Machine check polls\n");
#endif
        seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
#if defined(CONFIG_X86_IO_APIC)
        seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
#endif
        return 0;
}

/*
 * /proc/stat helpers
 */
u64 arch_irq_stat_cpu(unsigned int cpu)
{
        u64 sum = irq_stats(cpu)->__nmi_count;

#ifdef CONFIG_X86_LOCAL_APIC
        sum += irq_stats(cpu)->apic_timer_irqs;
        sum += irq_stats(cpu)->irq_spurious_count;
        sum += irq_stats(cpu)->apic_perf_irqs;
        sum += irq_stats(cpu)->apic_irq_work_irqs;
#endif
        if (x86_platform_ipi_callback)
                sum += irq_stats(cpu)->x86_platform_ipis;
#ifdef CONFIG_SMP
        sum += irq_stats(cpu)->irq_resched_count;
        sum += irq_stats(cpu)->irq_call_count;
        sum += irq_stats(cpu)->irq_tlb_count;
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR
        sum += irq_stats(cpu)->irq_thermal_count;
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
        sum += irq_stats(cpu)->irq_threshold_count;
#endif
#ifdef CONFIG_X86_MCE
        sum += per_cpu(mce_exception_count, cpu);
        sum += per_cpu(mce_poll_count, cpu);
#endif
        return sum;
}
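
/*
 * The per-cpu totals above and the global total below feed the "intr"
 * line of /proc/stat; irq_err_count is the only arch-global counter here.
 */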
u64 arch_irq_stat(void)
{
        u64 sum = atomic_read(&irq_err_count);
        return sum;
}

/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);

        /* high bit used in ret_from_ code */
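        /*
         * The entry stubs push the ones' complement of the vector number
         * into orig_ax, so the stored value is always negative and an
         * interrupt frame cannot be mistaken for a syscall frame on the
         * return path; complementing again recovers the vector.
         */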
        unsigned vector = ~regs->orig_ax;
        unsigned irq;

        irq_enter();
        exit_idle();

        irq = __this_cpu_read(vector_irq[vector]);

        if (!handle_irq(irq, regs)) {
                ack_APIC_irq();

                if (printk_ratelimit())
                        pr_emerg("%s: %d.%d No irq handler for vector (irq %d)\n",
                                __func__, smp_processor_id(), vector, irq);
        }

        irq_exit();

        set_irq_regs(old_regs);
        return 1;
}

/*
 * Handler for X86_PLATFORM_IPI_VECTOR.
 */
void smp_x86_platform_ipi(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);

        ack_APIC_irq();

        irq_enter();

        exit_idle();

        inc_irq_stat(x86_platform_ipis);

        if (x86_platform_ipi_callback)
                x86_platform_ipi_callback();

        irq_exit();

        set_irq_regs(old_regs);
}

EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq);

#ifdef CONFIG_HOTPLUG_CPU
/*
 * This cpu is going to be removed and its vectors migrated to the remaining
 * online cpus.  Check to see if there are enough vectors in the remaining cpus.
 * This function is protected by stop_machine().
 */
int check_irq_vectors_for_cpu_disable(void)
{
        int irq, cpu;
        unsigned int this_cpu, vector, this_count, count;
        struct irq_desc *desc;
        struct irq_data *data;
        struct cpumask affinity_new, online_new;

        this_cpu = smp_processor_id();
        cpumask_copy(&online_new, cpu_online_mask);
        cpumask_clear_cpu(this_cpu, &online_new);

        this_count = 0;
        for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
                irq = __this_cpu_read(vector_irq[vector]);
                if (irq >= 0) {
                        desc = irq_to_desc(irq);
                        data = irq_desc_get_irq_data(desc);
                        cpumask_copy(&affinity_new, data->affinity);
                        cpumask_clear_cpu(this_cpu, &affinity_new);

                        /* Do not count inactive or per-cpu irqs. */
                        if (!irq_has_action(irq) || irqd_is_per_cpu(data))
                                continue;

                        /*
                         * A single irq may be mapped to multiple
                         * cpu's vector_irq[] (for example IOAPIC cluster
                         * mode).  In this case we have two
                         * possibilities:
                         *
                         * 1) the resulting affinity mask is empty; that is,
                         * the down'd cpu is the last cpu in the irq's
                         * affinity mask, or
                         *
                         * 2) the resulting affinity mask is no longer
                         * a subset of the online cpus but the affinity
                         * mask is not zero; that is, the down'd cpu is the
                         * last online cpu in a user set affinity mask.
                         */
                        if (cpumask_empty(&affinity_new) ||
                            !cpumask_subset(&affinity_new, &online_new))
                                this_count++;
                }
        }

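        /*
         * Second pass: count the vector slots still free on the CPUs that
         * remain online (a vector_irq[] entry below zero means the slot is
         * unused), to see whether the irqs counted above can be absorbed.
         */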
        count = 0;
        for_each_online_cpu(cpu) {
                if (cpu == this_cpu)
                        continue;
                for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
                     vector++) {
                        if (per_cpu(vector_irq, cpu)[vector] < 0)
                                count++;
                }
        }

        if (count < this_count) {
                pr_warn("CPU %d disable failed: CPU has %u vectors assigned and there are only %u available.\n",
                        this_cpu, this_count, count);
                return -ERANGE;
        }

        return 0;
}

/* A cpu has been removed from cpu_online_mask.  Reset irq affinities. */
void fixup_irqs(void)
{
        unsigned int irq, vector;
        static int warned;
        struct irq_desc *desc;
        struct irq_data *data;
        struct irq_chip *chip;

        for_each_irq_desc(irq, desc) {
                int break_affinity = 0;
                int set_affinity = 1;
                const struct cpumask *affinity;

                if (!desc)
                        continue;
                /* irq 2 is the cascade and is never moved */
                if (irq == 2)
                        continue;

                /* interrupts are disabled at this point */
                raw_spin_lock(&desc->lock);

                data = irq_desc_get_irq_data(desc);
                affinity = data->affinity;
                if (!irq_has_action(irq) || irqd_is_per_cpu(data) ||
                    cpumask_subset(affinity, cpu_online_mask)) {
                        raw_spin_unlock(&desc->lock);
                        continue;
                }

                /*
                 * Complete the irq move.  This cpu is going down and for
                 * the non intr-remapping case, we can't wait till this
                 * interrupt arrives at this cpu before completing the
                 * irq move.
                 */
                irq_force_complete_move(irq);

                if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
                        break_affinity = 1;
                        affinity = cpu_all_mask;
                }

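                /*
                 * Chips that cannot move an irq from process context have
                 * the line masked across the affinity change so it cannot
                 * fire while the vector move is in progress (see
                 * irqd_can_move_in_process_context()).
                 */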
                chip = irq_data_get_irq_chip(data);
                if (!irqd_can_move_in_process_context(data) && chip->irq_mask)
                        chip->irq_mask(data);

                if (chip->irq_set_affinity)
                        chip->irq_set_affinity(data, affinity, true);
                else if (!(warned++))
                        set_affinity = 0;

                if (!irqd_can_move_in_process_context(data) &&
                    !irqd_irq_disabled(data) && chip->irq_unmask)
                        chip->irq_unmask(data);

                raw_spin_unlock(&desc->lock);

                if (break_affinity && set_affinity)
                        pr_notice("Broke affinity for irq %i\n", irq);
                else if (!set_affinity)
                        pr_notice("Cannot set affinity for irq %i\n", irq);
        }

        /*
         * We can remove mdelay() and then send spurious interrupts to
         * new cpu targets for all the irqs that were handled previously by
         * this cpu. While it works, I have seen spurious interrupt messages
         * (nothing wrong but still...).
         *
         * So for now, retain mdelay(1) and check the IRR and then send those
         * interrupts to new targets as this cpu is already offlined...
         */
        mdelay(1);

        for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
                unsigned int irr;

                if (__this_cpu_read(vector_irq[vector]) < 0)
                        continue;

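                /*
                 * APIC_IRR is a 256-bit register exposed as eight 32-bit
                 * words spaced 0x10 apart: vector/32 selects the word,
                 * vector%32 the bit (e.g. vector 0x61 -> word 3, bit 1).
                 */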
                irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
                if (irr & (1 << (vector % 32))) {
                        irq = __this_cpu_read(vector_irq[vector]);

                        desc = irq_to_desc(irq);
                        data = irq_desc_get_irq_data(desc);
                        chip = irq_data_get_irq_chip(data);
                        raw_spin_lock(&desc->lock);
                        if (chip->irq_retrigger)
                                chip->irq_retrigger(data);
                        raw_spin_unlock(&desc->lock);
                }
        }
}
#endif /* CONFIG_HOTPLUG_CPU */