/*
 *  linux/arch/arm/common/gic.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Interrupt architecture for the GIC:
 *
 * o There is one Interrupt Distributor, which receives interrupts
 *   from system devices and sends them to the Interrupt Controllers.
 *
 * o There is one CPU Interface per CPU, which sends interrupts sent
 *   by the Distributor, and interrupts generated locally, to the
 *   associated CPU. The base address of the CPU interface is usually
 *   aliased so that the same address points to different chips depending
 *   on the CPU it is accessed from.
 *
 * Note that IRQs 0-31 are special - they are local to each CPU.
 * As such, the enable set/clear, pending set/clear and active bit
 * registers are banked per-cpu for these sources.
 */
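/*
 * Example (illustrative): because IDs 0-31 are banked, the first word of
 * GIC_DIST_ENABLE_SET (covering IDs 0-31) reads back CPU-private state on
 * whichever CPU performs the access, while the second word (IDs 32-63)
 * and above are shared by all CPUs.
 */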
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/smp.h>
#include <linux/cpu_pm.h>
#include <linux/cpumask.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/slab.h>

#include <asm/irq.h>
#include <asm/mach/irq.h>
#include <asm/hardware/gic.h>
static DEFINE_RAW_SPINLOCK(irq_controller_lock);

/* Address of GIC 0 CPU interface */
void __iomem *gic_cpu_base_addr __read_mostly;
/*
 * Supported arch specific GIC irq extension.
 * All hooks default to NULL.
 */
struct irq_chip gic_arch_extn = {
	.irq_eoi	= NULL,
	.irq_mask	= NULL,
	.irq_unmask	= NULL,
	.irq_retrigger	= NULL,
	.irq_set_type	= NULL,
	.irq_set_wake	= NULL,
};

#ifndef MAX_GIC_NR
#define MAX_GIC_NR	1
#endif

static struct gic_chip_data gic_data[MAX_GIC_NR] __read_mostly;
static inline void __iomem *gic_dist_base(struct irq_data *d)
{
	struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
	return gic_data->dist_base;
}

static inline void __iomem *gic_cpu_base(struct irq_data *d)
{
	struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
	return gic_data->cpu_base;
}

static inline unsigned int gic_irq(struct irq_data *d)
{
	struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
	return d->irq - gic_data->irq_offset;
}
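/*
 * Worked example (illustrative): a secondary GIC registered with
 * irq_start 64 gets irq_offset = (64 - 1) & ~31 = 32, so Linux IRQ 96
 * corresponds to interrupt ID 64 on that GIC's Distributor.
 */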
/*
 * Routines to acknowledge, disable and enable interrupts
 */
static void gic_mask_irq(struct irq_data *d)
{
	u32 mask = 1 << (d->irq % 32);

	raw_spin_lock(&irq_controller_lock);
	writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4);
	if (gic_arch_extn.irq_mask)
		gic_arch_extn.irq_mask(d);
	raw_spin_unlock(&irq_controller_lock);
}
static void gic_unmask_irq(struct irq_data *d)
{
	u32 mask = 1 << (d->irq % 32);

	raw_spin_lock(&irq_controller_lock);
	if (gic_arch_extn.irq_unmask)
		gic_arch_extn.irq_unmask(d);
	writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4);
	raw_spin_unlock(&irq_controller_lock);
}
static void gic_eoi_irq(struct irq_data *d)
{
	if (gic_arch_extn.irq_eoi) {
		raw_spin_lock(&irq_controller_lock);
		gic_arch_extn.irq_eoi(d);
		raw_spin_unlock(&irq_controller_lock);
	}

	writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
}
static int gic_set_type(struct irq_data *d, unsigned int type)
{
	void __iomem *base = gic_dist_base(d);
	unsigned int gicirq = gic_irq(d);
	u32 enablemask = 1 << (gicirq % 32);
	u32 enableoff = (gicirq / 32) * 4;
	u32 confmask = 0x2 << ((gicirq % 16) * 2);
	u32 confoff = (gicirq / 16) * 4;
	bool enabled = false;
	u32 val;

	/* Interrupt configuration for SGIs can't be changed */
	if (gicirq < 16)
		return -EINVAL;

	if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
		return -EINVAL;

	raw_spin_lock(&irq_controller_lock);

	if (gic_arch_extn.irq_set_type)
		gic_arch_extn.irq_set_type(d, type);

	val = readl_relaxed(base + GIC_DIST_CONFIG + confoff);
	if (type == IRQ_TYPE_LEVEL_HIGH)
		val &= ~confmask;
	else if (type == IRQ_TYPE_EDGE_RISING)
		val |= confmask;

	/*
	 * As recommended by the spec, disable the interrupt before changing
	 * the configuration
	 */
	if (readl_relaxed(base + GIC_DIST_ENABLE_SET + enableoff) & enablemask) {
		writel_relaxed(enablemask, base + GIC_DIST_ENABLE_CLEAR + enableoff);
		enabled = true;
	}

	writel_relaxed(val, base + GIC_DIST_CONFIG + confoff);

	if (enabled)
		writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff);

	raw_spin_unlock(&irq_controller_lock);

	return 0;
}
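/*
 * Worked example (illustrative): each interrupt has a 2-bit field in
 * GIC_DIST_CONFIG, 16 fields per 32-bit word.  For interrupt ID 35,
 * confoff = (35 / 16) * 4 = 8 and confmask = 0x2 << ((35 % 16) * 2) = 0x80,
 * i.e. bit 7 of the third config word selects edge (1) versus level (0).
 */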
static int gic_retrigger(struct irq_data *d)
{
	if (gic_arch_extn.irq_retrigger)
		return gic_arch_extn.irq_retrigger(d);

	return -ENXIO;
}
#ifdef CONFIG_SMP
static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
			    bool force)
{
	void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
	unsigned int shift = (d->irq % 4) * 8;
	unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
	u32 val, mask, bit;

	if (cpu >= 8 || cpu >= nr_cpu_ids)
		return -EINVAL;

	mask = 0xff << shift;
	bit = 1 << (cpu_logical_map(cpu) + shift);

	raw_spin_lock(&irq_controller_lock);
	val = readl_relaxed(reg) & ~mask;
	writel_relaxed(val | bit, reg);
	raw_spin_unlock(&irq_controller_lock);

	return IRQ_SET_MASK_OK;
}
#endif
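/*
 * Worked example (illustrative): GIC_DIST_TARGET holds one routing byte
 * per interrupt.  For interrupt ID 34, the word-aligned register is at
 * GIC_DIST_TARGET + (34 & ~3) = +0x20 and shift = (34 % 4) * 8 = 16,
 * so the target byte for ID 34 lives in bits [23:16] of that word.
 */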
#ifdef CONFIG_PM
static int gic_set_wake(struct irq_data *d, unsigned int on)
{
	int ret = -ENXIO;

	if (gic_arch_extn.irq_set_wake)
		ret = gic_arch_extn.irq_set_wake(d, on);

	return ret;
}
#else
#define gic_set_wake	NULL
#endif
static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
{
	struct gic_chip_data *chip_data = irq_get_handler_data(irq);
	struct irq_chip *chip = irq_get_chip(irq);
	unsigned int cascade_irq, gic_irq;
	unsigned long status;

	chained_irq_enter(chip, desc);

	raw_spin_lock(&irq_controller_lock);
	status = readl_relaxed(chip_data->cpu_base + GIC_CPU_INTACK);
	raw_spin_unlock(&irq_controller_lock);

	gic_irq = (status & 0x3ff);
	if (gic_irq == 1023)
		goto out;

	cascade_irq = gic_irq + chip_data->irq_offset;
	if (unlikely(gic_irq < 32 || gic_irq > 1020 || cascade_irq >= NR_IRQS))
		do_bad_IRQ(cascade_irq, desc);
	else
		generic_handle_irq(cascade_irq);

 out:
	chained_irq_exit(chip, desc);
}
static struct irq_chip gic_chip = {
	.name			= "GIC",
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_eoi		= gic_eoi_irq,
	.irq_set_type		= gic_set_type,
	.irq_retrigger		= gic_retrigger,
#ifdef CONFIG_SMP
	.irq_set_affinity	= gic_set_affinity,
#endif
	.irq_set_wake		= gic_set_wake,
};
void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
{
	if (gic_nr >= MAX_GIC_NR)
		BUG();
	if (irq_set_handler_data(irq, &gic_data[gic_nr]) != 0)
		BUG();
	irq_set_chained_handler(irq, gic_handle_cascade_irq);
}
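/*
 * Typical usage (illustrative sketch; the IRQ numbers are hypothetical
 * platform constants): after initialising a secondary GIC, a board file
 * would chain it into one interrupt of the primary GIC, e.g.
 *
 *	gic_init(1, IRQ_GIC1_START, gic1_dist_base, gic1_cpu_base);
 *	gic_cascade_irq(1, IRQ_PRIMARY_GIC_CASCADE);
 */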
static void __init gic_dist_init(struct gic_chip_data *gic,
	unsigned int irq_start)
{
	unsigned int gic_irqs, irq_limit, i;
	u32 cpumask;
	void __iomem *base = gic->dist_base;
	u32 cpu = 0;
	u32 nrppis = 0, ppi_base = 0;

#ifdef CONFIG_SMP
	cpu = cpu_logical_map(smp_processor_id());
#endif

	cpumask = 1 << cpu;
	cpumask |= cpumask << 8;
	cpumask |= cpumask << 16;

	writel_relaxed(0, base + GIC_DIST_CTRL);
	/*
	 * Find out how many interrupts are supported.
	 * The GIC only supports up to 1020 interrupt sources.
	 */
	gic_irqs = readl_relaxed(base + GIC_DIST_CTR) & 0x1f;
	gic_irqs = (gic_irqs + 1) * 32;
	if (gic_irqs > 1020)
		gic_irqs = 1020;

	gic->gic_irqs = gic_irqs;
	/*
	 * Nobody would be insane enough to use PPIs on a secondary
	 * GIC, right?
	 */
	if (gic == &gic_data[0]) {
		nrppis = (32 - irq_start) & 31;

		/* The GIC only supports up to 16 PPIs. */
		if (nrppis > 16)
			nrppis = 16;

		ppi_base = gic->irq_offset + 32 - nrppis;
	}

	pr_info("Configuring GIC with %d sources (%d PPIs)\n",
		gic_irqs, (gic == &gic_data[0]) ? nrppis : 0);
	/*
	 * Set all global interrupts to be level triggered, active low.
	 */
	for (i = 32; i < gic_irqs; i += 16)
		writel_relaxed(0, base + GIC_DIST_CONFIG + i * 4 / 16);

	/*
	 * Set all global interrupts to this CPU only.
	 */
	for (i = 32; i < gic_irqs; i += 4)
		writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);

	/*
	 * Set priority on all global interrupts.
	 */
	for (i = 32; i < gic_irqs; i += 4)
		writel_relaxed(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4);

	/*
	 * Disable all interrupts.  Leave the PPI and SGIs alone
	 * as these enables are banked registers.
	 */
	for (i = 32; i < gic_irqs; i += 32)
		writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);
	/*
	 * Limit number of interrupts registered to the platform maximum
	 */
	irq_limit = gic->irq_offset + gic_irqs;
	if (WARN_ON(irq_limit > NR_IRQS))
		irq_limit = NR_IRQS;
	/*
	 * Setup the Linux IRQ subsystem.
	 */
	for (i = 0; i < nrppis; i++) {
		int ppi = i + ppi_base;

		irq_set_percpu_devid(ppi);
		irq_set_chip_and_handler(ppi, &gic_chip,
					 handle_percpu_devid_irq);
		irq_set_chip_data(ppi, gic);
		set_irq_flags(ppi, IRQF_VALID | IRQF_NOAUTOEN);
	}

	for (i = irq_start + nrppis; i < irq_limit; i++) {
		irq_set_chip_and_handler(i, &gic_chip, handle_fasteoi_irq);
		irq_set_chip_data(i, gic);
		set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
	}

	writel_relaxed(1, base + GIC_DIST_CTRL);
}
static void __cpuinit gic_cpu_init(struct gic_chip_data *gic)
{
	void __iomem *dist_base = gic->dist_base;
	void __iomem *base = gic->cpu_base;
	int i;

	/*
	 * Deal with the banked PPI and SGI interrupts - disable all
	 * PPI interrupts, ensure all SGI interrupts are enabled.
	 */
	writel_relaxed(0xffff0000, dist_base + GIC_DIST_ENABLE_CLEAR);
	writel_relaxed(0x0000ffff, dist_base + GIC_DIST_ENABLE_SET);

	/*
	 * Set priority on PPI and SGI interrupts
	 */
	for (i = 0; i < 32; i += 4)
		writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4 / 4);

	writel_relaxed(0xf0, base + GIC_CPU_PRIMASK);
	writel_relaxed(1, base + GIC_CPU_CTRL);
}
#ifdef CONFIG_CPU_PM
/*
 * Saves the GIC distributor registers during suspend or idle.  Must be called
 * with interrupts disabled but before powering down the GIC.  After calling
 * this function, no interrupts will be delivered by the GIC, and another
 * platform-specific wakeup source must be enabled.
 */
static void gic_dist_save(unsigned int gic_nr)
{
	unsigned int gic_irqs;
	void __iomem *dist_base;
	int i;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	gic_irqs = gic_data[gic_nr].gic_irqs;
	dist_base = gic_data[gic_nr].dist_base;

	if (!dist_base)
		return;

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
		gic_data[gic_nr].saved_spi_conf[i] =
			readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		gic_data[gic_nr].saved_spi_target[i] =
			readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
		gic_data[gic_nr].saved_spi_enable[i] =
			readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
}
/*
 * Restores the GIC distributor registers during resume or when coming out of
 * idle.  Must be called before enabling interrupts.  If a level interrupt
 * that occurred while the GIC was suspended is still present, it will be
 * handled normally, but any edge interrupts that occurred will not be seen by
 * the GIC and need to be handled by the platform-specific wakeup source.
 */
static void gic_dist_restore(unsigned int gic_nr)
{
	unsigned int gic_irqs;
	unsigned int i;
	void __iomem *dist_base;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	gic_irqs = gic_data[gic_nr].gic_irqs;
	dist_base = gic_data[gic_nr].dist_base;

	if (!dist_base)
		return;

	writel_relaxed(0, dist_base + GIC_DIST_CTRL);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
		writel_relaxed(gic_data[gic_nr].saved_spi_conf[i],
			dist_base + GIC_DIST_CONFIG + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		writel_relaxed(0xa0a0a0a0,
			dist_base + GIC_DIST_PRI + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		writel_relaxed(gic_data[gic_nr].saved_spi_target[i],
			dist_base + GIC_DIST_TARGET + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
		writel_relaxed(gic_data[gic_nr].saved_spi_enable[i],
			dist_base + GIC_DIST_ENABLE_SET + i * 4);

	writel_relaxed(1, dist_base + GIC_DIST_CTRL);
}
static void gic_cpu_save(unsigned int gic_nr)
{
	int i;
	u32 *ptr;
	void __iomem *dist_base;
	void __iomem *cpu_base;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	dist_base = gic_data[gic_nr].dist_base;
	cpu_base = gic_data[gic_nr].cpu_base;

	if (!dist_base || !cpu_base)
		return;

	ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
		ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);

	ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
	for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
		ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
}
static void gic_cpu_restore(unsigned int gic_nr)
{
	int i;
	u32 *ptr;
	void __iomem *dist_base;
	void __iomem *cpu_base;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	dist_base = gic_data[gic_nr].dist_base;
	cpu_base = gic_data[gic_nr].cpu_base;

	if (!dist_base || !cpu_base)
		return;

	ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
		writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4);

	ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
	for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
		writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4);

	for (i = 0; i < DIV_ROUND_UP(32, 4); i++)
		writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4);

	writel_relaxed(0xf0, cpu_base + GIC_CPU_PRIMASK);
	writel_relaxed(1, cpu_base + GIC_CPU_CTRL);
}
static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
	int i;

	for (i = 0; i < MAX_GIC_NR; i++) {
		switch (cmd) {
		case CPU_PM_ENTER:
			gic_cpu_save(i);
			break;
		case CPU_PM_ENTER_FAILED:
		case CPU_PM_EXIT:
			gic_cpu_restore(i);
			break;
		case CPU_CLUSTER_PM_ENTER:
			gic_dist_save(i);
			break;
		case CPU_CLUSTER_PM_ENTER_FAILED:
		case CPU_CLUSTER_PM_EXIT:
			gic_dist_restore(i);
			break;
		}
	}

	return NOTIFY_OK;
}

static struct notifier_block gic_notifier_block = {
	.notifier_call = gic_notifier,
};
static void __init gic_pm_init(struct gic_chip_data *gic)
{
	gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
		sizeof(u32));
	BUG_ON(!gic->saved_ppi_enable);

	gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4,
		sizeof(u32));
	BUG_ON(!gic->saved_ppi_conf);

	cpu_pm_register_notifier(&gic_notifier_block);
}
#else
static void __init gic_pm_init(struct gic_chip_data *gic)
{
}
#endif /* CONFIG_CPU_PM */
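/*
 * Usage note (illustrative): with CONFIG_CPU_PM enabled, the cpu_pm
 * notifier chain calls gic_notifier() around low-power transitions.
 * Platform idle code typically brackets a state that powers down the
 * GIC with something like
 *
 *	cpu_pm_enter();
 *	cpu_cluster_pm_enter();
 *	... enter the low-power state ...
 *	cpu_cluster_pm_exit();
 *	cpu_pm_exit();
 *
 * which drives the gic_cpu_save/restore and gic_dist_save/restore
 * paths above.
 */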
void __init gic_init(unsigned int gic_nr, unsigned int irq_start,
	void __iomem *dist_base, void __iomem *cpu_base)
{
	struct gic_chip_data *gic;

	BUG_ON(gic_nr >= MAX_GIC_NR);

	gic = &gic_data[gic_nr];
	gic->dist_base = dist_base;
	gic->cpu_base = cpu_base;
	gic->irq_offset = (irq_start - 1) & ~31;

	if (gic_nr == 0)
		gic_cpu_base_addr = cpu_base;

	gic_chip.flags |= gic_arch_extn.flags;
	gic_dist_init(gic, irq_start);
	gic_cpu_init(gic);
	gic_pm_init(gic);
}
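/*
 * Typical boot-time usage (illustrative sketch; base addresses and the
 * starting IRQ number are platform-specific, and GIC_DIST_PHYS and
 * GIC_CPU_PHYS are hypothetical constants):
 *
 *	gic_init(0, 29, ioremap(GIC_DIST_PHYS, SZ_4K),
 *		 ioremap(GIC_CPU_PHYS, SZ_256));
 *
 * An irq_start of 29 registers the banked PPIs from 29 upwards as
 * per-cpu IRQs and the shared SPIs from 32 upwards as normal IRQs.
 */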
void __cpuinit gic_secondary_init(unsigned int gic_nr)
{
	BUG_ON(gic_nr >= MAX_GIC_NR);

	gic_cpu_init(&gic_data[gic_nr]);
}
#ifdef CONFIG_SMP
void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
	int cpu;
	unsigned long map = 0;

	/* Convert our logical CPU mask into a physical one. */
	for_each_cpu(cpu, mask)
		map |= 1 << cpu_logical_map(cpu);

	/*
	 * Ensure that stores to Normal memory are visible to the
	 * other CPUs before issuing the IPI.
	 */
	dsb();

	/* this always happens on GIC0 */
	writel_relaxed(map << 16 | irq, gic_data[0].dist_base + GIC_DIST_SOFTINT);
}
#endif
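/*
 * Usage note (illustrative): SMP platforms usually route their IPIs
 * through this helper from their SMP init code, e.g.
 *
 *	set_smp_cross_call(gic_raise_softirq);
 *
 * after which smp_cross_call() writes GIC_DIST_SOFTINT on GIC 0 to
 * raise SGIs on the CPUs in the mask.
 */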