/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved.
 */
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/irq.h>

#include <asm/apic.h>
#include <asm/uv/uv_irq.h>
#include <asm/uv/uv_hub.h>
/* MMR offset and pnode of hub sourcing interrupts for a given irq */
struct uv_irq_2_mmr_pnode {
	struct rb_node		list;
	unsigned long		offset;
	int			irq;
	int			pnode;
};
static DEFINE_SPINLOCK(uv_irq_lock);
static struct rb_root		uv_irq_root;
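/* uv_irq_lock protects uv_irq_root and every node linked into it. */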
static int uv_set_irq_affinity(unsigned int, const struct cpumask *);
static void uv_noop(unsigned int irq)
{
}
static unsigned int uv_noop_ret(unsigned int irq)
{
	return 0;
}
static void uv_ack_apic(unsigned int irq)
{
	ack_APIC_irq();
}
static struct irq_chip uv_irq_chip = {
	.name		= "UV-CORE",
	.startup	= uv_noop_ret,
	.shutdown	= uv_noop,
	.enable		= uv_noop,
	.disable	= uv_noop,
	.ack		= uv_noop,
	.mask		= uv_noop,
	.unmask		= uv_noop,
	.eoi		= uv_ack_apic,
	.end		= uv_noop,
	.set_affinity	= uv_set_irq_affinity,
};
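/*
 * Hub-sourced interrupts need no chip-level mask/unmask handling, so most
 * operations above are no-ops; the real work is the end-of-interrupt ack at
 * the local APIC (uv_ack_apic) and re-targeting via uv_set_irq_affinity().
 */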
/*
 * Add offset and pnode information of the hub sourcing interrupts to the
 * rb tree for a specific irq.
 */
static int uv_set_irq_2_mmr_info(int irq, unsigned long offset, unsigned blade)
{
	struct rb_node **link = &uv_irq_root.rb_node;
	struct rb_node *parent = NULL;
	struct uv_irq_2_mmr_pnode *n;
	struct uv_irq_2_mmr_pnode *e;
	unsigned long irqflags;
	n = kmalloc_node(sizeof(struct uv_irq_2_mmr_pnode), GFP_KERNEL,
				uv_blade_to_memory_nid(blade));
	if (!n)
		return -ENOMEM;

	n->irq = irq;
	n->offset = offset;
	n->pnode = uv_blade_to_pnode(blade);
	spin_lock_irqsave(&uv_irq_lock, irqflags);
	/* Find the right place in the rbtree: */
	while (*link) {
		parent = *link;
		e = rb_entry(parent, struct uv_irq_2_mmr_pnode, list);

		if (unlikely(irq == e->irq)) {
			/* irq entry exists: update it in place */
			e->pnode = uv_blade_to_pnode(blade);
			e->offset = offset;
			spin_unlock_irqrestore(&uv_irq_lock, irqflags);
			kfree(n);
			return 0;
		}

		if (irq < e->irq)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	/* Insert the node into the rbtree. */
	rb_link_node(&n->list, parent, link);
	rb_insert_color(&n->list, &uv_irq_root);

	spin_unlock_irqrestore(&uv_irq_lock, irqflags);

	return 0;
}
/* Retrieve offset and pnode information from the rb tree for a specific irq */
int uv_irq_2_mmr_info(int irq, unsigned long *offset, int *pnode)
{
	struct uv_irq_2_mmr_pnode *e;
	struct rb_node *n;
	unsigned long irqflags;

	spin_lock_irqsave(&uv_irq_lock, irqflags);
	n = uv_irq_root.rb_node;
	while (n) {
		e = rb_entry(n, struct uv_irq_2_mmr_pnode, list);

		if (e->irq == irq) {
			*offset = e->offset;
			*pnode = e->pnode;
			spin_unlock_irqrestore(&uv_irq_lock, irqflags);
			return 0;
		}

		if (irq < e->irq)
			n = n->rb_left;
		else
			n = n->rb_right;
	}
	spin_unlock_irqrestore(&uv_irq_lock, irqflags);
	return -1;
}
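/*
 * Illustrative (not part of the original file): a caller that stored
 * routing information via uv_set_irq_2_mmr_info() can later rewrite the
 * routing MMR without caching offset/pnode itself:
 *
 *	unsigned long offset;
 *	int pnode;
 *
 *	if (uv_irq_2_mmr_info(irq, &offset, &pnode) == 0)
 *		uv_write_global_mmr64(pnode, offset, mmr_value);
 */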
/*
 * Re-target the irq to the specified CPU and enable the specified MMR located
 * on the specified blade to allow the sending of MSIs to the specified CPU.
 */
static int
arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
		   unsigned long mmr_offset, int limit)
{
	const struct cpumask *eligible_cpu = cpumask_of(cpu);
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_cfg *cfg;
	int mmr_pnode;
	unsigned long mmr_value;
	struct uv_IO_APIC_route_entry *entry;
	int err;

	BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) !=
			sizeof(unsigned long));

	cfg = irq_cfg(irq);

	err = assign_irq_vector(irq, cfg, eligible_cpu);
	if (err != 0)
		return err;

	if (limit == UV_AFFINITY_CPU)
		desc->status |= IRQ_NO_BALANCING;
	else
		desc->status |= IRQ_MOVE_PCNTXT;

	set_irq_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq,
				      irq_name);

	mmr_value = 0;
	entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
	entry->vector		= cfg->vector;
	entry->delivery_mode	= apic->irq_delivery_mode;
	entry->dest_mode	= apic->irq_dest_mode;
	entry->polarity		= 0;
	entry->trigger		= 0;
	entry->mask		= 0;
	entry->dest		= apic->cpu_mask_to_apicid(eligible_cpu);

	mmr_pnode = uv_blade_to_pnode(mmr_blade);
	uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);

	if (cfg->move_in_progress)
		send_cleanup_vector(cfg);

	return irq;
}
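/*
 * Note: arch_enable_uv_irq() above and arch_disable_uv_irq() below both
 * build the MMR value through a struct uv_IO_APIC_route_entry overlay; the
 * hub interprets the interrupt MMR in the same format as an IO-APIC route
 * entry, which is what the BUILD_BUG_ON() size checks enforce.
 */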
/*
 * Disable the specified MMR located on the specified blade so that MSIs are
 * no longer allowed to be sent.
 */
static void arch_disable_uv_irq(int mmr_pnode, unsigned long mmr_offset)
{
	unsigned long mmr_value;
	struct uv_IO_APIC_route_entry *entry;

	BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) !=
			sizeof(unsigned long));

	mmr_value = 0;
	entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
	entry->mask = 1;

	uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
}
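/*
 * With mmr_value zeroed and only the mask bit set, the hub stops sending
 * MSIs for this source until arch_enable_uv_irq() reprograms the entry.
 */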
static int uv_set_irq_affinity(unsigned int irq, const struct cpumask *mask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_cfg *cfg = desc->chip_data;
	unsigned int dest;
	unsigned long mmr_value;
	struct uv_IO_APIC_route_entry *entry;
	unsigned long mmr_offset;
	int mmr_pnode;

	if (set_desc_affinity(desc, mask, &dest))
		return -1;

	mmr_value = 0;
	entry = (struct uv_IO_APIC_route_entry *)&mmr_value;

	entry->vector		= cfg->vector;
	entry->delivery_mode	= apic->irq_delivery_mode;
	entry->dest_mode	= apic->irq_dest_mode;
	entry->polarity		= 0;
	entry->trigger		= 0;
	entry->mask		= 0;
	entry->dest		= dest;

	/* Get previously stored MMR and pnode of hub sourcing interrupts */
	if (uv_irq_2_mmr_info(irq, &mmr_offset, &mmr_pnode))
		return -1;

	uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);

	if (cfg->move_in_progress)
		send_cleanup_vector(cfg);

	return 0;
}
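/*
 * uv_set_irq_affinity() rewrites the same route-entry MMR that
 * arch_enable_uv_irq() programmed; the rb tree lookup recovers which MMR
 * belongs to this irq, so only the destination needs to change.
 */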
/*
 * Set up a mapping of an available irq and vector, and enable the specified
 * MMR that defines the MSI that is to be sent to the specified CPU when an
 * interrupt is raised.
 */
int uv_setup_irq(char *irq_name, int cpu, int mmr_blade,
		 unsigned long mmr_offset, int limit)
{
	int irq, ret;

	irq = create_irq_nr(NR_IRQS_LEGACY, uv_blade_to_memory_nid(mmr_blade));

	if (irq <= 0)
		return -EBUSY;

	ret = arch_enable_uv_irq(irq_name, irq, cpu, mmr_blade, mmr_offset,
		limit);
	if (ret == irq)
		uv_set_irq_2_mmr_info(irq, mmr_offset, mmr_blade);
	else
		destroy_irq(irq);

	return ret;
}
EXPORT_SYMBOL_GPL(uv_setup_irq);
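/*
 * Illustrative usage (not part of the original file; the handler and names
 * are placeholders): a driver on UV hardware pairs uv_setup_irq() with
 * uv_teardown_irq() below and routes the irq through the normal
 * request_irq() path:
 *
 *	int irq = uv_setup_irq("my-uv-dev", cpu, blade, my_mmr_offset,
 *			       UV_AFFINITY_CPU);
 *	if (irq < 0)
 *		return irq;
 *	if (request_irq(irq, my_handler, 0, "my-uv-dev", my_dev)) {
 *		uv_teardown_irq(irq);
 *		return -EBUSY;
 *	}
 *	...
 *	free_irq(irq, my_dev);
 *	uv_teardown_irq(irq);
 */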
/*
 * Tear down a mapping of an irq and vector, and disable the specified MMR
 * that defined the MSI that was to be sent to the specified CPU when an
 * interrupt was raised.
 *
 * The MMR offset and pnode saved by uv_setup_irq() are looked up in the
 * rb tree, so the caller only needs to pass the irq.
 */
void uv_teardown_irq(unsigned int irq)
{
	struct uv_irq_2_mmr_pnode *e;
	struct rb_node *n;
	unsigned long irqflags;

	spin_lock_irqsave(&uv_irq_lock, irqflags);
	n = uv_irq_root.rb_node;
	while (n) {
		e = rb_entry(n, struct uv_irq_2_mmr_pnode, list);
		if (e->irq == irq) {
			arch_disable_uv_irq(e->pnode, e->offset);
			rb_erase(n, &uv_irq_root);
			kfree(e);
			break;
		}
		if (irq < e->irq)
			n = n->rb_left;
		else
			n = n->rb_right;
	}
	spin_unlock_irqrestore(&uv_irq_lock, irqflags);
	destroy_irq(irq);
}
EXPORT_SYMBOL_GPL(uv_teardown_irq);