/*
 * Low-level SPU handling
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/wait.h>

#include <asm/io.h>
#include <asm/prom.h>
#include <asm/semaphore.h>
#include <asm/spu.h>
#include <asm/mmu_context.h>

#include "interrupt.h"
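
/*
 * Overview (editor's note, inferred from the code below): each SPE raises
 * three classes of interrupts, and this file provides one handler per class:
 *
 *	class 0 - error conditions (invalid DMA, DMA alignment, SPU error)
 *	class 1 - address translation faults (SLB segment and hash misses)
 *	class 2 - mailbox, stop-and-signal, halt and DMA tag-group events
 *
 * The handlers dispatch to the __spu_trap_* helpers; anything that needs
 * process context is deferred through the *_callback hooks in struct spu.
 */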

static int __spu_trap_invalid_dma(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	force_sig(SIGBUS, /* info, */ current);
	return 0;
}

static int __spu_trap_dma_align(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	force_sig(SIGBUS, /* info, */ current);
	return 0;
}

static int __spu_trap_error(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	force_sig(SIGILL, /* info, */ current);
	return 0;
}
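
/*
 * Editor's note: on a translation fault the MFC suspends the offending DMA
 * command.  Once the fault has been resolved, writing
 * MFC_CNTL_RESTART_DMA_COMMAND lets the MFC retry it -- unless a context
 * switch is pending, in which case the switch code is expected to restart
 * the queue itself.
 */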

static void spu_restart_dma(struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	if (!test_bit(SPU_CONTEXT_SWITCH_PENDING_nr, &spu->flags))
		out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
}
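
/*
 * Editor's note: the MFC has a small software-managed SLB (eight entries,
 * judging by the wrap-around below).  On a segment miss we build an
 * ESID/VSID pair for the faulting address and load it into the slot
 * selected by spu->slb_replace, replacing entries round-robin.
 */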

static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	struct mm_struct *mm = spu->mm;
	u64 esid, vsid;

	pr_debug("%s\n", __FUNCTION__);

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE_nr, &spu->flags)) {
		/* SLBs are pre-loaded for context switch, so
		 * we should never get here!
		 */
		printk("%s: invalid access during switch!\n", __func__);
		return 1;
	}

	if (!mm || (REGION_ID(ea) != USER_REGION_ID)) {
		/* Future: support kernel segments so that drivers
		 * can use SPUs.
		 */
		pr_debug("invalid region access at %016lx\n", ea);
		return 1;
	}

	esid = (ea & ESID_MASK) | SLB_ESID_V;
	vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) | SLB_VSID_USER;
	if (in_hugepage_area(mm->context, ea))
		vsid |= SLB_VSID_L;

	out_be64(&priv2->slb_index_W, spu->slb_replace);
	out_be64(&priv2->slb_vsid_RW, vsid);
	out_be64(&priv2->slb_esid_RW, esid);

	spu->slb_replace++;
	if (spu->slb_replace >= 8)
		spu->slb_replace = 0;

	spu_restart_dma(spu);

	return 0;
}

extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); //XXX
static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
{
	pr_debug("%s\n", __FUNCTION__);

	/* Handle kernel space hash faults immediately.
	   User hash faults need to be deferred to process context. */
	if ((dsisr & MFC_DSISR_PTE_NOT_FOUND)
	    && REGION_ID(ea) != USER_REGION_ID
	    && hash_page(ea, _PAGE_PRESENT, 0x300) == 0) {
		spu_restart_dma(spu);
		return 0;
	}

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE_nr, &spu->flags)) {
		printk("%s: invalid access during switch!\n", __func__);
		return 1;
	}

	/* save the fault information for spu_irq_class_1_bottom(), which
	 * runs in process context on behalf of the context owner */
	spu->dar = ea;
	spu->dsisr = dsisr;
	mb();
	if (spu->stop_callback)
		spu->stop_callback(spu);
	return 0;
}
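
/*
 * Editor's note: the mailbox interrupt conditions stay asserted until the
 * mailbox is drained, so the two handlers below mask the corresponding bit
 * in int_mask_class2_RW and leave it to the ibox/wbox consumers to
 * re-enable it once they have made progress.  Presumably this is what
 * keeps the interrupt from storming while user space catches up.
 */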

static int __spu_trap_mailbox(struct spu *spu)
{
	if (spu->ibox_callback)
		spu->ibox_callback(spu);

	/* atomically disable SPU mailbox interrupts */
	spin_lock(&spu->register_lock);
	out_be64(&spu->priv1->int_mask_class2_RW,
		in_be64(&spu->priv1->int_mask_class2_RW) & ~0x1);
	spin_unlock(&spu->register_lock);
	return 0;
}

static int __spu_trap_stop(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	spu->stop_code = in_be32(&spu->problem->spu_status_R);
	if (spu->stop_callback)
		spu->stop_callback(spu);
	return 0;
}

static int __spu_trap_halt(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	spu->stop_code = in_be32(&spu->problem->spu_status_R);
	if (spu->stop_callback)
		spu->stop_callback(spu);
	return 0;
}

static int __spu_trap_tag_group(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	/* wake_up(&spu->dma_wq); */
	return 0;
}

static int __spu_trap_spubox(struct spu *spu)
{
	if (spu->wbox_callback)
		spu->wbox_callback(spu);

	/* atomically disable SPU mailbox interrupts */
	spin_lock(&spu->register_lock);
	out_be64(&spu->priv1->int_mask_class2_RW,
		in_be64(&spu->priv1->int_mask_class2_RW) & ~0x10);
	spin_unlock(&spu->register_lock);
	return 0;
}
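
/*
 * Top-half interrupt handlers, one per interrupt class.  Editor's note:
 * these use the pre-2.6.19 handler signature (int irq, void *data,
 * struct pt_regs *regs); *data is the struct spu passed to request_irq()
 * in spu_request_irqs() below.
 */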

static irqreturn_t
spu_irq_class_0(int irq, void *data, struct pt_regs *regs)
{
	struct spu *spu;

	spu = data;
	spu->class_0_pending = 1;
	if (spu->stop_callback)
		spu->stop_callback(spu);

	return IRQ_HANDLED;
}

int
spu_irq_class_0_bottom(struct spu *spu)
{
	unsigned long stat;

	spu->class_0_pending = 0;

	stat = in_be64(&spu->priv1->int_stat_class0_RW);

	if (stat & 1) /* invalid MFC DMA */
		__spu_trap_invalid_dma(spu);

	if (stat & 2) /* invalid DMA alignment */
		__spu_trap_dma_align(spu);

	if (stat & 4) /* error on SPU */
		__spu_trap_error(spu);

	out_be64(&spu->priv1->int_stat_class0_RW, stat);

	return (stat & 0x7) ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(spu_irq_class_0_bottom);

static irqreturn_t
spu_irq_class_1(int irq, void *data, struct pt_regs *regs)
{
	struct spu *spu;
	unsigned long stat, mask, dar, dsisr;

	spu = data;

	/* atomically read & clear class1 status. */
	spin_lock(&spu->register_lock);
	mask = in_be64(&spu->priv1->int_mask_class1_RW);
	stat = in_be64(&spu->priv1->int_stat_class1_RW) & mask;
	dar = in_be64(&spu->priv1->mfc_dar_RW);
	dsisr = in_be64(&spu->priv1->mfc_dsisr_RW);
	out_be64(&spu->priv1->mfc_dsisr_RW, 0UL);
	out_be64(&spu->priv1->int_stat_class1_RW, stat);
	spin_unlock(&spu->register_lock);

	if (stat & 1) /* segment fault */
		__spu_trap_data_seg(spu, dar);

	if (stat & 2) { /* mapping fault */
		__spu_trap_data_map(spu, dar, dsisr);
	}

	if (stat & 4) /* ls compare & suspend on get */
		;

	if (stat & 8) /* ls compare & suspend on put */
		;

	return stat ? IRQ_HANDLED : IRQ_NONE;
}
EXPORT_SYMBOL_GPL(spu_irq_class_1_bottom);

static irqreturn_t
spu_irq_class_2(int irq, void *data, struct pt_regs *regs)
{
	struct spu *spu;
	unsigned long stat;

	spu = data;
	stat = in_be64(&spu->priv1->int_stat_class2_RW);

	pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat,
		in_be64(&spu->priv1->int_mask_class2_RW));

	if (stat & 1) /* PPC core mailbox */
		__spu_trap_mailbox(spu);

	if (stat & 2) /* SPU stop-and-signal */
		__spu_trap_stop(spu);

	if (stat & 4) /* SPU halted */
		__spu_trap_halt(spu);

	if (stat & 8) /* DMA tag group complete */
		__spu_trap_tag_group(spu);

	if (stat & 0x10) /* SPU mailbox threshold */
		__spu_trap_spubox(spu);

	out_be64(&spu->priv1->int_stat_class2_RW, stat);
	return stat ? IRQ_HANDLED : IRQ_NONE;
}
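
/*
 * Editor's note: SPE interrupt numbers are laid out by the internal
 * interrupt controller as a per-node base (IIC_NODE_STRIDE * node +
 * IIC_SPE_OFFSET) plus the SPE's "isrc" value from the device tree, with
 * each successive interrupt class IIC_CLASS_STRIDE further along.  For a
 * hypothetical isrc of 4 on node 0, classes 0/1/2 would land on
 * irq_base + 4, irq_base + IIC_CLASS_STRIDE + 4 and
 * irq_base + 2*IIC_CLASS_STRIDE + 4.
 */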

static int
spu_request_irqs(struct spu *spu)
{
	int ret;
	int irq_base;

	irq_base = IIC_NODE_STRIDE * spu->node + IIC_SPE_OFFSET;

	snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0", spu->number);
	ret = request_irq(irq_base + spu->isrc,
		 spu_irq_class_0, 0, spu->irq_c0, spu);
	if (ret)
		goto out;
	out_be64(&spu->priv1->int_mask_class0_RW, 0x7);

	snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1", spu->number);
	ret = request_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc,
		 spu_irq_class_1, 0, spu->irq_c1, spu);
	if (ret)
		goto out1;
	out_be64(&spu->priv1->int_mask_class1_RW, 0x3);

	snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2", spu->number);
	ret = request_irq(irq_base + 2*IIC_CLASS_STRIDE + spu->isrc,
		 spu_irq_class_2, 0, spu->irq_c2, spu);
	if (ret)
		goto out2;
	out_be64(&spu->priv1->int_mask_class2_RW, 0xe);
	goto out;

out2:
	free_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc, spu);
out1:
	free_irq(irq_base + spu->isrc, spu);
out:
	return ret;
}

static void
spu_free_irqs(struct spu *spu)
{
	int irq_base;

	irq_base = IIC_NODE_STRIDE * spu->node + IIC_SPE_OFFSET;

	free_irq(irq_base + spu->isrc, spu);
	free_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc, spu);
	free_irq(irq_base + 2*IIC_CLASS_STRIDE + spu->isrc, spu);
}
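
/*
 * Unused SPUs are kept on spu_list; spu_mutex (a semaphore used as a
 * mutex, as was common before struct mutex existed) serializes access
 * from spu_alloc(), spu_free() and the probe/teardown paths below.
 */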

static LIST_HEAD(spu_list);
static DECLARE_MUTEX(spu_mutex);
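
/*
 * Editor's note: spu_init_channels() forces each architected SPU channel
 * into a known state before the SPU is handed out.  Channels on zero_list
 * get their pending data word(s) overwritten with zero; channels on
 * count_list get their channel count reset -- 0 for channels that start
 * out blocked, and a nonzero depth where the channel has capacity (the 16
 * for channel 0x15 presumably reflects the MFC command queue depth).
 */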

static void spu_init_channels(struct spu *spu)
{
	static const struct {
		unsigned channel;
		unsigned count;
	} zero_list[] = {
		{ 0x00, 1, }, { 0x01, 1, }, { 0x03, 1, }, { 0x04, 1, },
		{ 0x18, 1, }, { 0x19, 1, }, { 0x1b, 1, }, { 0x1d, 1, },
	}, count_list[] = {
		{ 0x00, 0, }, { 0x03, 0, }, { 0x04, 0, }, { 0x15, 16, },
		{ 0x17, 1, }, { 0x18, 0, }, { 0x19, 0, }, { 0x1b, 0, },
		{ 0x1c, 1, }, { 0x1d, 0, }, { 0x1e, 1, },
	};
	struct spu_priv2 __iomem *priv2;
	int i;

	priv2 = spu->priv2;

	/* initialize all channel data to zero */
	for (i = 0; i < ARRAY_SIZE(zero_list); i++) {
		int count;

		out_be64(&priv2->spu_chnlcntptr_RW, zero_list[i].channel);
		for (count = 0; count < zero_list[i].count; count++)
			out_be64(&priv2->spu_chnldata_RW, 0);
	}

	/* initialize channel counts to meaningful values */
	for (i = 0; i < ARRAY_SIZE(count_list); i++) {
		out_be64(&priv2->spu_chnlcntptr_RW, count_list[i].channel);
		out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count);
	}
}

static void spu_init_regs(struct spu *spu)
{
	/* enable interrupt delivery for all three classes, matching the
	 * masks set up in spu_request_irqs() */
	out_be64(&spu->priv1->int_mask_class0_RW, 0x7);
	out_be64(&spu->priv1->int_mask_class1_RW, 0x3);
	out_be64(&spu->priv1->int_mask_class2_RW, 0xe);
}

struct spu *spu_alloc(void)
{
	struct spu *spu;

	down(&spu_mutex);
	if (!list_empty(&spu_list)) {
		spu = list_entry(spu_list.next, struct spu, list);
		list_del_init(&spu->list);
		pr_debug("Got SPU %x %d\n", spu->isrc, spu->number);
	} else {
		pr_debug("No SPU left\n");
		spu = NULL;
	}
	up(&spu_mutex);

	if (spu)
		spu_init_channels(spu);

	return spu;
}
EXPORT_SYMBOL_GPL(spu_alloc);

void spu_free(struct spu *spu)
{
	down(&spu_mutex);
	list_add_tail(&spu->list, &spu_list);
	up(&spu_mutex);
}
EXPORT_SYMBOL_GPL(spu_free);
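
/*
 * Editor's note -- a minimal usage sketch of the interface exported above,
 * assuming a caller in process context (spufs is the in-tree user; the
 * callback names here are hypothetical):
 *
 *	struct spu *spu = spu_alloc();
 *	if (!spu)
 *		return -ENOSPC;
 *	spu->mm = current->mm;
 *	spu->stop_callback = my_stop_handler;
 *	spu->ibox_callback = my_ibox_handler;
 *	spu->wbox_callback = my_wbox_handler;
 *	... run code on the SPU, field callbacks ...
 *	spu_free(spu);
 */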

static int spu_handle_mm_fault(struct spu *spu)
{
	struct mm_struct *mm = spu->mm;
	struct vm_area_struct *vma;
	u64 ea, dsisr, is_write;
	int ret;

	ea = spu->dar;
	dsisr = spu->dsisr;
	if (!IS_VALID_EA(ea))
		return -EFAULT;
	if (mm == NULL || mm->pgd == NULL)
		return -EFAULT;

	down_read(&mm->mmap_sem);
	ret = -EFAULT;
	vma = find_vma(mm, ea);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= ea)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, ea))
		goto bad_area;
good_area:
	is_write = dsisr & MFC_DSISR_ACCESS_PUT;
	if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (dsisr & MFC_DSISR_ACCESS_DENIED)
			goto bad_area;
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}
	ret = 0;
	switch (handle_mm_fault(mm, vma, ea, is_write)) {
	case VM_FAULT_MINOR:
		current->min_flt++;
		break;
	case VM_FAULT_MAJOR:
		current->maj_flt++;
		break;
	case VM_FAULT_SIGBUS:
		ret = -EFAULT;
		goto bad_area;
	case VM_FAULT_OOM:
		ret = -ENOMEM;
		goto bad_area;
	default:
		BUG();
	}
	up_read(&mm->mmap_sem);
	return ret;

bad_area:
	up_read(&mm->mmap_sem);
	return ret;
}

int spu_irq_class_1_bottom(struct spu *spu)
{
	u64 ea, dsisr, access, error = 0UL;
	int ret = 0;

	ea = spu->dar;
	dsisr = spu->dsisr;
	if (dsisr & MFC_DSISR_PTE_NOT_FOUND) {
		access = (_PAGE_PRESENT | _PAGE_USER);
		access |= (dsisr & MFC_DSISR_ACCESS_PUT) ? _PAGE_RW : 0UL;
		if (hash_page(ea, access, 0x300) != 0)
			error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
	}
	if ((error & CLASS1_ENABLE_STORAGE_FAULT_INTR) ||
	    (dsisr & MFC_DSISR_ACCESS_DENIED)) {
		if ((ret = spu_handle_mm_fault(spu)) != 0)
			error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
		else
			error &= ~CLASS1_ENABLE_STORAGE_FAULT_INTR;
	}
	spu->dar = 0UL;
	spu->dsisr = 0UL;
	if (!error)
		spu_restart_dma(spu);
	else
		__spu_trap_invalid_dma(spu);
	return ret;
}
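
/*
 * Editor's note: each SPE node in the device tree carries "local-store",
 * "problem", "priv1" and "priv2" properties whose value is an
 * (address, length) pair; map_spe_prop() below ioremaps such a property
 * into the kernel's address space.
 */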

static void __iomem * __init map_spe_prop(struct device_node *n,
						const char *name)
{
	struct address_prop {
		unsigned long address;
		unsigned int len;
	} __attribute__((packed)) *prop;

	void *p;
	int proplen;

	p = get_property(n, name, &proplen);
	if (proplen != sizeof (struct address_prop))
		return NULL;

	prop = p;

	return ioremap(prop->address, prop->len);
}

static void spu_unmap(struct spu *spu)
{
	iounmap(spu->priv2);
	iounmap(spu->priv1);
	iounmap(spu->problem);
	iounmap((u8 __iomem *)spu->local_store);
}

static int __init spu_map_device(struct spu *spu, struct device_node *spe)
{
	char *prop;
	int ret;

	ret = -ENODEV;
	prop = get_property(spe, "isrc", NULL);
	if (!prop)
		goto out;
	spu->isrc = *(unsigned int *)prop;

	spu->name = get_property(spe, "name", NULL);
	if (!spu->name)
		goto out;

	prop = get_property(spe, "local-store", NULL);
	if (!prop)
		goto out;
	spu->local_store_phys = *(unsigned long *)prop;

	/* we use local store as ram, not io memory */
	spu->local_store = (void __force *)map_spe_prop(spe, "local-store");
	if (!spu->local_store)
		goto out;

	spu->problem = map_spe_prop(spe, "problem");
	if (!spu->problem)
		goto out_unmap;

	spu->priv1 = map_spe_prop(spe, "priv1");
	if (!spu->priv1)
		goto out_unmap;

	spu->priv2 = map_spe_prop(spe, "priv2");
	if (!spu->priv2)
		goto out_unmap;
	return 0;

out_unmap:
	spu_unmap(spu);
out:
	return ret;
}

static int __init find_spu_node_id(struct device_node *spe)
{
	unsigned int *id;
	struct device_node *cpu;

	cpu = spe->parent->parent;
	id = (unsigned int *)get_property(cpu, "node-id", NULL);

	return id ? *id : 0;
}

static int __init create_spu(struct device_node *spe)
{
	struct spu *spu;
	int ret;
	static int number;

	ret = -ENOMEM;
	spu = kmalloc(sizeof (*spu), GFP_KERNEL);
	if (!spu)
		goto out;

	ret = spu_map_device(spu, spe);
	if (ret)
		goto out_free;

	spu->node = find_spu_node_id(spe);
	spu->stop_code = 0;
	spu->slb_replace = 0;
	spu->mm = NULL;
	spu->class_0_pending = 0;
	spu->flags = 0UL;
	spu->dar = 0UL;
	spu->dsisr = 0UL;
	spin_lock_init(&spu->register_lock);

	/* tell the MFC where the hashed page table lives and set up its
	 * state register 1 */
	out_be64(&spu->priv1->mfc_sdr_RW, mfspr(SPRN_SDR1));
	out_be64(&spu->priv1->mfc_sr1_RW, 0x33);

	spu->ibox_callback = NULL;
	spu->wbox_callback = NULL;
	spu->stop_callback = NULL;

	down(&spu_mutex);
	spu->number = number++;
	ret = spu_request_irqs(spu);
	if (ret)
		goto out_unmap;

	list_add(&spu->list, &spu_list);
	up(&spu_mutex);

	pr_debug("Using SPE %s %02x %p %p %p %p %d\n",
		spu->name, spu->isrc, spu->local_store,
		spu->problem, spu->priv1, spu->priv2, spu->number);
	goto out;

out_unmap:
	up(&spu_mutex);
	spu_unmap(spu);
out_free:
	kfree(spu);
out:
	return ret;
}

static void destroy_spu(struct spu *spu)
{
	list_del_init(&spu->list);

	spu_free_irqs(spu);
	spu_unmap(spu);
	kfree(spu);
}

static void cleanup_spu_base(void)
{
	struct spu *spu, *tmp;

	down(&spu_mutex);
	list_for_each_entry_safe(spu, tmp, &spu_list, list)
		destroy_spu(spu);
	up(&spu_mutex);
}
module_exit(cleanup_spu_base);

static int __init init_spu_base(void)
{
	struct device_node *node;
	int ret;

	ret = -ENODEV;
	for (node = of_find_node_by_type(NULL, "spe");
			node; node = of_find_node_by_type(node, "spe")) {
		ret = create_spu(node);
		if (ret) {
			printk(KERN_WARNING "%s: Error initializing %s\n",
				__FUNCTION__, node->name);
			cleanup_spu_base();
			break;
		}
	}

	/* in some old firmware versions, the spe is called 'spc', so we
	 * look for that as well */
	for (node = of_find_node_by_type(NULL, "spc");
			node; node = of_find_node_by_type(node, "spc")) {
		ret = create_spu(node);
		if (ret) {
			printk(KERN_WARNING "%s: Error initializing %s\n",
				__FUNCTION__, node->name);
			cleanup_spu_base();
			break;
		}
	}
	return ret;
}
module_init(init_spu_base);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");