/*
 * Low-level SPU handling
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/linux_logo.h>

#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/spu_csa.h>
#include <asm/xmon.h>
#include <asm/prom.h>
#include <asm/kexec.h>

const struct spu_management_ops *spu_management_ops;
EXPORT_SYMBOL_GPL(spu_management_ops);

const struct spu_priv1_ops *spu_priv1_ops;
EXPORT_SYMBOL_GPL(spu_priv1_ops);

struct cbe_spu_info cbe_spu_info[MAX_NUMNODES];
EXPORT_SYMBOL_GPL(cbe_spu_info);

/*
 * The spufs fault-handling code needs to call force_sig_info to raise signals
 * on DMA errors. Export it here to avoid general kernel-wide access to this
 * function.
 */
EXPORT_SYMBOL_GPL(force_sig_info);

/*
 * Protects cbe_spu_info and spu->number.
 */
static DEFINE_SPINLOCK(spu_lock);

/*
 * List of all spus in the system.
 *
 * This list is iterated by callers from irq context and callers that
 * want to sleep. Thus modifications need to be done with both
 * spu_full_list_lock and spu_full_list_mutex held, while iterating
 * through it requires either of these locks.
 *
 * In addition spu_full_list_lock protects all assignments to
 * spu->mm.
 */
static LIST_HEAD(spu_full_list);
static DEFINE_SPINLOCK(spu_full_list_lock);
static DEFINE_MUTEX(spu_full_list_mutex);

/* one SLB entry for the SPU's MFC, in ESID/VSID register format */
struct spu_slb {
	u64 esid, vsid;
};

void spu_invalidate_slbs(struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	unsigned long flags;

	spin_lock_irqsave(&spu->register_lock, flags);
	if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK)
		out_be64(&priv2->slb_invalidate_all_W, 0UL);
	spin_unlock_irqrestore(&spu->register_lock, flags);
}
EXPORT_SYMBOL_GPL(spu_invalidate_slbs);

/* This is called by the MM core when a segment size is changed, to
 * request a flush of all the SPEs using a given mm
 */
void spu_flush_all_slbs(struct mm_struct *mm)
{
	struct spu *spu;
	unsigned long flags;

	spin_lock_irqsave(&spu_full_list_lock, flags);
	list_for_each_entry(spu, &spu_full_list, full_list) {
		if (spu->mm == mm)
			spu_invalidate_slbs(spu);
	}
	spin_unlock_irqrestore(&spu_full_list_lock, flags);
}

/* The hack below stinks... try to do something better one of
 * these days... Does it even work properly with NR_CPUS == 1 ?
 */
static inline void mm_needs_global_tlbie(struct mm_struct *mm)
{
	int nr = (NR_CPUS > 1) ? NR_CPUS : NR_CPUS + 1;

	/* Global TLBIE broadcast required with SPEs. */
	bitmap_fill(cpumask_bits(mm_cpumask(mm)), nr);
}

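/*
 * Associate an SPU with a process address space (or detach it when
 * @mm is NULL). The assignment to spu->mm is done under
 * spu_full_list_lock so that spu_flush_all_slbs() always sees a
 * consistent value.
 */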
void spu_associate_mm(struct spu *spu, struct mm_struct *mm)
{
	unsigned long flags;

	spin_lock_irqsave(&spu_full_list_lock, flags);
	spu->mm = mm;
	spin_unlock_irqrestore(&spu_full_list_lock, flags);
	if (mm)
		mm_needs_global_tlbie(mm);
}
EXPORT_SYMBOL_GPL(spu_associate_mm);

int spu_64k_pages_available(void)
{
	return mmu_psize_defs[MMU_PAGE_64K].shift != 0;
}
EXPORT_SYMBOL_GPL(spu_64k_pages_available);

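/*
 * Kick a suspended MFC DMA queue. If a context switch is currently
 * pending we must not touch the hardware; just note that a fault was
 * pending so the context switch code restarts the queue itself.
 */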
static void spu_restart_dma(struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags))
		out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
	else {
		set_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags);
		mb();
	}
}

static inline void spu_load_slb(struct spu *spu, int slbe, struct spu_slb *slb)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	pr_debug("%s: adding SLB[%d] 0x%016llx 0x%016llx\n",
			__func__, slbe, slb->vsid, slb->esid);

	out_be64(&priv2->slb_index_W, slbe);
	/* set invalid before writing vsid */
	out_be64(&priv2->slb_esid_RW, 0);
	/* now it's safe to write the vsid */
	out_be64(&priv2->slb_vsid_RW, slb->vsid);
	/* setting the new esid makes the entry valid again */
	out_be64(&priv2->slb_esid_RW, slb->esid);
}

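/*
 * Handle an SLB miss (class 1 segment fault) from the MFC: build an SLB
 * entry for the faulting effective address and load it into one of the
 * eight MFC SLB slots, which are recycled in round-robin order via
 * spu->slb_replace.
 */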
static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
{
	struct mm_struct *mm = spu->mm;
	struct spu_slb slb;
	int psize;

	pr_debug("%s\n", __func__);

	slb.esid = (ea & ESID_MASK) | SLB_ESID_V;

	switch (REGION_ID(ea)) {
	case USER_REGION_ID:
#ifdef CONFIG_PPC_MM_SLICES
		psize = get_slice_psize(mm, ea);
#else
		psize = mm->context.user_psize;
#endif
		slb.vsid = (get_vsid(mm->context.id, ea, MMU_SEGSIZE_256M)
				<< SLB_VSID_SHIFT) | SLB_VSID_USER;
		break;
	case VMALLOC_REGION_ID:
		if (ea < VMALLOC_END)
			psize = mmu_vmalloc_psize;
		else
			psize = mmu_io_psize;
		slb.vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M)
				<< SLB_VSID_SHIFT) | SLB_VSID_KERNEL;
		break;
	case KERNEL_REGION_ID:
		psize = mmu_linear_psize;
		slb.vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M)
				<< SLB_VSID_SHIFT) | SLB_VSID_KERNEL;
		break;
	default:
		/* Future: support kernel segments so that drivers
		 * can use SPUs.
		 */
		pr_debug("invalid region access at %016lx\n", ea);
		return 1;
	}
	slb.vsid |= mmu_psize_defs[psize].sllp;

	spu_load_slb(spu, spu->slb_replace, &slb);

	spu->slb_replace++;
	if (spu->slb_replace >= 8)
		spu->slb_replace = 0;

	spu_restart_dma(spu);
	spu->stats.slb_flt++;
	return 0;
}

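/*
 * Class 1 storage fault (hash table miss). Kernel-space faults are
 * resolved right here via hash_page(); user-space faults are recorded
 * in class_1_dar/class_1_dsisr and deferred to the context's stop
 * callback, which handles them in process context.
 */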
extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); //XXX
static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
{
	int ret;

	pr_debug("%s, %llx, %lx\n", __func__, dsisr, ea);

	/*
	 * Handle kernel space hash faults immediately. User hash
	 * faults need to be deferred to process context.
	 */
	if ((dsisr & MFC_DSISR_PTE_NOT_FOUND) &&
	    (REGION_ID(ea) != USER_REGION_ID)) {

		spin_unlock(&spu->register_lock);
		ret = hash_page(ea, _PAGE_PRESENT, 0x300);
		spin_lock(&spu->register_lock);

		if (!ret) {
			spu_restart_dma(spu);
			return 0;
		}
	}

	spu->class_1_dar = ea;
	spu->class_1_dsisr = dsisr;

	spu->stop_callback(spu, 1);

	spu->class_1_dar = 0;
	spu->class_1_dsisr = 0;

	return 0;
}

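/*
 * Build an SLB entry for a kernel-space address, using the linear
 * mapping page size for the kernel region and the vmalloc page size
 * for everything else.
 */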
static void __spu_kernel_slb(void *addr, struct spu_slb *slb)
{
	unsigned long ea = (unsigned long)addr;
	u64 llp;

	if (REGION_ID(ea) == KERNEL_REGION_ID)
		llp = mmu_psize_defs[mmu_linear_psize].sllp;
	else
		llp = mmu_psize_defs[mmu_virtual_psize].sllp;

	slb->vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M) << SLB_VSID_SHIFT) |
		SLB_VSID_KERNEL | llp;
	slb->esid = (ea & ESID_MASK) | SLB_ESID_V;
}

/**
 * Given an array of @nr_slbs SLB entries, @slbs, return non-zero if the
 * address @new_addr is present.
 */
static inline int __slb_present(struct spu_slb *slbs, int nr_slbs,
		void *new_addr)
{
	unsigned long ea = (unsigned long)new_addr;
	int i;

	for (i = 0; i < nr_slbs; i++)
		if (!((slbs[i].esid ^ ea) & ESID_MASK))
			return 1;

	return 0;
}

/**
 * Setup the SPU kernel SLBs, in preparation for a context save/restore. We
 * need to map both the context save area, and the save/restore code.
 *
 * Because the lscsa and code may cross segment boundaries, we check to see
 * if mappings are required for the start and end of each range. We currently
 * assume that the mappings are smaller than one segment - if not, something
 * is seriously wrong.
 */
void spu_setup_kernel_slbs(struct spu *spu, struct spu_lscsa *lscsa,
		void *code, int code_size)
{
	struct spu_slb slbs[4];
	int i, nr_slbs = 0;
	/* start and end addresses of both mappings */
	void *addrs[] = {
		lscsa, (void *)lscsa + sizeof(*lscsa) - 1,
		code, code + code_size - 1
	};

	/* check the set of addresses, and create a new entry in the slbs array
	 * if there isn't already a SLB for that address */
	for (i = 0; i < ARRAY_SIZE(addrs); i++) {
		if (__slb_present(slbs, nr_slbs, addrs[i]))
			continue;

		__spu_kernel_slb(addrs[i], &slbs[nr_slbs]);
		nr_slbs++;
	}

	spin_lock_irq(&spu->register_lock);
	/* Add the set of SLBs */
	for (i = 0; i < nr_slbs; i++)
		spu_load_slb(spu, i, &slbs[i]);
	spin_unlock_irq(&spu->register_lock);
}
EXPORT_SYMBOL_GPL(spu_setup_kernel_slbs);

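/*
 * Interrupt handlers. The hardware splits SPU interrupts into three
 * classes: class 0 for errors, class 1 for address translation faults,
 * and class 2 for mailbox, stop/halt and DMA tag-group completion
 * events.
 */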
static irqreturn_t
spu_irq_class_0(int irq, void *data)
{
	struct spu *spu;
	unsigned long stat, mask;

	spu = data;

	spin_lock(&spu->register_lock);
	mask = spu_int_mask_get(spu, 0);
	stat = spu_int_stat_get(spu, 0) & mask;

	spu->class_0_pending |= stat;
	spu->class_0_dar = spu_mfc_dar_get(spu);
	spu->stop_callback(spu, 0);
	spu->class_0_pending = 0;
	spu->class_0_dar = 0;

	spu_int_stat_clear(spu, 0, stat);
	spin_unlock(&spu->register_lock);

	return IRQ_HANDLED;
}

static irqreturn_t
spu_irq_class_1(int irq, void *data)
{
	struct spu *spu;
	unsigned long stat, mask, dar, dsisr;

	spu = data;

	/* atomically read & clear class1 status. */
	spin_lock(&spu->register_lock);
	mask  = spu_int_mask_get(spu, 1);
	stat  = spu_int_stat_get(spu, 1) & mask;
	dar   = spu_mfc_dar_get(spu);
	dsisr = spu_mfc_dsisr_get(spu);
	if (stat & CLASS1_STORAGE_FAULT_INTR)
		spu_mfc_dsisr_set(spu, 0ul);
	spu_int_stat_clear(spu, 1, stat);

	pr_debug("%s: %lx %lx %lx %lx\n", __func__, mask, stat,
			dar, dsisr);

	if (stat & CLASS1_SEGMENT_FAULT_INTR)
		__spu_trap_data_seg(spu, dar);

	if (stat & CLASS1_STORAGE_FAULT_INTR)
		__spu_trap_data_map(spu, dar, dsisr);

	if (stat & CLASS1_LS_COMPARE_SUSPEND_ON_GET_INTR)
		; /* no action required */

	if (stat & CLASS1_LS_COMPARE_SUSPEND_ON_PUT_INTR)
		; /* no action required */

	spu->class_1_dsisr = 0;
	spu->class_1_dar = 0;

	spin_unlock(&spu->register_lock);

	return stat ? IRQ_HANDLED : IRQ_NONE;
}

static irqreturn_t
spu_irq_class_2(int irq, void *data)
{
	struct spu *spu;
	unsigned long stat;
	unsigned long mask;
	const int mailbox_intrs =
		CLASS2_MAILBOX_THRESHOLD_INTR | CLASS2_MAILBOX_INTR;

	spu = data;
	spin_lock(&spu->register_lock);
	stat = spu_int_stat_get(spu, 2);
	mask = spu_int_mask_get(spu, 2);
	/* ignore interrupts we're not waiting for */
	stat &= mask;
	/* mailbox interrupts are level triggered. mask them now before
	 * acknowledging */
	if (stat & mailbox_intrs)
		spu_int_mask_and(spu, 2, ~(stat & mailbox_intrs));
	/* acknowledge all interrupts before the callbacks */
	spu_int_stat_clear(spu, 2, stat);

	pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);

	if (stat & CLASS2_MAILBOX_INTR)
		spu->ibox_callback(spu);

	if (stat & CLASS2_SPU_STOP_INTR)
		spu->stop_callback(spu, 2);

	if (stat & CLASS2_SPU_HALT_INTR)
		spu->stop_callback(spu, 2);

	if (stat & CLASS2_SPU_DMA_TAG_GROUP_COMPLETE_INTR)
		spu->mfc_callback(spu);

	if (stat & CLASS2_MAILBOX_THRESHOLD_INTR)
		spu->wbox_callback(spu);

	spu->stats.class2_intr++;

	spin_unlock(&spu->register_lock);

	return stat ? IRQ_HANDLED : IRQ_NONE;
}

static int spu_request_irqs(struct spu *spu)
{
	int ret = 0;

	if (spu->irqs[0] != NO_IRQ) {
		snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0",
			 spu->number);
		ret = request_irq(spu->irqs[0], spu_irq_class_0,
				  IRQF_DISABLED, spu->irq_c0, spu);
		if (ret)
			goto bail0;
	}
	if (spu->irqs[1] != NO_IRQ) {
		snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1",
			 spu->number);
		ret = request_irq(spu->irqs[1], spu_irq_class_1,
				  IRQF_DISABLED, spu->irq_c1, spu);
		if (ret)
			goto bail1;
	}
	if (spu->irqs[2] != NO_IRQ) {
		snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2",
			 spu->number);
		ret = request_irq(spu->irqs[2], spu_irq_class_2,
				  IRQF_DISABLED, spu->irq_c2, spu);
		if (ret)
			goto bail2;
	}
	return 0;

bail2:
	if (spu->irqs[1] != NO_IRQ)
		free_irq(spu->irqs[1], spu);
bail1:
	if (spu->irqs[0] != NO_IRQ)
		free_irq(spu->irqs[0], spu);
bail0:
	return ret;
}

static void spu_free_irqs(struct spu *spu)
{
	if (spu->irqs[0] != NO_IRQ)
		free_irq(spu->irqs[0], spu);
	if (spu->irqs[1] != NO_IRQ)
		free_irq(spu->irqs[1], spu);
	if (spu->irqs[2] != NO_IRQ)
		free_irq(spu->irqs[2], spu);
}

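/*
 * Put the SPU channels into a known clean state: drain any stale data
 * from the channels listed in zero_list, then set the channel counts
 * to their initial values.
 */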
void spu_init_channels(struct spu *spu)
{
	static const struct {
		unsigned channel;
		unsigned count;
	} zero_list[] = {
		{ 0x00, 1, }, { 0x01, 1, }, { 0x03, 1, }, { 0x04, 1, },
		{ 0x18, 1, }, { 0x19, 1, }, { 0x1b, 1, }, { 0x1d, 1, },
	}, count_list[] = {
		{ 0x00, 0, }, { 0x03, 0, }, { 0x04, 0, }, { 0x15, 16, },
		{ 0x17, 1, }, { 0x18, 0, }, { 0x19, 0, }, { 0x1b, 0, },
		{ 0x1c, 1, }, { 0x1d, 0, }, { 0x1e, 1, },
	};
	struct spu_priv2 __iomem *priv2;
	int i;

	priv2 = spu->priv2;

	/* initialize all channel data to zero */
	for (i = 0; i < ARRAY_SIZE(zero_list); i++) {
		int count;

		out_be64(&priv2->spu_chnlcntptr_RW, zero_list[i].channel);
		for (count = 0; count < zero_list[i].count; count++)
			out_be64(&priv2->spu_chnldata_RW, 0);
	}

	/* initialize channel counts to meaningful values */
	for (i = 0; i < ARRAY_SIZE(count_list); i++) {
		out_be64(&priv2->spu_chnlcntptr_RW, count_list[i].channel);
		out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count);
	}
}
EXPORT_SYMBOL_GPL(spu_init_channels);

static int spu_shutdown(struct sys_device *sysdev)
{
	struct spu *spu = container_of(sysdev, struct spu, sysdev);

	spu_free_irqs(spu);
	spu_destroy_spu(spu);
	return 0;
}

static struct sysdev_class spu_sysdev_class = {
	.name = "spu",
	.shutdown = spu_shutdown,
};

int spu_add_sysdev_attr(struct sysdev_attribute *attr)
{
	struct spu *spu;

	mutex_lock(&spu_full_list_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list)
		sysdev_create_file(&spu->sysdev, attr);
	mutex_unlock(&spu_full_list_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spu_add_sysdev_attr);

int spu_add_sysdev_attr_group(struct attribute_group *attrs)
{
	struct spu *spu;
	int rc = 0;

	mutex_lock(&spu_full_list_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list) {
		rc = sysfs_create_group(&spu->sysdev.kobj, attrs);

		/* we're in trouble here, but try unwinding anyway */
		if (rc) {
			printk(KERN_ERR "%s: can't create sysfs group '%s'\n",
					__func__, attrs->name);

			list_for_each_entry_continue_reverse(spu,
					&spu_full_list, full_list)
				sysfs_remove_group(&spu->sysdev.kobj, attrs);
			break;
		}
	}

	mutex_unlock(&spu_full_list_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(spu_add_sysdev_attr_group);

void spu_remove_sysdev_attr(struct sysdev_attribute *attr)
{
	struct spu *spu;

	mutex_lock(&spu_full_list_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list)
		sysdev_remove_file(&spu->sysdev, attr);
	mutex_unlock(&spu_full_list_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr);

void spu_remove_sysdev_attr_group(struct attribute_group *attrs)
{
	struct spu *spu;

	mutex_lock(&spu_full_list_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list)
		sysfs_remove_group(&spu->sysdev.kobj, attrs);
	mutex_unlock(&spu_full_list_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr_group);

static int spu_create_sysdev(struct spu *spu)
{
	int ret;

	spu->sysdev.id = spu->number;
	spu->sysdev.cls = &spu_sysdev_class;
	ret = sysdev_register(&spu->sysdev);
	if (ret) {
		printk(KERN_ERR "Can't register SPU %d with sysfs\n",
				spu->number);
		return ret;
	}

	sysfs_add_device_to_node(&spu->sysdev, spu->node);

	return 0;
}

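/*
 * Probe callback, invoked once per SPU by spu_enumerate_spus(): allocate
 * and initialize a struct spu, request its interrupts, register it with
 * sysfs and add it to the per-node and global SPU lists.
 */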
static int __init create_spu(void *data)
{
	struct spu *spu;
	int ret;
	static int number;
	unsigned long flags;
	struct timespec ts;

	ret = -ENOMEM;
	spu = kzalloc(sizeof (*spu), GFP_KERNEL);
	if (!spu)
		goto out;

	spu->alloc_state = SPU_FREE;

	spin_lock_init(&spu->register_lock);
	spin_lock(&spu_lock);
	spu->number = number++;
	spin_unlock(&spu_lock);

	ret = spu_create_spu(spu, data);
	if (ret)
		goto out_free;

	spu_mfc_sdr_setup(spu);
	spu_mfc_sr1_set(spu, 0x33);
	ret = spu_request_irqs(spu);
	if (ret)
		goto out_destroy;

	ret = spu_create_sysdev(spu);
	if (ret)
		goto out_free_irqs;

	mutex_lock(&cbe_spu_info[spu->node].list_mutex);
	list_add(&spu->cbe_list, &cbe_spu_info[spu->node].spus);
	cbe_spu_info[spu->node].n_spus++;
	mutex_unlock(&cbe_spu_info[spu->node].list_mutex);

	mutex_lock(&spu_full_list_mutex);
	spin_lock_irqsave(&spu_full_list_lock, flags);
	list_add(&spu->full_list, &spu_full_list);
	spin_unlock_irqrestore(&spu_full_list_lock, flags);
	mutex_unlock(&spu_full_list_mutex);

	spu->stats.util_state = SPU_UTIL_IDLE_LOADED;
	ktime_get_ts(&ts);
	spu->stats.tstamp = timespec_to_ns(&ts);

	INIT_LIST_HEAD(&spu->aff_list);

	goto out;

out_free_irqs:
	spu_free_irqs(spu);
out_destroy:
	spu_destroy_spu(spu);
out_free:
	kfree(spu);
out:
	return ret;
}

static const char *spu_state_names[] = {
	"user", "system", "iowait", "idle"
};

static unsigned long long spu_acct_time(struct spu *spu,
		enum spu_utilization_state state)
{
	struct timespec ts;
	unsigned long long time = spu->stats.times[state];

	/*
	 * If the spu is idle or the context is stopped, utilization
	 * statistics are not updated. Apply the time delta from the
	 * last recorded state of the spu.
	 */
	if (spu->stats.util_state == state) {
		ktime_get_ts(&ts);
		time += timespec_to_ns(&ts) - spu->stats.tstamp;
	}

	return time / NSEC_PER_MSEC;
}

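/*
 * The sysfs "stat" attribute prints the current utilization state
 * followed by the per-state times (in milliseconds, via spu_acct_time)
 * and the context-switch, fault and interrupt counters, in the order
 * given below.
 */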
static ssize_t spu_stat_show(struct sys_device *sysdev,
			struct sysdev_attribute *attr, char *buf)
{
	struct spu *spu = container_of(sysdev, struct spu, sysdev);

	return sprintf(buf, "%s %llu %llu %llu %llu "
		"%llu %llu %llu %llu %llu %llu %llu %llu\n",
		spu_state_names[spu->stats.util_state],
		spu_acct_time(spu, SPU_UTIL_USER),
		spu_acct_time(spu, SPU_UTIL_SYSTEM),
		spu_acct_time(spu, SPU_UTIL_IOWAIT),
		spu_acct_time(spu, SPU_UTIL_IDLE_LOADED),
		spu->stats.vol_ctx_switch,
		spu->stats.invol_ctx_switch,
		spu->stats.slb_flt,
		spu->stats.hash_flt,
		spu->stats.min_flt,
		spu->stats.maj_flt,
		spu->stats.class2_intr,
		spu->stats.libassist);
}

static SYSDEV_ATTR(stat, 0644, spu_stat_show, NULL);

#ifdef CONFIG_KEXEC

struct crash_spu_info {
	struct spu *spu;
	u32 saved_spu_runcntl_RW;
	u32 saved_spu_status_R;
	u32 saved_spu_npc_RW;
	u64 saved_mfc_sr1_RW;
	u64 saved_mfc_dar;
	u64 saved_mfc_dsisr;
};

#define CRASH_NUM_SPUS	16	/* Enough for current hardware */
static struct crash_spu_info crash_spu_info[CRASH_NUM_SPUS];

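/*
 * Crash/kexec shutdown hook: save each registered SPU's run control,
 * status, NPC and MFC fault registers for the dump, then clear the MFC
 * master run control bit to stop the SPUs before the new kernel boots.
 */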
static void crash_kexec_stop_spus(void)
{
	struct spu *spu;
	int i;
	u64 tmp;

	for (i = 0; i < CRASH_NUM_SPUS; i++) {
		if (!crash_spu_info[i].spu)
			continue;

		spu = crash_spu_info[i].spu;

		crash_spu_info[i].saved_spu_runcntl_RW =
			in_be32(&spu->problem->spu_runcntl_RW);
		crash_spu_info[i].saved_spu_status_R =
			in_be32(&spu->problem->spu_status_R);
		crash_spu_info[i].saved_spu_npc_RW =
			in_be32(&spu->problem->spu_npc_RW);

		crash_spu_info[i].saved_mfc_dar = spu_mfc_dar_get(spu);
		crash_spu_info[i].saved_mfc_dsisr = spu_mfc_dsisr_get(spu);
		tmp = spu_mfc_sr1_get(spu);
		crash_spu_info[i].saved_mfc_sr1_RW = tmp;

		tmp &= ~MFC_STATE1_MASTER_RUN_CONTROL_MASK;
		spu_mfc_sr1_set(spu, tmp);

		__delay(200);
	}
}

static void crash_register_spus(struct list_head *list)
{
	struct spu *spu;
	int ret;

	list_for_each_entry(spu, list, full_list) {
		if (WARN_ON(spu->number >= CRASH_NUM_SPUS))
			continue;

		crash_spu_info[spu->number].spu = spu;
	}

	ret = crash_shutdown_register(&crash_kexec_stop_spus);
	if (ret)
		printk(KERN_ERR "Could not register SPU crash handler");
}

#else
static inline void crash_register_spus(struct list_head *list)
{
}
#endif

static int __init init_spu_base(void)
{
	int i, ret = 0;

	for (i = 0; i < MAX_NUMNODES; i++) {
		mutex_init(&cbe_spu_info[i].list_mutex);
		INIT_LIST_HEAD(&cbe_spu_info[i].spus);
	}

	if (!spu_management_ops)
		goto out;

	/* create sysdev class for spus */
	ret = sysdev_class_register(&spu_sysdev_class);
	if (ret)
		goto out;

	ret = spu_enumerate_spus(create_spu);
	if (ret < 0) {
		printk(KERN_WARNING "%s: Error initializing spus\n",
			__func__);
		goto out_unregister_sysdev_class;
	}

	if (ret > 0)
		fb_append_extra_logo(&logo_spe_clut224, ret);

	mutex_lock(&spu_full_list_mutex);
	xmon_register_spus(&spu_full_list);
	crash_register_spus(&spu_full_list);
	mutex_unlock(&spu_full_list_mutex);
	spu_add_sysdev_attr(&attr_stat);

	return 0;

 out_unregister_sysdev_class:
	sysdev_class_unregister(&spu_sysdev_class);
 out:
	return ret;
}
module_init(init_spu_base);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");