/*
 * IPI management based on arch/arm/kernel/smp.c (Copyright 2002 ARM Limited)
 *
 * Copyright 2007-2009 Analog Devices Inc.
 *			Philippe Gerum <rpm@xenomai.org>
 *
 * Licensed under the GPL-2.
 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/cpu.h>
#include <asm/time.h>
#include <linux/err.h>
/*
 * Anomaly notes:
 * 05000120 - we always define corelock as 32-bit integer in L2
 */
struct corelock_slot corelock __attribute__ ((__section__(".l2.bss")));
#ifdef CONFIG_ICACHE_FLUSH_L1
unsigned long blackfin_iflush_l1_entry[NR_CPUS];
#endif
void __cpuinitdata *init_retx_coreb, *init_saved_retx_coreb,
	*init_saved_seqstat_coreb, *init_saved_icplb_fault_addr_coreb,
	*init_saved_dcplb_fault_addr_coreb;
#define BFIN_IPI_RESCHEDULE	0
#define BFIN_IPI_CALL_FUNC	1
#define BFIN_IPI_CPU_STOP	2
/* Bounds of the region whose I-cache a remote core should flush. */
struct blackfin_flush_data {
	unsigned long start;
	unsigned long end;
};

void *secondary_stack;
/* Parameters of a cross-CPU function call. */
struct smp_call_struct {
	void (*func)(void *info);
	void *info;
	int wait;
	cpumask_t *waitmask;
};
static struct blackfin_flush_data smp_flush_data;

static DEFINE_SPINLOCK(stop_lock);

struct ipi_message {
	unsigned long type;
	struct smp_call_struct call_struct;
};
/* A magic number - stress test shows this is safe for common cases */
#define BFIN_IPI_MSGQ_LEN 5
/* Simple FIFO buffer, overflow leads to panic */
struct ipi_message_queue {
	spinlock_t lock;
	unsigned long count;
	unsigned long head; /* head of the queue */
	struct ipi_message ipi_message[BFIN_IPI_MSGQ_LEN];
};
static DEFINE_PER_CPU(struct ipi_message_queue, ipi_msg_queue);
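/*
 * Each possible CPU owns one of these queues.  A sender enqueues a
 * message into the recipient's ring (under msg_queue->lock) and then
 * raises supplemental interrupt 1 on that core; the recipient drains
 * the ring from ipi_handler_int1().  With head at slot 3 and count 2,
 * for example, the next free slot is (3 + 2) % BFIN_IPI_MSGQ_LEN = 0.
 */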
static void ipi_cpu_stop(unsigned int cpu)
{
	spin_lock(&stop_lock);
	printk(KERN_CRIT "CPU%u: stopping\n", cpu);
	dump_stack();
	spin_unlock(&stop_lock);

	cpu_clear(cpu, cpu_online_map);

	local_irq_disable();

	while (1)
		SSYNC();
}
static void ipi_flush_icache(void *info)
{
	struct blackfin_flush_data *fdata = info;

	/* Invalidate the memory holding the bounds of the flushed region. */
	blackfin_dcache_invalidate_range((unsigned long)fdata,
					 (unsigned long)fdata + sizeof(*fdata));

	/* Make sure all write buffers in the data side of the core
	 * are flushed before trying to invalidate the icache.  This
	 * needs to be after the data flush and before the icache
	 * flush so that the SSYNC does the right thing in preventing
	 * the instruction prefetcher from hitting things in cached
	 * memory at the wrong time -- it runs much further ahead than
	 * the pipeline.
	 */
	SSYNC();

	/* ipi_flush_icache is invoked by generic flush_icache_range,
	 * so call blackfin arch icache flush directly here.
	 */
	blackfin_icache_flush_range(fdata->start, fdata->end);
}
static void ipi_call_function(unsigned int cpu, struct ipi_message *msg)
{
	int wait;
	void (*func)(void *info);
	void *info;

	func = msg->call_struct.func;
	info = msg->call_struct.info;
	wait = msg->call_struct.wait;
	func(info);
	if (wait) {
#ifdef __ARCH_SYNC_CORE_DCACHE
		/*
		 * 'wait' usually means synchronization between CPUs.
		 * Invalidate D cache in case shared data was changed
		 * by func() to ensure cache coherence.
		 */
		resync_core_dcache();
#endif
		cpu_clear(cpu, *msg->call_struct.waitmask);
	}
}
/* Use IRQ_SUPPLE_0 to request a reschedule.
 * When returning from interrupt to user space,
 * there is a chance to reschedule. */
static irqreturn_t ipi_handler_int0(int irq, void *dev_instance)
{
	unsigned int cpu = smp_processor_id();

	platform_clear_ipi(cpu, IRQ_SUPPLE_0);
	return IRQ_HANDLED;
}
static irqreturn_t ipi_handler_int1(int irq, void *dev_instance)
{
	struct ipi_message *msg;
	struct ipi_message_queue *msg_queue;
	unsigned int cpu = smp_processor_id();
	unsigned long flags;

	platform_clear_ipi(cpu, IRQ_SUPPLE_1);

	msg_queue = &__get_cpu_var(ipi_msg_queue);

	spin_lock_irqsave(&msg_queue->lock, flags);
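	/* The queue lock is dropped around the BFIN_IPI_CALL_FUNC and
	 * BFIN_IPI_CPU_STOP handlers below so that the callback can run
	 * (and possibly send further IPIs) without holding this CPU's
	 * queue lock. */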
	while (msg_queue->count) {
		msg = &msg_queue->ipi_message[msg_queue->head];
		switch (msg->type) {
		case BFIN_IPI_RESCHEDULE:
			/* That's the easiest one */
			break;
		case BFIN_IPI_CALL_FUNC:
			spin_unlock_irqrestore(&msg_queue->lock, flags);
			ipi_call_function(cpu, msg);
			spin_lock_irqsave(&msg_queue->lock, flags);
			break;
		case BFIN_IPI_CPU_STOP:
			spin_unlock_irqrestore(&msg_queue->lock, flags);
			ipi_cpu_stop(cpu);
			spin_lock_irqsave(&msg_queue->lock, flags);
			break;
		default:
			printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%lx\n",
			       cpu, msg->type);
			break;
		}
		msg_queue->head++;
		msg_queue->head %= BFIN_IPI_MSGQ_LEN;
		msg_queue->count--;
	}
	spin_unlock_irqrestore(&msg_queue->lock, flags);
	return IRQ_HANDLED;
}
static void ipi_queue_init(void)
{
	unsigned int cpu;
	struct ipi_message_queue *msg_queue;

	for_each_possible_cpu(cpu) {
		msg_queue = &per_cpu(ipi_msg_queue, cpu);
		spin_lock_init(&msg_queue->lock);
		msg_queue->count = 0;
		msg_queue->head = 0;
	}
}
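/*
 * Deliver an IPI of 'type' to every CPU in 'callmap': enqueue a
 * message in each recipient's per-CPU FIFO, kick it with IRQ_SUPPLE_1,
 * and, for a waited call, spin until every recipient has cleared its
 * bit in the shared waitmask.
 */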
static inline void smp_send_message(cpumask_t callmap, unsigned long type,
				    void (*func) (void *info), void *info, int wait)
{
	unsigned int cpu;
	struct ipi_message_queue *msg_queue;
	struct ipi_message *msg;
	unsigned long flags, next_msg;
	cpumask_t waitmask = callmap; /* waitmask is shared by all cpus */

	for_each_cpu_mask(cpu, callmap) {
		msg_queue = &per_cpu(ipi_msg_queue, cpu);
		spin_lock_irqsave(&msg_queue->lock, flags);
		if (msg_queue->count < BFIN_IPI_MSGQ_LEN) {
			next_msg = (msg_queue->head + msg_queue->count)
					% BFIN_IPI_MSGQ_LEN;
			msg = &msg_queue->ipi_message[next_msg];
			msg->type = type;
			if (type == BFIN_IPI_CALL_FUNC) {
				msg->call_struct.func = func;
				msg->call_struct.info = info;
				msg->call_struct.wait = wait;
				msg->call_struct.waitmask = &waitmask;
			}
			msg_queue->count++;
		} else
			panic("IPI message queue overflow\n");
		spin_unlock_irqrestore(&msg_queue->lock, flags);
		platform_send_ipi_cpu(cpu, IRQ_SUPPLE_1);
	}

	if (wait) {
		/* Poll until all recipients clear their bit; the
		 * invalidate forces each iteration to re-read waitmask
		 * from memory rather than from this core's D-cache. */
		while (!cpus_empty(waitmask))
			blackfin_dcache_invalidate_range(
					(unsigned long)(&waitmask),
					(unsigned long)(&waitmask));
#ifdef __ARCH_SYNC_CORE_DCACHE
		/*
		 * Invalidate D cache in case shared data was changed by
		 * other processors to ensure cache coherence.
		 */
		resync_core_dcache();
#endif
	}
}
int smp_call_function(void (*func)(void *info), void *info, int wait)
{
	cpumask_t callmap;

	preempt_disable();
	callmap = cpu_online_map;
	cpu_clear(smp_processor_id(), callmap);
	if (!cpus_empty(callmap))
		smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait);
	preempt_enable();

	return 0;
}
EXPORT_SYMBOL_GPL(smp_call_function);
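/*
 * Minimal usage sketch (illustrative only, not part of this file):
 * have every other core bump a counter and wait for all of them to
 * finish.  The callback and the atomic_t are hypothetical.
 *
 *	static atomic_t remote_hits = ATOMIC_INIT(0);
 *
 *	static void bump_counter(void *unused)
 *	{
 *		atomic_inc(&remote_hits);
 *	}
 *
 *	smp_call_function(bump_counter, NULL, 1);
 */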
int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
			     int wait)
{
	unsigned int cpu = cpuid;
	cpumask_t callmap;

	if (cpu_is_offline(cpu))
		return 0;
	cpus_clear(callmap);
	cpu_set(cpu, callmap);

	smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait);

	return 0;
}
EXPORT_SYMBOL_GPL(smp_call_function_single);
void smp_send_reschedule(int cpu)
{
	/* simply trigger an ipi */
	if (cpu_is_offline(cpu))
		return;
	platform_send_ipi_cpu(cpu, IRQ_SUPPLE_0);
}
void smp_send_stop(void)
{
	cpumask_t callmap;

	preempt_disable();
	callmap = cpu_online_map;
	cpu_clear(smp_processor_id(), callmap);
	if (!cpus_empty(callmap))
		smp_send_message(callmap, BFIN_IPI_CPU_STOP, NULL, NULL, 0);
	preempt_enable();
}
int __cpuinit __cpu_up(unsigned int cpu)
{
	int ret;
	static struct task_struct *idle;

	if (idle)
		free_task(idle);

	idle = fork_idle(cpu);
	if (IS_ERR(idle)) {
		printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
		return PTR_ERR(idle);
	}

	secondary_stack = task_stack_page(idle) + THREAD_SIZE;
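	/* platform_boot_secondary() releases the core, which loads its
	 * initial stack pointer from the global secondary_stack
	 * published above; the pointer is cleared again once the boot
	 * attempt completes. */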
	ret = platform_boot_secondary(cpu, idle);

	secondary_stack = NULL;

	return ret;
}
static void __cpuinit setup_secondary(unsigned int cpu)
{
	unsigned long ilat;

	bfin_write_IMASK(0);
	CSYNC();
	ilat = bfin_read_ILAT();
	CSYNC();
	bfin_write_ILAT(ilat);
	CSYNC();

	/* Enable interrupt levels IVG7-15. IARs have been already
	 * programmed by the boot CPU. */
	bfin_irq_flags |= IMASK_IVG15 |
	    IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
	    IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;
}
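/*
 * C entry point for a freshly booted core: it runs on the idle task's
 * stack handed over in __cpu_up() and brings the core into the SMP
 * world (exception vectors, mm, IRQs, local timer, caches).
 */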
void __cpuinit secondary_start_kernel(void)
{
	unsigned int cpu = smp_processor_id();
	struct mm_struct *mm = &init_mm;

	if (_bfin_swrst & SWRST_DBL_FAULT_B) {
		printk(KERN_EMERG "CoreB Recovering from DOUBLE FAULT event\n");
#ifdef CONFIG_DEBUG_DOUBLEFAULT
		printk(KERN_EMERG " While handling exception (EXCAUSE = 0x%x) at %pF\n",
			(int)init_saved_seqstat_coreb & SEQSTAT_EXCAUSE,
			init_saved_retx_coreb);
		printk(KERN_NOTICE "   DCPLB_FAULT_ADDR: %pF\n",
			init_saved_dcplb_fault_addr_coreb);
		printk(KERN_NOTICE "   ICPLB_FAULT_ADDR: %pF\n",
			init_saved_icplb_fault_addr_coreb);
#endif
		printk(KERN_NOTICE " The instruction at %pF caused a double exception\n",
			init_retx_coreb);
	}
	/*
	 * We want the D-cache to be enabled early, in case the atomic
	 * support code emulates cache coherence (see
	 * __ARCH_SYNC_CORE_DCACHE).
	 */
	init_exception_vectors();

	local_irq_disable();

	/* Attach the new idle task to the global mm. */
	atomic_inc(&mm->mm_users);
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;

	preempt_disable();

	setup_secondary(cpu);

	platform_secondary_init(cpu);

	/* setup local core timer */
	bfin_local_timer_setup();

	local_irq_enable();

	bfin_setup_caches(cpu);

	/*
	 * Calibrate loops per jiffy value.
	 * IRQs need to be enabled here - D-cache can be invalidated
	 * in timer irq handler, so core B can read correct jiffies.
	 */
	calibrate_delay();

	cpu_idle();
}
void __init smp_prepare_boot_cpu(void)
{
}
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	platform_prepare_cpus(max_cpus);
	ipi_queue_init();
	platform_request_ipi(IRQ_SUPPLE_0, ipi_handler_int0);
	platform_request_ipi(IRQ_SUPPLE_1, ipi_handler_int1);
}
void __init smp_cpus_done(unsigned int max_cpus)
{
	unsigned long bogosum = 0;
	unsigned int cpu;

	for_each_online_cpu(cpu)
		bogosum += loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);
}
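/*
 * The cores have no hardware I-cache coherence, so after this core
 * modifies code in memory it asks the others (via IPI) to invalidate
 * the affected range from their own I-caches.
 */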
void smp_icache_flush_range_others(unsigned long start, unsigned long end)
{
	smp_flush_data.start = start;
	smp_flush_data.end = end;

	if (smp_call_function(&ipi_flush_icache, &smp_flush_data, 0))
		printk(KERN_WARNING "SMP: failed to run I-cache flush request on other CPUs\n");
}
EXPORT_SYMBOL_GPL(smp_icache_flush_range_others);
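/*
 * The resync helpers below implement software cache coherence: a core
 * invalidates its entire I- or D-cache and counts how many times it
 * has done so, one counter slot per CPU.
 */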
#ifdef __ARCH_SYNC_CORE_ICACHE
unsigned long icache_invld_count[NR_CPUS];
void resync_core_icache(void)
{
	unsigned int cpu = get_cpu();
	blackfin_invalidate_entire_icache();
	icache_invld_count[cpu]++;
	put_cpu();
}
EXPORT_SYMBOL(resync_core_icache);
#endif
#ifdef __ARCH_SYNC_CORE_DCACHE
unsigned long dcache_invld_count[NR_CPUS];
unsigned long barrier_mask __attribute__ ((__section__(".l2.bss")));

void resync_core_dcache(void)
{
	unsigned int cpu = get_cpu();
	blackfin_invalidate_entire_dcache();
	dcache_invld_count[cpu]++;
	put_cpu();
}
EXPORT_SYMBOL(resync_core_dcache);
#endif
#ifdef CONFIG_HOTPLUG_CPU
int __cpuexit __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == 0)
		return -EPERM;

	set_cpu_online(cpu, false);
	return 0;
}

static DECLARE_COMPLETION(cpu_killed);

int __cpuexit __cpu_die(unsigned int cpu)
{
	return wait_for_completion_timeout(&cpu_killed, 5000);
}

void cpu_die(void)
{
	complete(&cpu_killed);

	atomic_dec(&init_mm.mm_users);
	atomic_dec(&init_mm.mm_count);

	local_irq_disable();
	platform_cpu_die();
}
#endif