1 /* MN10300 System definitions
3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
14 #include <asm/cpu-regs.h>
19 #include <linux/kernel.h>
/* Perform the low-level thread context switch; implemented out of line in
 * switch_to.S.  Returns the task that was previously running (prev_task)
 * so the scheduler can finish its bookkeeping for the outgoing task. */
25 struct task_struct *__switch_to(struct thread_struct *prev,
26 struct thread_struct *next,
27 struct task_struct *prev_task);
29 /* context switching is now performed out-of-line in switch_to.S */
/* Record the caller's return address in thread.wchan around the switch so
 * "where is this task blocked" reporting can see it; the field is cleared
 * again once this task is switched back in.
 * NOTE(review): the macro appears truncated in this view (no visible
 * do { ... } while (0) wrapper) -- confirm against the full file. */
30 #define switch_to(prev, next, last) \
32 current->thread.wchan = (u_long) __builtin_return_address(0); \
33 (last) = __switch_to(&(prev)->thread, &(next)->thread, (prev)); \
35 current->thread.wchan = 0; \
/* No extra stack-top randomisation/alignment on this arch: identity map. */
38 #define arch_align_stack(x) (x)
/* Emit a single no-op instruction (compiler may not reorder around it). */
40 #define nop() asm volatile ("nop")
42 #endif /* !__ASSEMBLY__ */
45 * Force strict CPU ordering.
46 * And yes, this is required on UP too when we're talking
49 * For now, "wmb()" doesn't actually do anything, as all
50 * Intel CPUs follow what Intel calls a *Processor Order*,
51 * in which all writes are seen in the program order even
54 * I expect future Intel CPUs to have a weaker ordering,
55 * but I'd also expect them to finally get their act together
56 * and add some real memory barriers if so.
58 * Some non-Intel clones support out-of-order stores. wmb() ceases to be a
/* mb()/wmb() expand to pure compiler barriers here: the "memory" clobber
 * stops the compiler reordering accesses, but no hardware barrier
 * instruction is emitted. */
62 #define mb() asm volatile ("": : :"memory")
64 #define wmb() asm volatile ("": : :"memory")
/* NOTE(review): smp_rmb/smp_wmb/smp_mb appear twice below; presumably the
 * two groups sit in the CONFIG_SMP and !CONFIG_SMP arms of an #ifdef not
 * visible in this view -- confirm against the full file. */
68 #define smp_rmb() rmb()
69 #define smp_wmb() wmb()
71 #define smp_mb() barrier()
72 #define smp_rmb() barrier()
73 #define smp_wmb() barrier()
/* Store a value, then force ordering of that store with a barrier. */
76 #define set_mb(var, value) do { var = value; mb(); } while (0)
77 #define set_wmb(var, value) do { var = value; wmb(); } while (0)
/* Data-dependency barriers are defined as no-ops on this architecture. */
79 #define read_barrier_depends() do {} while (0)
80 #define smp_read_barrier_depends() do {} while (0)
82 /*****************************************************************************/
85 * - "disabled": run in IM1/2
86 * - level 0 - GDB stub
87 * - level 1 - virtual serial DMA (if present)
88 * - level 5 - normal interrupt priority
89 * - level 6 - timer interrupt
90 * - "enabled": run in IM7
/* Interrupt-mask level used when "disabling" interrupts: IRQs at or above
 * this EPSW.IM priority stay deliverable.  With the on-chip serial driver
 * (CONFIG_MN10300_TTYSM), level 2 keeps the virtual serial DMA running;
 * otherwise level 1 leaves only the GDB stub serviceable (see the priority
 * list in the comment above).
 * NOTE(review): only one arm is visible here; an #else presumably separates
 * the two definitions in the full file -- confirm. */
92 #ifdef CONFIG_MN10300_TTYSM
93 #define MN10300_CLI_LEVEL EPSW_IM_2
95 #define MN10300_CLI_LEVEL EPSW_IM_1
/* local_save_flags(x): capture the current EPSW (interrupt state) into x;
 * typecheck() enforces that x is an unsigned long lvalue. */
98 #define local_save_flags(x) \
100 typecheck(unsigned long, x); \
/* local_irq_disable(): rather than masking everything, drop the interrupt
 * mask to MN10300_CLI_LEVEL so the debug/serial interrupts noted above keep
 * working while "interrupts are off". */
107 #define local_irq_disable() \
116 : "i"(~EPSW_IM), "i"(EPSW_IE | MN10300_CLI_LEVEL) \
/* local_irq_save(x): save the current flags into x, then disable IRQs. */
120 #define local_irq_save(x) \
122 local_save_flags(x); \
123 local_irq_disable(); \
127 * we make sure local_irq_enable() doesn't cause priority inversion
/* EPSW image applied by local_irq_enable(); maintained elsewhere so that
 * enabling IRQs restores the proper running interrupt-mask level instead
 * of blindly unmasking everything. */
131 extern unsigned long __mn10300_irq_enabled_epsw;
/* local_irq_enable(): re-enable interrupts by loading the interrupt-mask
 * level from __mn10300_irq_enabled_epsw (avoids priority inversion). */
135 #define local_irq_enable() \
145 : "i"(~EPSW_IM), "r"(__mn10300_irq_enabled_epsw) \
/* local_irq_restore(x): restore the interrupt state previously captured by
 * local_irq_save()/local_save_flags(); x must be an unsigned long. */
150 #define local_irq_restore(x) \
152 typecheck(unsigned long, x); \
/* irqs_disabled(): true when the current EPSW interrupt-mask level is at or
 * below MN10300_CLI_LEVEL, i.e. local_irq_disable() is in effect. */
164 #define irqs_disabled() \
166 unsigned long flags; \
167 local_save_flags(flags); \
168 (flags & EPSW_IM) <= MN10300_CLI_LEVEL; \
171 /* hook to save power by halting the CPU
172 * - called from the idle loop
173 * - must reenable interrupts (which takes three instruction cycles to complete)
/* Sets EPSW_IE|EPSW_IM (enabling interrupts) and then, presumably, writes
 * CPUM_SLEEP into the CPUM register to halt the core until the next IRQ --
 * asm body partly elided in this view; confirm against the full file. */
175 #define safe_halt() \
177 asm volatile(" or %0,epsw \n" \
182 : "i"(EPSW_IE|EPSW_IM), "n"(&CPUM), "i"(CPUM_SLEEP)\
/* Assembly-language counterparts of local_irq_enable()/local_irq_disable()
 * for use in .S files.  CLI pads with three nops because a change to the
 * EPSW interrupt mask takes effect only after a short delay (cf. the
 * "three instruction cycles" note earlier in this file). */
187 #define STI or EPSW_IE|EPSW_IM,epsw
188 #define CLI and ~EPSW_IM,epsw; or EPSW_IE|MN10300_CLI_LEVEL,epsw; nop; nop; nop
190 /*****************************************************************************/
192 * MN10300 doesn't actually have an exchange instruction
/* __xg(): cast helper for the xchg/cmpxchg macros.  The dummy struct with
 * an oversized array exists only so the cast target refers to a large
 * memory object (a common kernel idiom for such accessors); no object of
 * this type is ever created. */
196 struct __xchg_dummy { unsigned long a[100]; };
197 #define __xg(x) ((struct __xchg_dummy *)(x))
200 unsigned long __xchg(volatile unsigned long *m, unsigned long val)
202 unsigned long retval;
205 local_irq_save(flags);
208 local_irq_restore(flags);
/* xchg(ptr, v): swap *ptr with v and return the old value, keeping the
 * result's type.  Implemented by masking local interrupts, since this CPU
 * has no exchange instruction (see comment above).
 * NOTE(review): IRQ-masking makes this atomic on the local CPU only --
 * presumably sufficient for this architecture; confirm. */
212 #define xchg(ptr, v) \
213 ((__typeof__(*(ptr))) __xchg((unsigned long *)(ptr), \
/* Compare-and-exchange implemented by masking local interrupts; always
 * returns the prior value of *m.  Presumably stores new only when
 * *m == old -- the conditional-store lines are elided in this view. */
216 static inline unsigned long __cmpxchg(volatile unsigned long *m,
217 unsigned long old, unsigned long new)
219 unsigned long retval;
222 local_irq_save(flags);
226 local_irq_restore(flags);
/* cmpxchg(ptr, o, n): type-preserving wrapper around __cmpxchg() so the
 * result has the type of *ptr regardless of the pointer's actual type. */
230 #define cmpxchg(ptr, o, n) \
231 ((__typeof__(*(ptr))) __cmpxchg((unsigned long *)(ptr), \
232 (unsigned long)(o), \
235 #endif /* !__ASSEMBLY__ */
237 #endif /* __KERNEL__ */
238 #endif /* _ASM_SYSTEM_H */