/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003, 06 by Ralf Baechle
 * Copyright (C) 1996 by Paul M. Antoine
 * Copyright (C) 1999 Silicon Graphics
 * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.
 */
#ifndef _ASM_SYSTEM_H
#define _ASM_SYSTEM_H

#include <linux/types.h>
#include <linux/irqflags.h>

#include <asm/addrspace.h>
#include <asm/barrier.h>
#include <asm/cpu-features.h>
#include <asm/dsp.h>
#include <asm/war.h>


/*
 * switch_to(prev, next, last) switches context from task 'prev' to task
 * 'next'.  After the switch, 'last' names the task that was running
 * immediately before 'next' regained the CPU, which is what the scheduler
 * needs to finish the context switch.
 */
extern asmlinkage void *resume(void *last, void *next, void *next_ti);

struct task_struct;

#ifdef CONFIG_MIPS_MT_FPAFF

/*
 * Handle the scheduler resume end of FPU affinity management.  We do this
 * inline to try to keep the overhead down. If we have been forced to run on
 * a "CPU" with an FPU because of a previous high level of FP computation,
 * but did not actually use the FPU during the most recent time-slice (CU1
 * isn't set), we undo the restriction on cpus_allowed.
 *
 * We're not calling set_cpus_allowed() here, because we have no need to
 * force prompt migration - we're already switching the current CPU to a
 * different thread.
 */

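/*
 * Note: besides trimming prev's affinity mask, this macro also clears
 * next->thread.emulated_fp and therefore relies on 'next' being in scope
 * at the call site in the switch_to() macro below.
 */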
#define __mips_mt_fpaff_switch_to(prev)                                 \
do {                                                                    \
        if (cpu_has_fpu &&                                              \
            (prev->thread.mflags & MF_FPUBOUND) &&                      \
             (!(KSTK_STATUS(prev) & ST0_CU1))) {                        \
                prev->thread.mflags &= ~MF_FPUBOUND;                    \
                prev->cpus_allowed = prev->thread.user_cpus_allowed;    \
        }                                                               \
        next->thread.emulated_fp = 0;                                   \
} while (0)

#else
#define __mips_mt_fpaff_switch_to(prev) do { (void) (prev); } while (0)
#endif

#define switch_to(prev,next,last)                                       \
do {                                                                    \
        __mips_mt_fpaff_switch_to(prev);                                \
        if (cpu_has_dsp)                                                \
                __save_dsp(prev);                                       \
        (last) = resume(prev, next, task_thread_info(next));            \
        if (cpu_has_dsp)                                                \
                __restore_dsp(current);                                 \
        if (cpu_has_userlocal)                                          \
                write_c0_userlocal(task_thread_info(current)->tp_value);\
} while (0)
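
/*
 * Illustrative sketch only: the scheduler core invokes this macro roughly as
 *
 *      switch_to(prev, next, prev);
 *
 * so that, once the switched-in task runs again, 'prev' names whatever task
 * was on the CPU just before it.
 */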

static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
{
        __u32 retval;

        if (cpu_has_llsc && R10000_LLSC_WAR) {
                unsigned long dummy;

                __asm__ __volatile__(
                "       .set    mips3                                   \n"
                "1:     ll      %0, %3                  # xchg_u32      \n"
                "       .set    mips0                                   \n"
                "       move    %2, %z4                                 \n"
                "       .set    mips3                                   \n"
                "       sc      %2, %1                                  \n"
                "       beqzl   %2, 1b                                  \n"
                "       .set    mips0                                   \n"
                : "=&r" (retval), "=m" (*m), "=&r" (dummy)
                : "R" (*m), "Jr" (val)
                : "memory");
        } else if (cpu_has_llsc) {
                unsigned long dummy;

                __asm__ __volatile__(
                "       .set    mips3                                   \n"
                "1:     ll      %0, %3                  # xchg_u32      \n"
                "       .set    mips0                                   \n"
                "       move    %2, %z4                                 \n"
                "       .set    mips3                                   \n"
                "       sc      %2, %1                                  \n"
                "       beqz    %2, 2f                                  \n"
                "       .subsection 2                                   \n"
                "2:     b       1b                                      \n"
                "       .previous                                       \n"
                "       .set    mips0                                   \n"
                : "=&r" (retval), "=m" (*m), "=&r" (dummy)
                : "R" (*m), "Jr" (val)
                : "memory");
        } else {
                unsigned long flags;

                raw_local_irq_save(flags);
                retval = *m;
                *m = val;
                raw_local_irq_restore(flags);   /* implies memory barrier  */
        }

        smp_mb();

        return retval;
}

#ifdef CONFIG_64BIT
static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
{
        __u64 retval;

        if (cpu_has_llsc && R10000_LLSC_WAR) {
                unsigned long dummy;

                __asm__ __volatile__(
                "       .set    mips3                                   \n"
                "1:     lld     %0, %3                  # xchg_u64      \n"
                "       move    %2, %z4                                 \n"
                "       scd     %2, %1                                  \n"
                "       beqzl   %2, 1b                                  \n"
                "       .set    mips0                                   \n"
                : "=&r" (retval), "=m" (*m), "=&r" (dummy)
                : "R" (*m), "Jr" (val)
                : "memory");
        } else if (cpu_has_llsc) {
                unsigned long dummy;

                __asm__ __volatile__(
                "       .set    mips3                                   \n"
                "1:     lld     %0, %3                  # xchg_u64      \n"
                "       move    %2, %z4                                 \n"
                "       scd     %2, %1                                  \n"
                "       beqz    %2, 2f                                  \n"
                "       .subsection 2                                   \n"
                "2:     b       1b                                      \n"
                "       .previous                                       \n"
                "       .set    mips0                                   \n"
                : "=&r" (retval), "=m" (*m), "=&r" (dummy)
                : "R" (*m), "Jr" (val)
                : "memory");
        } else {
                unsigned long flags;

                raw_local_irq_save(flags);
                retval = *m;
                *m = val;
                raw_local_irq_restore(flags);   /* implies memory barrier  */
        }

        smp_mb();

        return retval;
}
#else
extern __u64 __xchg_u64_unsupported_on_32bit_kernels(volatile __u64 * m, __u64 val);
#define __xchg_u64 __xchg_u64_unsupported_on_32bit_kernels
#endif

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid xchg().  */
extern void __xchg_called_with_bad_pointer(void);

static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
        switch (size) {
        case 4:
                return __xchg_u32(ptr, x);
        case 8:
                return __xchg_u64(ptr, x);
        }
        __xchg_called_with_bad_pointer();
        return x;
}

#define xchg(ptr, x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))
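
/*
 * Usage sketch (illustrative only): xchg() atomically stores a new value
 * and returns the old one.  4-byte objects work everywhere; 8-byte objects
 * only on 64-bit kernels (see __xchg_u64 above).
 *
 *      static int pending;
 *
 *      int was_pending = xchg(&pending, 0);
 */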

#define __HAVE_ARCH_CMPXCHG 1

static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
        unsigned long new)
{
        __u32 retval;

        if (cpu_has_llsc && R10000_LLSC_WAR) {
                __asm__ __volatile__(
                "       .set    push                                    \n"
                "       .set    noat                                    \n"
                "       .set    mips3                                   \n"
                "1:     ll      %0, %2                  # __cmpxchg_u32 \n"
                "       bne     %0, %z3, 2f                             \n"
                "       .set    mips0                                   \n"
                "       move    $1, %z4                                 \n"
                "       .set    mips3                                   \n"
                "       sc      $1, %1                                  \n"
                "       beqzl   $1, 1b                                  \n"
                "2:                                                     \n"
                "       .set    pop                                     \n"
                : "=&r" (retval), "=R" (*m)
                : "R" (*m), "Jr" (old), "Jr" (new)
                : "memory");
        } else if (cpu_has_llsc) {
                __asm__ __volatile__(
                "       .set    push                                    \n"
                "       .set    noat                                    \n"
                "       .set    mips3                                   \n"
                "1:     ll      %0, %2                  # __cmpxchg_u32 \n"
                "       bne     %0, %z3, 2f                             \n"
                "       .set    mips0                                   \n"
                "       move    $1, %z4                                 \n"
                "       .set    mips3                                   \n"
                "       sc      $1, %1                                  \n"
                "       beqz    $1, 3f                                  \n"
                "2:                                                     \n"
                "       .subsection 2                                   \n"
                "3:     b       1b                                      \n"
                "       .previous                                       \n"
                "       .set    pop                                     \n"
                : "=&r" (retval), "=R" (*m)
                : "R" (*m), "Jr" (old), "Jr" (new)
                : "memory");
        } else {
                unsigned long flags;

                raw_local_irq_save(flags);
                retval = *m;
                if (retval == old)
                        *m = new;
                raw_local_irq_restore(flags);   /* implies memory barrier  */
        }

        smp_mb();

        return retval;
}

static inline unsigned long __cmpxchg_u32_local(volatile int * m,
        unsigned long old, unsigned long new)
{
        __u32 retval;

        if (cpu_has_llsc && R10000_LLSC_WAR) {
                __asm__ __volatile__(
                "       .set    push                                    \n"
                "       .set    noat                                    \n"
                "       .set    mips3                                   \n"
                "1:     ll      %0, %2                  # __cmpxchg_u32 \n"
                "       bne     %0, %z3, 2f                             \n"
                "       .set    mips0                                   \n"
                "       move    $1, %z4                                 \n"
                "       .set    mips3                                   \n"
                "       sc      $1, %1                                  \n"
                "       beqzl   $1, 1b                                  \n"
                "2:                                                     \n"
                "       .set    pop                                     \n"
                : "=&r" (retval), "=R" (*m)
                : "R" (*m), "Jr" (old), "Jr" (new)
                : "memory");
        } else if (cpu_has_llsc) {
                __asm__ __volatile__(
                "       .set    push                                    \n"
                "       .set    noat                                    \n"
                "       .set    mips3                                   \n"
                "1:     ll      %0, %2                  # __cmpxchg_u32 \n"
                "       bne     %0, %z3, 2f                             \n"
                "       .set    mips0                                   \n"
                "       move    $1, %z4                                 \n"
                "       .set    mips3                                   \n"
                "       sc      $1, %1                                  \n"
                "       beqz    $1, 1b                                  \n"
                "2:                                                     \n"
                "       .set    pop                                     \n"
                : "=&r" (retval), "=R" (*m)
                : "R" (*m), "Jr" (old), "Jr" (new)
                : "memory");
        } else {
                unsigned long flags;

                local_irq_save(flags);
                retval = *m;
                if (retval == old)
                        *m = new;
                local_irq_restore(flags);       /* implies memory barrier  */
        }

        return retval;
}

#ifdef CONFIG_64BIT
static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old,
        unsigned long new)
{
        __u64 retval;

        if (cpu_has_llsc && R10000_LLSC_WAR) {
                __asm__ __volatile__(
                "       .set    push                                    \n"
                "       .set    noat                                    \n"
                "       .set    mips3                                   \n"
                "1:     lld     %0, %2                  # __cmpxchg_u64 \n"
                "       bne     %0, %z3, 2f                             \n"
                "       move    $1, %z4                                 \n"
                "       scd     $1, %1                                  \n"
                "       beqzl   $1, 1b                                  \n"
                "2:                                                     \n"
                "       .set    pop                                     \n"
                : "=&r" (retval), "=R" (*m)
                : "R" (*m), "Jr" (old), "Jr" (new)
                : "memory");
        } else if (cpu_has_llsc) {
                __asm__ __volatile__(
                "       .set    push                                    \n"
                "       .set    noat                                    \n"
                "       .set    mips3                                   \n"
                "1:     lld     %0, %2                  # __cmpxchg_u64 \n"
                "       bne     %0, %z3, 2f                             \n"
                "       move    $1, %z4                                 \n"
                "       scd     $1, %1                                  \n"
                "       beqz    $1, 3f                                  \n"
                "2:                                                     \n"
                "       .subsection 2                                   \n"
                "3:     b       1b                                      \n"
                "       .previous                                       \n"
                "       .set    pop                                     \n"
                : "=&r" (retval), "=R" (*m)
                : "R" (*m), "Jr" (old), "Jr" (new)
                : "memory");
        } else {
                unsigned long flags;

                raw_local_irq_save(flags);
                retval = *m;
                if (retval == old)
                        *m = new;
                raw_local_irq_restore(flags);   /* implies memory barrier  */
        }

        smp_mb();

        return retval;
}

static inline unsigned long __cmpxchg_u64_local(volatile int * m,
        unsigned long old, unsigned long new)
{
        __u64 retval;

        if (cpu_has_llsc && R10000_LLSC_WAR) {
                __asm__ __volatile__(
                "       .set    push                                    \n"
                "       .set    noat                                    \n"
                "       .set    mips3                                   \n"
                "1:     lld     %0, %2                  # __cmpxchg_u64 \n"
                "       bne     %0, %z3, 2f                             \n"
                "       move    $1, %z4                                 \n"
                "       scd     $1, %1                                  \n"
                "       beqzl   $1, 1b                                  \n"
                "2:                                                     \n"
                "       .set    pop                                     \n"
                : "=&r" (retval), "=R" (*m)
                : "R" (*m), "Jr" (old), "Jr" (new)
                : "memory");
        } else if (cpu_has_llsc) {
                __asm__ __volatile__(
                "       .set    push                                    \n"
                "       .set    noat                                    \n"
                "       .set    mips3                                   \n"
                "1:     lld     %0, %2                  # __cmpxchg_u64 \n"
                "       bne     %0, %z3, 2f                             \n"
                "       move    $1, %z4                                 \n"
                "       scd     $1, %1                                  \n"
                "       beqz    $1, 1b                                  \n"
                "2:                                                     \n"
                "       .set    pop                                     \n"
                : "=&r" (retval), "=R" (*m)
                : "R" (*m), "Jr" (old), "Jr" (new)
                : "memory");
        } else {
                unsigned long flags;

                local_irq_save(flags);
                retval = *m;
                if (retval == old)
                        *m = new;
                local_irq_restore(flags);       /* implies memory barrier  */
        }

        return retval;
}

#else
extern unsigned long __cmpxchg_u64_unsupported_on_32bit_kernels(
        volatile int * m, unsigned long old, unsigned long new);
#define __cmpxchg_u64 __cmpxchg_u64_unsupported_on_32bit_kernels
extern unsigned long __cmpxchg_u64_local_unsupported_on_32bit_kernels(
        volatile int * m, unsigned long old, unsigned long new);
#define __cmpxchg_u64_local __cmpxchg_u64_local_unsupported_on_32bit_kernels
#endif

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg().  */
extern void __cmpxchg_called_with_bad_pointer(void);

static inline unsigned long __cmpxchg(volatile void * ptr, unsigned long old,
        unsigned long new, int size)
{
        switch (size) {
        case 4:
                return __cmpxchg_u32(ptr, old, new);
        case 8:
                return __cmpxchg_u64(ptr, old, new);
        }
        __cmpxchg_called_with_bad_pointer();
        return old;
}

static inline unsigned long __cmpxchg_local(volatile void * ptr,
        unsigned long old, unsigned long new, int size)
{
        switch (size) {
        case 4:
                return __cmpxchg_u32_local(ptr, old, new);
        case 8:
                return __cmpxchg_u64_local(ptr, old, new);
        }
        __cmpxchg_called_with_bad_pointer();
        return old;
}

#define cmpxchg(ptr, old, new) \
        ((__typeof__(*(ptr)))__cmpxchg((ptr), \
                (unsigned long)(old), (unsigned long)(new), sizeof(*(ptr))))

#define cmpxchg_local(ptr, old, new) \
        ((__typeof__(*(ptr)))__cmpxchg_local((ptr), \
                (unsigned long)(old), (unsigned long)(new), sizeof(*(ptr))))
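
/*
 * Usage sketch (illustrative only): cmpxchg() returns the value that was
 * actually found at *ptr, so a lock-free read-modify-write loops until no
 * other CPU changed the word in between:
 *
 *      static unsigned int counter;
 *      unsigned int old, new;
 *
 *      do {
 *              old = counter;
 *              new = old + 1;
 *      } while (cmpxchg(&counter, old, new) != old);
 *
 * cmpxchg_local() performs the same operation but without the SMP memory
 * barrier, for data only ever touched by the local CPU.
 */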

extern void set_handler(unsigned long offset, void *addr, unsigned long len);
extern void set_uncached_handler(unsigned long offset, void *addr, unsigned long len);

typedef void (*vi_handler_t)(void);
extern void *set_vi_handler(int n, vi_handler_t addr);

extern void *set_except_vector(int n, void *addr);
extern unsigned long ebase;
extern void per_cpu_trap_init(void);

extern int stop_a_enabled;

/*
 * See include/asm-ia64/system.h; prevents deadlock on SMP
 * systems.
 */
#define __ARCH_WANT_UNLOCKED_CTXSW

#define arch_align_stack(x) (x)

#endif /* _ASM_SYSTEM_H */