/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 *
 * Do not include directly; use <asm/atomic.h>.
 */
#ifndef _ASM_TILE_ATOMIC_32_H
#define _ASM_TILE_ATOMIC_32_H

#include <arch/chip.h>

#ifndef __ASSEMBLY__

/* Tile-specific routines to support <asm/atomic.h>. */
int _atomic_xchg(atomic_t *v, int n);
int _atomic_xchg_add(atomic_t *v, int i);
int _atomic_xchg_add_unless(atomic_t *v, int a, int u);
int _atomic_cmpxchg(atomic_t *v, int o, int n);
/**
 * atomic_xchg - atomically exchange contents of memory with a new value
 * @v: pointer of type atomic_t
 * @n: integer value to store in memory
 *
 * Atomically sets @v to @n and returns the old value of @v.
 */
static inline int atomic_xchg(atomic_t *v, int n)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic_xchg(v, n);
}
/**
 * atomic_cmpxchg - atomically exchange contents of memory if it matches
 * @v: pointer of type atomic_t
 * @o: old value that memory should have
 * @n: new value to write to memory if it matches
 *
 * Atomically checks if @v holds @o and replaces it with @n if so.
 * Returns the old value at @v.
 */
static inline int atomic_cmpxchg(atomic_t *v, int o, int n)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic_cmpxchg(v, o, n);
}
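/*
 * Usage sketch (illustrative only, not part of this header's API):
 * the classic compare-and-swap retry loop, here doubling a counter.
 * Any read-modify-write operation can be layered on atomic_cmpxchg()
 * this way.
 */
static inline int example_atomic_double(atomic_t *v)
{
	int old = 0, cur;

	/* Retry until the value we saw is the value still in memory. */
	while ((cur = atomic_cmpxchg(v, old, old * 2)) != old)
		old = cur;
	return old * 2;  /* the value we installed */
}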
/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static inline void atomic_add(int i, atomic_t *v)
{
	_atomic_xchg_add(v, i);
}
/**
 * atomic_add_return - add integer and return
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns @i + @v.
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic_xchg_add(v, i) + i;
}
/**
 * atomic_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic_xchg_add_unless(v, a, u) != u;
}
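/*
 * Usage sketch (illustrative only): a hypothetical refcount-style
 * "try get" built on atomic_add_unless(), which refuses to resurrect
 * an object whose count has already dropped to zero.
 */
static inline int example_try_get_ref(atomic_t *refcount)
{
	/* Non-zero on success, zero if the count was already 0. */
	return atomic_add_unless(refcount, 1, 0);
}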
/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @n: value to assign
 *
 * Atomically sets the value of @v to @n.
 *
 * atomic_set() can't be just a raw store, since it would be lost if it
 * fell between the load and store of one of the other atomic ops.
 */
static inline void atomic_set(atomic_t *v, int n)
{
	_atomic_xchg(v, n);
}
#define xchg(ptr, x) ((typeof(*(ptr))) \
	((sizeof(*(ptr)) == sizeof(atomic_t)) ? \
	 atomic_xchg((atomic_t *)(ptr), (long)(x)) : \
	 __xchg_called_with_bad_pointer()))

#define cmpxchg(ptr, o, n) ((typeof(*(ptr))) \
	((sizeof(*(ptr)) == sizeof(atomic_t)) ? \
	 atomic_cmpxchg((atomic_t *)(ptr), (long)(o), (long)(n)) : \
	 __cmpxchg_called_with_bad_pointer()))
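/*
 * Illustrative note: the macros above only handle 4-byte objects.
 * Any other size resolves to __xchg_called_with_bad_pointer() or
 * __cmpxchg_called_with_bad_pointer(), which are deliberately never
 * defined, so misuse fails at link time. A hypothetical caller:
 */
static inline int example_set_flag(int *flag)
{
	/* Swap in 1 and hand back whatever was there before. */
	return xchg(flag, 1);
}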
/* A 64bit atomic type */

typedef struct {
	u64 __aligned(8) counter;
} atomic64_t;

#define ATOMIC64_INIT(val) { (val) }

u64 _atomic64_xchg(atomic64_t *v, u64 n);
u64 _atomic64_xchg_add(atomic64_t *v, u64 i);
u64 _atomic64_xchg_add_unless(atomic64_t *v, u64 a, u64 u);
u64 _atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n);
/**
 * atomic64_read - read atomic variable
 * @v: pointer of type atomic64_t
 *
 * Atomically reads the value of @v.
 */
static inline u64 atomic64_read(const atomic64_t *v)
{
	/*
	 * Requires an atomic op to read both 32-bit parts consistently.
	 * Casting away const is safe since the atomic support routines
	 * do not write to memory if the value has not been modified.
	 */
	return _atomic64_xchg_add((atomic64_t *)v, 0);
}
/**
 * atomic64_xchg - atomically exchange contents of memory with a new value
 * @v: pointer of type atomic64_t
 * @n: integer value to store in memory
 *
 * Atomically sets @v to @n and returns the old value of @v.
 */
static inline u64 atomic64_xchg(atomic64_t *v, u64 n)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic64_xchg(v, n);
}
/**
 * atomic64_cmpxchg - atomically exchange contents of memory if it matches
 * @v: pointer of type atomic64_t
 * @o: old value that memory should have
 * @n: new value to write to memory if it matches
 *
 * Atomically checks if @v holds @o and replaces it with @n if so.
 * Returns the old value at @v.
 */
static inline u64 atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic64_cmpxchg(v, o, n);
}
/**
 * atomic64_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic64_t
 *
 * Atomically adds @i to @v.
 */
static inline void atomic64_add(u64 i, atomic64_t *v)
{
	_atomic64_xchg_add(v, i);
}
/**
 * atomic64_add_return - add integer and return
 * @v: pointer of type atomic64_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns @i + @v.
 */
static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic64_xchg_add(v, i) + i;
}
/**
 * atomic64_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic64_xchg_add_unless(v, a, u) != u;
}
/**
 * atomic64_set - set atomic variable
 * @v: pointer of type atomic64_t
 * @n: value to assign
 *
 * Atomically sets the value of @v to @n.
 *
 * atomic64_set() can't be just a raw store, since it would be lost if it
 * fell between the load and store of one of the other atomic ops.
 */
static inline void atomic64_set(atomic64_t *v, u64 n)
{
	_atomic64_xchg(v, n);
}
#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)			atomic64_add(1LL, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_sub_return(i, v)	atomic64_add_return(-(i), (v))
#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_sub(i, v)		atomic64_add(-(i), (v))
#define atomic64_dec(v)			atomic64_sub(1LL, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)
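/*
 * Usage sketch (illustrative only): the derived helpers above make
 * common counter patterns one-liners, e.g. dropping a 64-bit
 * reference count and noticing when it hits zero.
 */
static inline int example_put_ref64(atomic64_t *refcount)
{
	/* Non-zero when this caller released the last reference. */
	return atomic64_dec_and_test(refcount);
}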
/*
 * We need a barrier before modifying the word, since the _atomic_xxx()
 * routines just "tns" the lock and then do a read/modify/write of the word.
 * But after the word is updated, the routine issues an "mf" before returning,
 * and since it's a function call, we don't even need a compiler barrier.
 */
#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_dec()	do { } while (0)
#define smp_mb__after_atomic_inc()	do { } while (0)
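/*
 * Usage sketch (illustrative only): the pattern these macros exist
 * for. atomic_add(-1, ...) stands in for atomic_dec(), which the
 * generic <asm/atomic.h> layer provides. The "before" barrier is a
 * real "mf" here; the "after" side is free because the support
 * routine already ends with one.
 */
static inline void example_ordered_dec(atomic_t *v, volatile int *flag)
{
	*flag = 1;  /* must be visible before the decrement */
	smp_mb__before_atomic_dec();
	atomic_add(-1, v);  /* i.e., atomic_dec(v) */
	smp_mb__after_atomic_dec();  /* compiles away on this platform */
}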
/*
 * Support "tns" atomic integers.  These are atomic integers that can
 * hold any value but "1".  They are more efficient than regular atomic
 * operations because the "lock" (aka acquire) step is a single "tns"
 * in the uncontended case, and the "unlock" (aka release) step is a
 * single "store" without an mf.  (However, note that on tilepro the
 * "tns" will evict the local cache line, so it's not all upside.)
 *
 * Note that you can ONLY observe the value stored in the pointer
 * using these operations; a direct read of the value may confusingly
 * return the special value "1".  (See the usage sketch following
 * tns_atomic_xchg() below.)
 */

int __tns_atomic_acquire(atomic_t *);
void __tns_atomic_release(atomic_t *p, int v);

static inline void tns_atomic_set(atomic_t *v, int i)
{
	__tns_atomic_acquire(v);
	__tns_atomic_release(v, i);
}
static inline int tns_atomic_cmpxchg(atomic_t *v, int o, int n)
{
	int ret = __tns_atomic_acquire(v);
	__tns_atomic_release(v, (ret == o) ? n : ret);
	return ret;
}

static inline int tns_atomic_xchg(atomic_t *v, int n)
{
	int ret = __tns_atomic_acquire(v);
	__tns_atomic_release(v, n);
	return ret;
}
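/*
 * Usage sketch (illustrative only): reading a "tns" atomic safely.
 * A compare-and-swap of 0 with 0 leaves any stored value in place
 * and returns it, without ever exposing the transient sentinel "1".
 */
static inline int example_tns_read(atomic_t *mode)
{
	return tns_atomic_cmpxchg(mode, 0, 0);
}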
#endif /* !__ASSEMBLY__ */

/*
 * Internal definitions only beyond this point.
 */

#define ATOMIC_LOCKS_FOUND_VIA_TABLE() \
	(!CHIP_HAS_CBOX_HOME_MAP() && defined(CONFIG_SMP))

#if ATOMIC_LOCKS_FOUND_VIA_TABLE()
/* Number of entries in atomic_lock_ptr[]. */
#define ATOMIC_HASH_L1_SHIFT	6
#define ATOMIC_HASH_L1_SIZE	(1 << ATOMIC_HASH_L1_SHIFT)

/* Number of locks in each struct pointed to by atomic_lock_ptr[]. */
#define ATOMIC_HASH_L2_SHIFT	(CHIP_L2_LOG_LINE_SIZE() - 2)
#define ATOMIC_HASH_L2_SIZE	(1 << ATOMIC_HASH_L2_SHIFT)
#else /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */

/*
 * Number of atomic locks in atomic_locks[].  Must be a power of two.
 * There is no reason for more than PAGE_SIZE / 8 entries, since that
 * is the maximum number of pointer bits we can use to index this.
 * And we cannot have more than PAGE_SIZE / 4, since this has to
 * fit on a single page and each entry takes 4 bytes.
 */
#define ATOMIC_HASH_SHIFT	(PAGE_SHIFT - 3)
#define ATOMIC_HASH_SIZE	(1 << ATOMIC_HASH_SHIFT)

extern int atomic_locks[];
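#ifndef __ASSEMBLY__
/*
 * Illustrative sketch only (hypothetical helper; the real selection
 * logic lives in the atomic support code): picking a lock by hashing
 * low-order word-address bits of the atomic variable's pointer, which
 * is what bounds the useful table size in the comment above.
 */
static inline int *example_hashed_lock(volatile void *v)
{
	/* Each entry covers a 4-byte word; mask the index to table size. */
	unsigned long i = ((unsigned long)v >> 2) & (ATOMIC_HASH_SIZE - 1);
	return &atomic_locks[i];
}
#endif /* !__ASSEMBLY__ */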
#endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
/*
 * All the code that may fault while holding an atomic lock must
 * place the pointer to the lock in ATOMIC_LOCK_REG so the fault code
 * can correctly release and reacquire the lock.  Note that we
 * mention the register number in a comment in "lib/atomic_asm.S" to
 * discourage assembly coders from using this register by mistake, so if
 * it is changed here, change that comment as well.
 */
#define ATOMIC_LOCK_REG 20
#define ATOMIC_LOCK_REG_NAME r20
#ifndef __ASSEMBLY__

/* Called from setup to initialize a hash table to point to per_cpu locks. */
void __init_atomic_per_cpu(void);
/* Support releasing the atomic lock in do_page_fault_ics(). */
void __atomic_fault_unlock(int *lock_ptr);
/* Private helper routines in lib/atomic_asm_32.S */
extern struct __get_user __atomic_cmpxchg(volatile int *p,
					  int *lock, int o, int n);
extern struct __get_user __atomic_xchg(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_xchg_add(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_xchg_add_unless(volatile int *p,
						  int *lock, int o, int n);
extern struct __get_user __atomic_or(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n);
extern u64 __atomic64_cmpxchg(volatile u64 *p, int *lock, u64 o, u64 n);
extern u64 __atomic64_xchg(volatile u64 *p, int *lock, u64 n);
extern u64 __atomic64_xchg_add(volatile u64 *p, int *lock, u64 n);
extern u64 __atomic64_xchg_add_unless(volatile u64 *p,
				      int *lock, u64 o, u64 n);
#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_ATOMIC_32_H */