/*
 * Atomic operations for the Hexagon architecture
 *
 * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <linux/types.h>

#define ATOMIC_INIT(i)		{ (i) }
#define atomic_set(v, i)	((v)->counter = (i))

/**
 * atomic_read - reads a word, atomically
 * @v: pointer to atomic value
 *
 * Assumes all word reads on our architecture are atomic.
 */
#define atomic_read(v)		((v)->counter)

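/*
 * Usage sketch (illustrative only, not part of the original header):
 * atomic_read()/atomic_set() are plain word accesses, so each is atomic
 * on its own, but a read-modify-write built from the pair is not.  The
 * helper name example_read_then_clear is hypothetical.
 */
static inline int example_read_then_clear(atomic_t *v)
{
	int seen = atomic_read(v);	/* atomic single-word read */

	atomic_set(v, 0);		/* atomic single-word write */
	return seen;	/* another CPU may have written *v in between */
}
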
/**
 * atomic_xchg - atomically exchange contents of memory with a new value
 * @v: pointer to memory to change
 * @new: new value (technically passed in a register -- see xchg)
 */
#define atomic_xchg(v, new)	(xchg(&((v)->counter), (new)))

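/*
 * Usage sketch (illustrative, hypothetical helper name): because
 * atomic_xchg() returns the previous value, it gives a simple
 * test-and-set style flag.
 */
static inline int example_test_and_set_flag(atomic_t *flag)
{
	/* Nonzero means somebody else had already set the flag. */
	return atomic_xchg(flag, 1);
}
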
/**
 * atomic_cmpxchg - atomic compare-and-exchange values
 * @v: pointer to value to change
 * @old: desired old value to match
 * @new: new value to put in
 *
 * Parameters are then pointer, value-in-register, value-in-register,
 * and the output is the old value.
 *
 * This is more involved on architectures that lack a
 * load-locked/store-conditional primitive like our memw_locked.
 *
 * Kind of the linchpin of the rest of the generically defined routines.
 * Remember V2 had that bug with the dotnew predicate set by memw_locked.
 *
 * "old" is the "expected" old value; __oldval is the actual old value.
 */
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int __oldval;

	asm volatile(
		"1:	%0 = memw_locked(%1);\n"
		"	{ P0 = cmp.eq(%0,%2);\n"
		"	  if (!P0.new) jump:nt 2f; }\n"
		"	memw_locked(%1,P0) = %3;\n"
		"	if (!P0) jump 1b;\n"
		"2:\n"
		: "=&r" (__oldval)
		: "r" (&v->counter), "r" (old), "r" (new)
		: "memory", "p0");

	return __oldval;
}

static inline int atomic_add_return(int i, atomic_t *v)
{
	int output;

	__asm__ __volatile__ (
		"1:	%0 = memw_locked(%1);\n"
		"	%0 = add(%0,%2);\n"
		"	memw_locked(%1,P3)=%0;\n"
		"	if !P3 jump 1b;\n"
		: "=&r" (output)
		: "r" (&v->counter), "r" (i)
		: "memory", "p3");

	return output;
}

#define atomic_add(i, v) atomic_add_return(i, (v))

static inline int atomic_sub_return(int i, atomic_t *v)
{
	int output;

	__asm__ __volatile__ (
		"1:	%0 = memw_locked(%1);\n"
		"	%0 = sub(%0,%2);\n"
		"	memw_locked(%1,P3)=%0;\n"
		"	if !P3 jump 1b;\n"
		: "=&r" (output)
		: "r" (&v->counter), "r" (i)
		: "memory", "p3");

	return output;
}

#define atomic_sub(i, v) atomic_sub_return(i, (v))

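/*
 * Usage sketch (illustrative, hypothetical helper name): the *_return
 * forms hand back the post-update value, so callers can act on the new
 * state without a second, racy read.
 */
static inline int example_take_tokens(atomic_t *budget, int n)
{
	/*
	 * Nonzero if the budget stayed non-negative after taking n;
	 * a failed take leaves the counter negative, so a real caller
	 * would add n back or treat negative as empty.
	 */
	return atomic_sub_return(n, budget) >= 0;
}
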
/**
 * atomic_add_unless - add unless the number is a given value
 * @v: pointer to value
 * @a: amount to add
 * @u: unless value is equal to u
 *
 * Returns 1 if the add happened, 0 if it didn't.
 */
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int output, __oldval;

	asm volatile(
		"1:	%0 = memw_locked(%2);"
		"	{ p3 = cmp.eq(%0, %4);"
		"	  if (p3.new) jump:nt 2f;"
		"	  %0 = add(%0, %3);"
		"	  %1 = #0; }"
		"	memw_locked(%2, p3) = %0;"
		"	{ if !p3 jump 1b;"
		"	  %1 = #1; }"
		"2:"
		: "=&r" (__oldval), "=&r" (output)
		: "r" (v), "r" (a), "r" (u)
		: "memory", "p3");

	return output;
}

#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

#define atomic_inc(v) atomic_add(1, (v))
#define atomic_dec(v) atomic_sub(1, (v))

#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, (v)) == 0)
#define atomic_add_negative(i, v) (atomic_add_return(i, (v)) < 0)

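/*
 * Usage sketch (illustrative; the struct, helper names, and free_fn
 * callback are hypothetical): the common refcount pattern built from
 * the helpers above.  atomic_inc_not_zero() refuses to resurrect an
 * object whose count already hit zero, and atomic_dec_and_test() tells
 * exactly one caller to free it.
 */
struct example_obj {
	atomic_t refcount;
};

static inline int example_obj_tryget(struct example_obj *obj)
{
	return atomic_inc_not_zero(&obj->refcount);	/* 0: object dying */
}

static inline void example_obj_put(struct example_obj *obj,
				   void (*free_fn)(struct example_obj *))
{
	if (atomic_dec_and_test(&obj->refcount))	/* dropped last ref */
		free_fn(obj);
}
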
#define atomic_inc_return(v) (atomic_add_return(1, v))
#define atomic_dec_return(v) (atomic_sub_return(1, v))

#endif	/* _ASM_ATOMIC_H */