Only a few core functions need to be implemented for SMP systems, so allow
the arches to override them while getting the rest of the generic header for
free.

At the least, this is enough to let the Blackfin SMP port use this header.
Signed-off-by: Mike Frysinger <vapier@gentoo.org>
Cc: Arun Sharma <asharma@fb.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
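For context, the intended usage is that an SMP arch defines its own versions
of the four core ops before pulling in this header, so the #ifndef-guarded
generic fallbacks compile out.  A minimal sketch of such an arch header is
below; the arch name and the __foo_* helpers are hypothetical and not part of
this patch:

/* arch/foo/include/asm/atomic.h -- hypothetical example, not in this patch */
#ifndef __ASM_FOO_ATOMIC_H
#define __ASM_FOO_ATOMIC_H

#include <linux/types.h>	/* for atomic_t */

/* Arch-provided SMP-safe implementations (bodies live elsewhere). */
int __foo_atomic_add_return(int i, atomic_t *v);
int __foo_atomic_sub_return(int i, atomic_t *v);
void __foo_atomic_clear_mask(unsigned long mask, atomic_t *v);
void __foo_atomic_set_mask(unsigned int mask, atomic_t *v);

/*
 * Defining these as macros satisfies the CONFIG_SMP check in the generic
 * header and makes its #ifndef-guarded fallbacks drop out.
 */
#define atomic_add_return(i, v)		__foo_atomic_add_return(i, v)
#define atomic_sub_return(i, v)		__foo_atomic_sub_return(i, v)
#define atomic_clear_mask(mask, v)	__foo_atomic_clear_mask(mask, v)
#define atomic_set_mask(mask, v)	__foo_atomic_set_mask(mask, v)

/*
 * Everything else (atomic_read, atomic_set, atomic_add_negative, ...) comes
 * from the generic header for free.
 */
#include <asm-generic/atomic.h>

#endif /* __ASM_FOO_ATOMIC_H */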
#ifndef __ASM_GENERIC_ATOMIC_H
#define __ASM_GENERIC_ATOMIC_H
#ifdef CONFIG_SMP
+/* Force people to define core atomics */
+# if !defined(atomic_add_return) || !defined(atomic_sub_return) || \
+     !defined(atomic_clear_mask) || !defined(atomic_set_mask)
+#  error "SMP requires a little arch-specific magic"
+# endif
*
* Atomically reads the value of @v.
*/
#define atomic_read(v) (*(volatile int *)&(v)->counter)
/**
* atomic_set - set atomic variable
*
* Atomically adds @i to @v and returns the result
*/
+#ifndef atomic_add_return
static inline int atomic_add_return(int i, atomic_t *v)
{
unsigned long flags;
/**
* atomic_sub_return - subtract integer from atomic variable
*
* Atomically subtracts @i from @v and returns the result
*/
+#ifndef atomic_sub_return
static inline int atomic_sub_return(int i, atomic_t *v)
{
unsigned long flags;
static inline int atomic_add_negative(int i, atomic_t *v)
{
*
* Atomically clears the bits set in @mask from @v
*/
+#ifndef atomic_clear_mask
static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
{
unsigned long flags;
v->counter &= mask;
raw_local_irq_restore(flags);
}
/**
* atomic_set_mask - Atomically set bits in atomic variable
*
* Atomically sets the bits set in @mask in @v
*/
+#ifndef atomic_set_mask
static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
unsigned long flags;
v->counter |= mask;
raw_local_irq_restore(flags);
}
/* Assume that atomic operations are already serializing */
#define smp_mb__before_atomic_dec() barrier()
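The hunks above are truncated mid-function; for reference, the generic UP
fallbacks all follow the same pattern of a plain read-modify-write bracketed
by raw_local_irq_save()/raw_local_irq_restore().  A rough sketch of that
pattern (not the exact upstream bodies) is below; note that atomic_clear_mask
inverts the mask before masking, which is why the context line above reads
"v->counter &= mask":

/* Sketch of the UP fallback pattern used by the generic header. */
static inline int sketch_atomic_add_return(int i, atomic_t *v)
{
	unsigned long flags;
	int temp;

	raw_local_irq_save(flags);	/* serialize against local interrupts */
	temp = v->counter;
	temp += i;
	v->counter = temp;
	raw_local_irq_restore(flags);

	return temp;
}

static inline void sketch_atomic_clear_mask(unsigned long mask, atomic_t *v)
{
	unsigned long flags;

	mask = ~mask;			/* invert so the AND below clears the bits in mask */
	raw_local_irq_save(flags);
	v->counter &= mask;
	raw_local_irq_restore(flags);
}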