ARM: 7983/1: atomics: implement a better __atomic_add_unless for v6+
Author:     Will Deacon <will.deacon@arm.com>
AuthorDate: Fri, 21 Feb 2014 16:01:48 +0000 (17:01 +0100)
Committer:  Grazvydas Ignotas <notasas@gmail.com>
CommitDate: Thu, 2 Jul 2015 00:09:54 +0000 (03:09 +0300)
Looking at perf profiles of multi-threaded hackbench runs, a significant
performance hit appears to manifest from the cmpxchg loop used to
implement the 32-bit atomic_add_unless function. This can be mitigated
by writing a direct implementation of __atomic_add_unless which doesn't
require iteration outside of the atomic operation.

Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Conflicts:
arch/arm/include/asm/atomic.h

arch/arm/include/asm/atomic.h

index 955768c..b835147 100644 (file)
@@ -147,6 +147,32 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
        : "cc");
 }
 
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+{
+       int oldval, newval;
+       unsigned long tmp;
+
+       smp_mb();
+
+       __asm__ __volatile__ ("@ atomic_add_unless\n"
+"1:    ldrex   %0, [%4]\n"
+"      teq     %0, %5\n"
+"      beq     2f\n"
+"      add     %1, %0, %6\n"
+"      strex   %2, %1, [%4]\n"
+"      teq     %2, #0\n"
+"      bne     1b\n"
+"2:"
+       : "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
+       : "r" (&v->counter), "r" (u), "r" (a)
+       : "cc");
+
+       if (oldval != u)
+               smp_mb();
+
+       return oldval;
+}
+
 #else /* ARM_ARCH_6 */
 
 #ifdef CONFIG_SMP
@@ -204,10 +230,6 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
        raw_local_irq_restore(flags);
 }
 
-#endif /* __LINUX_ARM_ARCH__ */
-
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-
 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
        int c, old;
@@ -218,6 +240,10 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
        return c;
 }
 
+#endif /* __LINUX_ARM_ARCH__ */
+
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
 #define atomic_inc(v)          atomic_add(1, v)
 #define atomic_dec(v)          atomic_sub(1, v)