Merge branch 'for-2.6.39' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu
author Linus Torvalds <torvalds@linux-foundation.org>
Wed, 16 Mar 2011 15:22:41 +0000 (08:22 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Wed, 16 Mar 2011 15:22:41 +0000 (08:22 -0700)
* 'for-2.6.39' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu:
  percpu, x86: Add arch-specific this_cpu_cmpxchg_double() support
  percpu: Generic support for this_cpu_cmpxchg_double()
  alpha: use L1_CACHE_BYTES for cacheline size in the linker script
  percpu: align percpu readmostly subsection to cacheline

Fix up trivial conflict in arch/x86/kernel/vmlinux.lds.S due to the
percpu alignment having changed ("x86: Reduce back the alignment of the
per-CPU data section")

23 files changed:
arch/alpha/kernel/vmlinux.lds.S
arch/arm/kernel/vmlinux.lds.S
arch/blackfin/kernel/vmlinux.lds.S
arch/cris/kernel/vmlinux.lds.S
arch/frv/kernel/vmlinux.lds.S
arch/ia64/kernel/vmlinux.lds.S
arch/m32r/kernel/vmlinux.lds.S
arch/mips/kernel/vmlinux.lds.S
arch/mn10300/kernel/vmlinux.lds.S
arch/parisc/kernel/vmlinux.lds.S
arch/powerpc/kernel/vmlinux.lds.S
arch/s390/kernel/vmlinux.lds.S
arch/sh/kernel/vmlinux.lds.S
arch/sparc/kernel/vmlinux.lds.S
arch/tile/kernel/vmlinux.lds.S
arch/um/include/asm/common.lds.S
arch/x86/include/asm/percpu.h
arch/x86/kernel/vmlinux.lds.S
arch/x86/lib/Makefile
arch/x86/lib/cmpxchg16b_emu.S [new file with mode: 0644]
arch/xtensa/kernel/vmlinux.lds.S
include/asm-generic/vmlinux.lds.h
include/linux/percpu.h

diff --git a/arch/alpha/kernel/vmlinux.lds.S b/arch/alpha/kernel/vmlinux.lds.S
index 003ef4c..433be2a 100644
@@ -1,5 +1,6 @@
 #include <asm-generic/vmlinux.lds.h>
 #include <asm/thread_info.h>
+#include <asm/cache.h>
 #include <asm/page.h>
 
 OUTPUT_FORMAT("elf64-alpha")
@@ -38,7 +39,7 @@ SECTIONS
        __init_begin = ALIGN(PAGE_SIZE);
        INIT_TEXT_SECTION(PAGE_SIZE)
        INIT_DATA_SECTION(16)
-       PERCPU(PAGE_SIZE)
+       PERCPU(L1_CACHE_BYTES, PAGE_SIZE)
        /* Align to THREAD_SIZE rather than PAGE_SIZE here so any padding page
           needed for the THREAD_SIZE aligned init_task gets freed after init */
        . = ALIGN(THREAD_SIZE);
@@ -46,7 +47,7 @@ SECTIONS
        /* Freed after init ends here */
 
        _data = .;
-       RW_DATA_SECTION(64, PAGE_SIZE, THREAD_SIZE)
+       RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
 
        .got : {
                *(.got)
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 6146279..28fea9b 100644
@@ -78,7 +78,7 @@ SECTIONS
 #endif
        }
 
-       PERCPU(PAGE_SIZE)
+       PERCPU(32, PAGE_SIZE)
 
 #ifndef CONFIG_XIP_KERNEL
        . = ALIGN(PAGE_SIZE);
diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S
index 4122678..c40d07f 100644
@@ -136,7 +136,7 @@ SECTIONS
 
        . = ALIGN(16);
        INIT_DATA_SECTION(16)
-       PERCPU(4)
+       PERCPU(32, 4)
 
        .exit.data :
        {
diff --git a/arch/cris/kernel/vmlinux.lds.S b/arch/cris/kernel/vmlinux.lds.S
index c49be84..728bbd9 100644
@@ -102,7 +102,7 @@ SECTIONS
 #endif
        __vmlinux_end = .;              /* Last address of the physical file. */
 #ifdef CONFIG_ETRAX_ARCH_V32
-       PERCPU(PAGE_SIZE)
+       PERCPU(32, PAGE_SIZE)
 
        .init.ramfs : {
                INIT_RAM_FS
diff --git a/arch/frv/kernel/vmlinux.lds.S b/arch/frv/kernel/vmlinux.lds.S
index 8b973f3..0daae8a 100644
@@ -37,7 +37,7 @@ SECTIONS
   _einittext = .;
 
   INIT_DATA_SECTION(8)
-  PERCPU(4096)
+  PERCPU(L1_CACHE_BYTES, 4096)
 
   . = ALIGN(PAGE_SIZE);
   __init_end = .;
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 5a4d044..787de4a 100644
@@ -198,7 +198,7 @@ SECTIONS {
 
        /* Per-cpu data: */
        . = ALIGN(PERCPU_PAGE_SIZE);
-       PERCPU_VADDR(PERCPU_ADDR, :percpu)
+       PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
        __phys_per_cpu_start = __per_cpu_load;
        /*
         * ensure percpu data fits
diff --git a/arch/m32r/kernel/vmlinux.lds.S b/arch/m32r/kernel/vmlinux.lds.S
index 7da94ea..c194d64 100644
@@ -53,7 +53,7 @@ SECTIONS
   __init_begin = .;
   INIT_TEXT_SECTION(PAGE_SIZE)
   INIT_DATA_SECTION(16)
-  PERCPU(PAGE_SIZE)
+  PERCPU(32, PAGE_SIZE)
   . = ALIGN(PAGE_SIZE);
   __init_end = .;
   /* freed after init ends here */
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index 570607b..832afbb 100644
@@ -115,7 +115,7 @@ SECTIONS
                EXIT_DATA
        }
 
-       PERCPU(PAGE_SIZE)
+       PERCPU(1 << CONFIG_MIPS_L1_CACHE_SHIFT, PAGE_SIZE)
        . = ALIGN(PAGE_SIZE);
        __init_end = .;
        /* freed after init ends here */
diff --git a/arch/mn10300/kernel/vmlinux.lds.S b/arch/mn10300/kernel/vmlinux.lds.S
index febbeee..968bcd2 100644
@@ -70,7 +70,7 @@ SECTIONS
        .exit.text : { EXIT_TEXT; }
        .exit.data : { EXIT_DATA; }
 
-  PERCPU(PAGE_SIZE)
+  PERCPU(32, PAGE_SIZE)
   . = ALIGN(PAGE_SIZE);
   __init_end = .;
   /* freed after init ends here */
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
index d64a6bb..8f1e4ef 100644
@@ -145,7 +145,7 @@ SECTIONS
                EXIT_DATA
        }
 
-       PERCPU(PAGE_SIZE)
+       PERCPU(L1_CACHE_BYTES, PAGE_SIZE)
        . = ALIGN(PAGE_SIZE);
        __init_end = .;
        /* freed after init ends here */
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 8a0deef..b9150f0 100644
@@ -160,7 +160,7 @@ SECTIONS
                INIT_RAM_FS
        }
 
-       PERCPU(PAGE_SIZE)
+       PERCPU(L1_CACHE_BYTES, PAGE_SIZE)
 
        . = ALIGN(8);
        .machine.desc : AT(ADDR(.machine.desc) - LOAD_OFFSET) {
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index a68ac10..1bc18cd 100644
@@ -77,7 +77,7 @@ SECTIONS
        . = ALIGN(PAGE_SIZE);
        INIT_DATA_SECTION(0x100)
 
-       PERCPU(PAGE_SIZE)
+       PERCPU(0x100, PAGE_SIZE)
        . = ALIGN(PAGE_SIZE);
        __init_end = .;         /* freed after init ends here */
 
diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S
index 7f8a709..af4d461 100644
@@ -66,7 +66,7 @@ SECTIONS
                __machvec_end = .;
        }
 
-       PERCPU(PAGE_SIZE)
+       PERCPU(L1_CACHE_BYTES, PAGE_SIZE)
 
        /*
         * .exit.text is discarded at runtime, not link time, to deal with
diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
index 0c1e678..92b557a 100644
@@ -108,7 +108,7 @@ SECTIONS
                __sun4v_2insn_patch_end = .;
        }
 
-       PERCPU(PAGE_SIZE)
+       PERCPU(SMP_CACHE_BYTES, PAGE_SIZE)
 
        . = ALIGN(PAGE_SIZE);
        __init_end = .;
diff --git a/arch/tile/kernel/vmlinux.lds.S b/arch/tile/kernel/vmlinux.lds.S
index 25fdc0c..c6ce378 100644
@@ -63,7 +63,7 @@ SECTIONS
     *(.init.page)
   } :data =0
   INIT_DATA_SECTION(16)
-  PERCPU(PAGE_SIZE)
+  PERCPU(L2_CACHE_BYTES, PAGE_SIZE)
   . = ALIGN(PAGE_SIZE);
   VMLINUX_SYMBOL(_einitdata) = .;
 
diff --git a/arch/um/include/asm/common.lds.S b/arch/um/include/asm/common.lds.S
index ac55b9e..34bede8 100644
@@ -42,7 +42,7 @@
        INIT_SETUP(0)
   }
 
-  PERCPU(32)
+  PERCPU(32, 32)
        
   .initcall.init : {
        INIT_CALLS
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 7e17295..a09e1f0 100644
@@ -451,6 +451,26 @@ do {                                                                       \
 #define irqsafe_cpu_cmpxchg_4(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
 #endif /* !CONFIG_M386 */
 
+#ifdef CONFIG_X86_CMPXCHG64
+#define percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2)                  \
+({                                                                     \
+       char __ret;                                                     \
+       typeof(o1) __o1 = o1;                                           \
+       typeof(o1) __n1 = n1;                                           \
+       typeof(o2) __o2 = o2;                                           \
+       typeof(o2) __n2 = n2;                                           \
+       typeof(o2) __dummy = n2;                                        \
+       asm volatile("cmpxchg8b "__percpu_arg(1)"\n\tsetz %0\n\t"       \
+                   : "=a"(__ret), "=m" (pcp1), "=d"(__dummy)           \
+                   :  "b"(__n1), "c"(__n2), "a"(__o1), "d"(__o2));     \
+       __ret;                                                          \
+})
+
+#define __this_cpu_cmpxchg_double_4(pcp1, pcp2, o1, o2, n1, n2)                percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2)
+#define this_cpu_cmpxchg_double_4(pcp1, pcp2, o1, o2, n1, n2)          percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2)
+#define irqsafe_cpu_cmpxchg_double_4(pcp1, pcp2, o1, o2, n1, n2)       percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2)
+#endif /* CONFIG_X86_CMPXCHG64 */
+
 /*
  * Per cpu atomic 64 bit operations are only available under 64 bit.
  * 32 bit must fall back to generic operations.
@@ -480,6 +500,34 @@ do {                                                                       \
 #define irqsafe_cpu_xor_8(pcp, val)    percpu_to_op("xor", (pcp), val)
 #define irqsafe_cpu_xchg_8(pcp, nval)  percpu_xchg_op(pcp, nval)
 #define irqsafe_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
+
+/*
+ * Pretty complex macro to generate cmpxchg16 instruction.  The instruction
+ * is not supported on early AMD64 processors so we must be able to emulate
+ * it in software.  The address used in the cmpxchg16 instruction must be
+ * aligned to a 16 byte boundary.
+ */
+#define percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2)                 \
+({                                                                     \
+       char __ret;                                                     \
+       typeof(o1) __o1 = o1;                                           \
+       typeof(o1) __n1 = n1;                                           \
+       typeof(o2) __o2 = o2;                                           \
+       typeof(o2) __n2 = n2;                                           \
+       typeof(o2) __dummy;                                             \
+       alternative_io("call this_cpu_cmpxchg16b_emu\n\t" P6_NOP4,      \
+                      "cmpxchg16b %%gs:(%%rsi)\n\tsetz %0\n\t",        \
+                      X86_FEATURE_CX16,                                \
+                      ASM_OUTPUT2("=a"(__ret), "=d"(__dummy)),         \
+                      "S" (&pcp1), "b"(__n1), "c"(__n2),               \
+                      "a"(__o1), "d"(__o2));                           \
+       __ret;                                                          \
+})
+
+#define __this_cpu_cmpxchg_double_8(pcp1, pcp2, o1, o2, n1, n2)                percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2)
+#define this_cpu_cmpxchg_double_8(pcp1, pcp2, o1, o2, n1, n2)          percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2)
+#define irqsafe_cpu_cmpxchg_double_8(pcp1, pcp2, o1, o2, n1, n2)       percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2)
+
 #endif
 
 /* This is not atomic against other CPUs -- CPU preemption needs to be off */
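
The comment above notes that the pair handed to cmpxchg16b must start on a
16-byte boundary.  A minimal sketch of a per-cpu definition that satisfies
this (hypothetical names, not taken from the patch):

    /* Two adjacent 64-bit per-cpu words, 16-byte aligned so that
     * percpu_cmpxchg16b_double() can operate on them directly.  On a
     * CX16-capable CPU the alternative emits an inline cmpxchg16b; early
     * AMD64 parts call this_cpu_cmpxchg16b_emu() instead.
     */
    struct demo_slot {
            void *ptr;              /* low quadword  (%rax old / %rbx new) */
            unsigned long seq;      /* high quadword (%rdx old / %rcx new) */
    } __aligned(16);

    static DEFINE_PER_CPU(struct demo_slot, demo_slot);
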
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 0381e1f..101c323 100644
@@ -231,7 +231,7 @@ SECTIONS
         * output PHDR, so the next output section - .init.text - should
         * start another segment - init.
         */
-       PERCPU_VADDR(0, :percpu)
+       PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
 #endif
 
        INIT_TEXT_SECTION(PAGE_SIZE)
@@ -306,7 +306,7 @@ SECTIONS
        }
 
 #if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
-       PERCPU(PAGE_SIZE)
+       PERCPU(INTERNODE_CACHE_BYTES, PAGE_SIZE)
 #endif
 
        . = ALIGN(PAGE_SIZE);
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index e10cf07..f2479f1 100644
@@ -42,4 +42,5 @@ else
         lib-y += memmove_64.o memset_64.o
         lib-y += copy_user_64.o rwlock_64.o copy_user_nocache_64.o
        lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem_64.o
+       lib-y += cmpxchg16b_emu.o
 endif
diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
new file mode 100644
index 0000000..3e8b08a
--- /dev/null
+++ b/arch/x86/lib/cmpxchg16b_emu.S
@@ -0,0 +1,59 @@
+/*
+ *     This program is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     as published by the Free Software Foundation; version 2
+ *     of the License.
+ *
+ */
+#include <linux/linkage.h>
+#include <asm/alternative-asm.h>
+#include <asm/frame.h>
+#include <asm/dwarf2.h>
+
+.text
+
+/*
+ * Inputs:
+ * %rsi : memory location to compare
+ * %rax : low 64 bits of old value
+ * %rdx : high 64 bits of old value
+ * %rbx : low 64 bits of new value
+ * %rcx : high 64 bits of new value
+ * %al  : Operation successful
+ */
+ENTRY(this_cpu_cmpxchg16b_emu)
+CFI_STARTPROC
+
+#
+# Emulate 'cmpxchg16b %gs:(%rsi)' except we return the result in %al not
+# via the ZF.  Caller will access %al to get result.
+#
+# Note that this is only useful for a cpuops operation.  Meaning that we
+# do *not* have a fully atomic operation but just an operation that is
+# *atomic* on a single cpu (as provided by the this_cpu_xx class of
+# macros).
+#
+this_cpu_cmpxchg16b_emu:
+       pushf
+       cli
+
+       cmpq %gs:(%rsi), %rax
+       jne not_same
+       cmpq %gs:8(%rsi), %rdx
+       jne not_same
+
+       movq %rbx, %gs:(%rsi)
+       movq %rcx, %gs:8(%rsi)
+
+       popf
+       mov $1, %al
+       ret
+
+ not_same:
+       popf
+       xor %al,%al
+       ret
+
+CFI_ENDPROC
+
+ENDPROC(this_cpu_cmpxchg16b_emu)
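
For readers who prefer C, this is roughly what the routine above does (an
illustrative equivalent only; the real implementation is the assembly, which
addresses the pair through %gs, i.e. relative to the per-cpu base):

    #include <linux/irqflags.h>
    #include <linux/types.h>

    static bool cmpxchg16b_emu_equivalent(u64 *pair, u64 old_lo, u64 old_hi,
                                          u64 new_lo, u64 new_hi)
    {
            unsigned long flags;
            bool ret = false;

            local_irq_save(flags);                  /* pushf; cli */
            if (pair[0] == old_lo && pair[1] == old_hi) {
                    pair[0] = new_lo;               /* movq %rbx, %gs:(%rsi)  */
                    pair[1] = new_hi;               /* movq %rcx, %gs:8(%rsi) */
                    ret = true;
            }
            local_irq_restore(flags);               /* popf */
            return ret;                             /* %al */
    }

Disabling interrupts makes the sequence atomic with respect to this CPU only,
which is all the this_cpu_* interface promises.
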
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
index 9b52615..a282006 100644
@@ -155,7 +155,7 @@ SECTIONS
     INIT_RAM_FS
   }
 
-  PERCPU(PAGE_SIZE)
+  PERCPU(XCHAL_ICACHE_LINESIZE, PAGE_SIZE)
 
   /* We need this dummy segment here */
 
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 906c3ce..32c45e5 100644
@@ -15,7 +15,7 @@
  *     HEAD_TEXT_SECTION
  *     INIT_TEXT_SECTION(PAGE_SIZE)
  *     INIT_DATA_SECTION(...)
- *     PERCPU(PAGE_SIZE)
+ *     PERCPU(CACHELINE_SIZE, PAGE_SIZE)
  *     __init_end = .;
  *
  *     _stext = .;
 
 /**
  * PERCPU_VADDR - define output section for percpu area
+ * @cacheline: cacheline size
  * @vaddr: explicit base address (optional)
  * @phdr: destination PHDR (optional)
  *
- * Macro which expands to output section for percpu area.  If @vaddr
- * is not blank, it specifies explicit base address and all percpu
- * symbols will be offset from the given address.  If blank, @vaddr
- * always equals @laddr + LOAD_OFFSET.
+ * Macro which expands to output section for percpu area.
+ *
+ * @cacheline is used to align subsections to avoid false cacheline
+ * sharing between subsections for different purposes.
+ *
+ * If @vaddr is not blank, it specifies explicit base address and all
+ * percpu symbols will be offset from the given address.  If blank,
+ * @vaddr always equals @laddr + LOAD_OFFSET.
  *
  * @phdr defines the output PHDR to use if not blank.  Be warned that
  * output PHDR is sticky.  If @phdr is specified, the next output
  * If there is no need to put the percpu section at a predetermined
  * address, use PERCPU().
  */
-#define PERCPU_VADDR(vaddr, phdr)                                      \
+#define PERCPU_VADDR(cacheline, vaddr, phdr)                           \
        VMLINUX_SYMBOL(__per_cpu_load) = .;                             \
        .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load)         \
                                - LOAD_OFFSET) {                        \
                *(.data..percpu..first)                                 \
                . = ALIGN(PAGE_SIZE);                                   \
                *(.data..percpu..page_aligned)                          \
+               . = ALIGN(cacheline);                                   \
                *(.data..percpu..readmostly)                            \
+               . = ALIGN(cacheline);                                   \
                *(.data..percpu)                                        \
                *(.data..percpu..shared_aligned)                        \
                VMLINUX_SYMBOL(__per_cpu_end) = .;                      \
 
 /**
  * PERCPU - define output section for percpu area, simple version
+ * @cacheline: cacheline size
  * @align: required alignment
  *
- * Align to @align and outputs output section for percpu area.  This
- * macro doesn't maniuplate @vaddr or @phdr and __per_cpu_load and
+ * Align to @align and outputs output section for percpu area.  This macro
+ * doesn't manipulate @vaddr or @phdr and __per_cpu_load and
  * __per_cpu_start will be identical.
  *
- * This macro is equivalent to ALIGN(align); PERCPU_VADDR( , ) except
- * that __per_cpu_load is defined as a relative symbol against
- * .data..percpu which is required for relocatable x86_32
- * configuration.
+ * This macro is equivalent to ALIGN(@align); PERCPU_VADDR(@cacheline,,)
+ * except that __per_cpu_load is defined as a relative symbol against
+ * .data..percpu which is required for relocatable x86_32 configuration.
  */
-#define PERCPU(align)                                                  \
+#define PERCPU(cacheline, align)                                       \
        . = ALIGN(align);                                               \
        .data..percpu   : AT(ADDR(.data..percpu) - LOAD_OFFSET) {       \
                VMLINUX_SYMBOL(__per_cpu_load) = .;                     \
                *(.data..percpu..first)                                 \
                . = ALIGN(PAGE_SIZE);                                   \
                *(.data..percpu..page_aligned)                          \
+               . = ALIGN(cacheline);                                   \
                *(.data..percpu..readmostly)                            \
+               . = ALIGN(cacheline);                                   \
                *(.data..percpu)                                        \
                *(.data..percpu..shared_aligned)                        \
                VMLINUX_SYMBOL(__per_cpu_end) = .;                      \
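
For context (nothing below is part of the diff): the readmostly subsection is
fed by DEFINE_PER_CPU_READ_MOSTLY, and the new ALIGN(cacheline) directives pad
it so that such variables no longer share a cache line with the frequently
written per-cpu data that follows.  A minimal sketch:

    #include <linux/percpu.h>

    /* Hypothetical example: lands in .data..percpu..readmostly, which the
     * linker script now aligns to a cache line on both sides.
     */
    static DEFINE_PER_CPU_READ_MOSTLY(unsigned long, demo_flags);
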
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 27c3c6f..3a5c444 100644
@@ -255,6 +255,30 @@ extern void __bad_size_call_parameter(void);
        pscr2_ret__;                                                    \
 })
 
+/*
+ * Special handling for cmpxchg_double.  cmpxchg_double is passed two
+ * percpu variables.  The first has to be aligned to a double word
+ * boundary and the second has to follow directly thereafter.
+ */
+#define __pcpu_double_call_return_bool(stem, pcp1, pcp2, ...)          \
+({                                                                     \
+       bool pdcrb_ret__;                                               \
+       __verify_pcpu_ptr(&pcp1);                                       \
+       BUILD_BUG_ON(sizeof(pcp1) != sizeof(pcp2));                     \
+       VM_BUG_ON((unsigned long)(&pcp1) % (2 * sizeof(pcp1)));         \
+       VM_BUG_ON((unsigned long)(&pcp2) !=                             \
+                 (unsigned long)(&pcp1) + sizeof(pcp1));               \
+       switch(sizeof(pcp1)) {                                          \
+       case 1: pdcrb_ret__ = stem##1(pcp1, pcp2, __VA_ARGS__); break;  \
+       case 2: pdcrb_ret__ = stem##2(pcp1, pcp2, __VA_ARGS__); break;  \
+       case 4: pdcrb_ret__ = stem##4(pcp1, pcp2, __VA_ARGS__); break;  \
+       case 8: pdcrb_ret__ = stem##8(pcp1, pcp2, __VA_ARGS__); break;  \
+       default:                                                        \
+               __bad_size_call_parameter(); break;                     \
+       }                                                               \
+       pdcrb_ret__;                                                    \
+})
+
 #define __pcpu_size_call(stem, variable, ...)                          \
 do {                                                                   \
        __verify_pcpu_ptr(&(variable));                                 \
@@ -500,6 +524,45 @@ do {                                                                       \
        __pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval)
 #endif
 
+/*
+ * cmpxchg_double replaces two adjacent scalars at once.  The first
+ * two parameters are per cpu variables which have to be of the same
+ * size.  A truth value is returned to indicate success or failure
+ * (since a double register result is difficult to handle).  There is
+ * very limited hardware support for these operations, so only certain
+ * sizes may work.
+ */
+#define _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)       \
+({                                                                     \
+       int ret__;                                                      \
+       preempt_disable();                                              \
+       ret__ = __this_cpu_generic_cmpxchg_double(pcp1, pcp2,           \
+                       oval1, oval2, nval1, nval2);                    \
+       preempt_enable();                                               \
+       ret__;                                                          \
+})
+
+#ifndef this_cpu_cmpxchg_double
+# ifndef this_cpu_cmpxchg_double_1
+#  define this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2)    \
+       _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# ifndef this_cpu_cmpxchg_double_2
+#  define this_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2)    \
+       _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# ifndef this_cpu_cmpxchg_double_4
+#  define this_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2)    \
+       _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# ifndef this_cpu_cmpxchg_double_8
+#  define this_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2)    \
+       _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# define this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)       \
+       __pcpu_double_call_return_bool(this_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
+#endif
+
 /*
  * Generic percpu operations that do not require preemption handling.
  * Either we do not care about races or the caller has the
@@ -703,6 +766,39 @@ do {                                                                       \
        __pcpu_size_call_return2(__this_cpu_cmpxchg_, pcp, oval, nval)
 #endif
 
+#define __this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)      \
+({                                                                     \
+       int __ret = 0;                                                  \
+       if (__this_cpu_read(pcp1) == (oval1) &&                         \
+                        __this_cpu_read(pcp2)  == (oval2)) {           \
+               __this_cpu_write(pcp1, (nval1));                        \
+               __this_cpu_write(pcp2, (nval2));                        \
+               __ret = 1;                                              \
+       }                                                               \
+       (__ret);                                                        \
+})
+
+#ifndef __this_cpu_cmpxchg_double
+# ifndef __this_cpu_cmpxchg_double_1
+#  define __this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2)  \
+       __this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# ifndef __this_cpu_cmpxchg_double_2
+#  define __this_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2)  \
+       __this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# ifndef __this_cpu_cmpxchg_double_4
+#  define __this_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2)  \
+       __this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# ifndef __this_cpu_cmpxchg_double_8
+#  define __this_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2)  \
+       __this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# define __this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)     \
+       __pcpu_double_call_return_bool(__this_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
+#endif
+
 /*
  * IRQ safe versions of the per cpu RMW operations. Note that these operations
  * are *not* safe against modification of the same variable from another
@@ -823,4 +919,36 @@ do {                                                                       \
        __pcpu_size_call_return2(irqsafe_cpu_cmpxchg_, (pcp), oval, nval)
 #endif
 
+#define irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)     \
+({                                                                     \
+       int ret__;                                                      \
+       unsigned long flags;                                            \
+       local_irq_save(flags);                                          \
+       ret__ = __this_cpu_generic_cmpxchg_double(pcp1, pcp2,           \
+                       oval1, oval2, nval1, nval2);                    \
+       local_irq_restore(flags);                                       \
+       ret__;                                                          \
+})
+
+#ifndef irqsafe_cpu_cmpxchg_double
+# ifndef irqsafe_cpu_cmpxchg_double_1
+#  define irqsafe_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+       irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# ifndef irqsafe_cpu_cmpxchg_double_2
+#  define irqsafe_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+       irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# ifndef irqsafe_cpu_cmpxchg_double_4
+#  define irqsafe_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+       irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# ifndef irqsafe_cpu_cmpxchg_double_8
+#  define irqsafe_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+       irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# define irqsafe_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
+	__pcpu_double_call_return_bool(irqsafe_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
+#endif
+
 #endif /* __LINUX_PERCPU_H */
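
The #ifndef layering above is what lets an architecture plug in its own
size-specific implementations (as arch/x86/include/asm/percpu.h does earlier
in this merge) while every size it does not provide falls back to the generic
preempt-disable or irq-disable versions.  A hedged sketch of the override
pattern, using a made-up arch helper name:

    /* In a hypothetical arch header included before linux/percpu.h: */
    #define this_cpu_cmpxchg_double_8(pcp1, pcp2, o1, o2, n1, n2)       \
            my_arch_cmpxchg_double_8(pcp1, pcp2, o1, o2, n1, n2)

    /*
     * linux/percpu.h then keeps this definition and only generates the
     * generic fallback for the sizes that remain undefined.
     */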