Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6
author		Linus Torvalds <torvalds@linux-foundation.org>
		Tue, 22 Apr 2008 23:47:54 +0000 (16:47 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>
		Tue, 22 Apr 2008 23:47:54 +0000 (16:47 -0700)
* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6:
  [IA64] run drivers/misc/sgi-xp through scripts/checkpatch.pl
  [IA64] run rest of drivers/misc/sgi-xp through scripts/Lindent
  [IA64] run some drivers/misc/sgi-xp through scripts/Lindent
  [IA64] move XP and XPC to drivers/misc/sgi-xp
  [IA64] minor irq handler cleanups
  [IA64] simplify notify hooks in mca.c
  [IA64] do notify DIE_MCA_MONARCH_PROCESS for each monarch
  [IA64] disable interrupts on exit of ia64_trace_syscall

20 files changed:
arch/ia64/Kconfig
arch/ia64/kernel/crash.c
arch/ia64/kernel/entry.S
arch/ia64/kernel/mca.c
arch/ia64/kernel/perfmon.c
arch/ia64/sn/kernel/Makefile
arch/ia64/sn/kernel/huberror.c
arch/ia64/sn/pci/tioce_provider.c
drivers/misc/Kconfig
drivers/misc/Makefile
drivers/misc/sgi-xp/Makefile [new file with mode: 0644]
drivers/misc/sgi-xp/xp.h [moved from include/asm-ia64/sn/xp.h with 91% similarity]
drivers/misc/sgi-xp/xp_main.c [moved from arch/ia64/sn/kernel/xp_main.c with 69% similarity]
drivers/misc/sgi-xp/xp_nofault.S [moved from arch/ia64/sn/kernel/xp_nofault.S with 95% similarity]
drivers/misc/sgi-xp/xpc.h [moved from include/asm-ia64/sn/xpc.h with 75% similarity]
drivers/misc/sgi-xp/xpc_channel.c [moved from arch/ia64/sn/kernel/xpc_channel.c with 87% similarity]
drivers/misc/sgi-xp/xpc_main.c [moved from arch/ia64/sn/kernel/xpc_main.c with 82% similarity]
drivers/misc/sgi-xp/xpc_partition.c [moved from arch/ia64/sn/kernel/xpc_partition.c with 79% similarity]
drivers/misc/sgi-xp/xpnet.c [moved from arch/ia64/sn/kernel/xpnet.c with 89% similarity]
include/asm-ia64/mca.h

diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index ed21737..cd13e13 100644
@@ -266,17 +266,6 @@ config IOSAPIC
        depends on !IA64_HP_SIM
        default y
 
-config IA64_SGI_SN_XP
-       tristate "Support communication between SGI SSIs"
-       depends on IA64_GENERIC || IA64_SGI_SN2
-       select IA64_UNCACHED_ALLOCATOR
-       help
-         An SGI machine can be divided into multiple Single System
-         Images which act independently of each other and have
-         hardware based memory protection from the others.  Enabling
-         this feature will allow for direct communication between SSIs
-         based on a network adapter and DMA messaging.
-
 config FORCE_MAX_ZONEORDER
        int "MAX_ORDER (11 - 17)"  if !HUGETLB_PAGE
        range 11 17  if !HUGETLB_PAGE
diff --git a/arch/ia64/kernel/crash.c b/arch/ia64/kernel/crash.c
index 90ef338..f065093 100644
@@ -194,8 +194,8 @@ kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data)
                        unw_init_running(kdump_cpu_freeze, NULL);
                break;
        case DIE_MCA_MONARCH_LEAVE:
-               /* die_register->signr indicate if MCA is recoverable */
-               if (kdump_on_fatal_mca && !args->signr) {
+               /* *(nd->data) indicates if MCA is recoverable */
+               if (kdump_on_fatal_mca && !(*(nd->data))) {
                        atomic_set(&kdump_in_progress, 1);
                        *(nd->monarch_cpu) = -1;
                        machine_kdump_on_init();
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index b0be4a2..e49ad8c 100644
@@ -570,6 +570,7 @@ GLOBAL_ENTRY(ia64_trace_syscall)
        br.call.sptk.many rp=syscall_trace_leave // give parent a chance to catch return value
 .ret3:
 (pUStk)        cmp.eq.unc p6,p0=r0,r0                  // p6 <- pUStk
+(pUStk)        rsm psr.i                               // disable interrupts
        br.cond.sptk .work_pending_syscall_end
 
 strace_error:
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index e51bced..705176b 100644
 # define IA64_MCA_DEBUG(fmt...)
 #endif
 
+#define NOTIFY_INIT(event, regs, arg, spin)                            \
+do {                                                                   \
+       if ((notify_die((event), "INIT", (regs), (arg), 0, 0)           \
+                       == NOTIFY_STOP) && ((spin) == 1))               \
+               ia64_mca_spin(__func__);                                \
+} while (0)
+
+#define NOTIFY_MCA(event, regs, arg, spin)                             \
+do {                                                                   \
+       if ((notify_die((event), "MCA", (regs), (arg), 0, 0)            \
+                       == NOTIFY_STOP) && ((spin) == 1))               \
+               ia64_mca_spin(__func__);                                \
+} while (0)
+
 /* Used by mca_asm.S */
 DEFINE_PER_CPU(u64, ia64_mca_data); /* == __per_cpu_mca[smp_processor_id()] */
 DEFINE_PER_CPU(u64, ia64_mca_per_cpu_pte); /* PTE to map per-CPU area */
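The NOTIFY_MCA() and NOTIFY_INIT() helpers added above fold the repeated
notify_die()-then-maybe-spin pattern into one line per call site. As an
illustration (not part of the patch), the first converted call site below,

	NOTIFY_MCA(DIE_MCA_RENDZVOUS_ENTER, get_irq_regs(), (long)&nd, 1);

expands to essentially the code it replaces:

	if ((notify_die(DIE_MCA_RENDZVOUS_ENTER, "MCA", get_irq_regs(),
			(long)&nd, 0, 0) == NOTIFY_STOP) && (1 == 1))
		ia64_mca_spin(__func__);

Passing 0 as the final "spin" argument, as the DIE_INIT_ENTER and
DIE_MCA_NEW_TIMEOUT sites do, turns the helper into a fire-and-forget
notification whose NOTIFY_STOP result is ignored.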
@@ -766,9 +780,8 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg)
 
        /* Mask all interrupts */
        local_irq_save(flags);
-       if (notify_die(DIE_MCA_RENDZVOUS_ENTER, "MCA", get_irq_regs(),
-                      (long)&nd, 0, 0) == NOTIFY_STOP)
-               ia64_mca_spin(__func__);
+
+       NOTIFY_MCA(DIE_MCA_RENDZVOUS_ENTER, get_irq_regs(), (long)&nd, 1);
 
        ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_DONE;
        /* Register with the SAL monarch that the slave has
@@ -776,17 +789,13 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg)
         */
        ia64_sal_mc_rendez();
 
-       if (notify_die(DIE_MCA_RENDZVOUS_PROCESS, "MCA", get_irq_regs(),
-                      (long)&nd, 0, 0) == NOTIFY_STOP)
-               ia64_mca_spin(__func__);
+       NOTIFY_MCA(DIE_MCA_RENDZVOUS_PROCESS, get_irq_regs(), (long)&nd, 1);
 
        /* Wait for the monarch cpu to exit. */
        while (monarch_cpu != -1)
               cpu_relax();     /* spin until monarch leaves */
 
-       if (notify_die(DIE_MCA_RENDZVOUS_LEAVE, "MCA", get_irq_regs(),
-                      (long)&nd, 0, 0) == NOTIFY_STOP)
-               ia64_mca_spin(__func__);
+       NOTIFY_MCA(DIE_MCA_RENDZVOUS_LEAVE, get_irq_regs(), (long)&nd, 1);
 
        ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
        /* Enable all interrupts */
@@ -1256,7 +1265,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
        int recover, cpu = smp_processor_id();
        struct task_struct *previous_current;
        struct ia64_mca_notify_die nd =
-               { .sos = sos, .monarch_cpu = &monarch_cpu };
+               { .sos = sos, .monarch_cpu = &monarch_cpu, .data = &recover };
        static atomic_t mca_count;
        static cpumask_t mca_cpu;
 
@@ -1272,9 +1281,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
 
        previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "MCA");
 
-       if (notify_die(DIE_MCA_MONARCH_ENTER, "MCA", regs, (long)&nd, 0, 0)
-                       == NOTIFY_STOP)
-               ia64_mca_spin(__func__);
+       NOTIFY_MCA(DIE_MCA_MONARCH_ENTER, regs, (long)&nd, 1);
 
        ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_CONCURRENT_MCA;
        if (sos->monarch) {
@@ -1288,13 +1295,12 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
                 * does not work.
                 */
                ia64_mca_wakeup_all();
-               if (notify_die(DIE_MCA_MONARCH_PROCESS, "MCA", regs, (long)&nd, 0, 0)
-                               == NOTIFY_STOP)
-                       ia64_mca_spin(__func__);
        } else {
                while (cpu_isset(cpu, mca_cpu))
                        cpu_relax();    /* spin until monarch wakes us */
-        }
+       }
+
+       NOTIFY_MCA(DIE_MCA_MONARCH_PROCESS, regs, (long)&nd, 1);
 
        /* Get the MCA error record and log it */
        ia64_mca_log_sal_error_record(SAL_INFO_TYPE_MCA);
@@ -1320,9 +1326,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
                mca_insert_tr(0x2); /*Reload dynamic itrs*/
        }
 
-       if (notify_die(DIE_MCA_MONARCH_LEAVE, "MCA", regs, (long)&nd, 0, recover)
-                       == NOTIFY_STOP)
-               ia64_mca_spin(__func__);
+       NOTIFY_MCA(DIE_MCA_MONARCH_LEAVE, regs, (long)&nd, 1);
 
        if (atomic_dec_return(&mca_count) > 0) {
                int i;
@@ -1643,7 +1647,7 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
        struct ia64_mca_notify_die nd =
                { .sos = sos, .monarch_cpu = &monarch_cpu };
 
-       (void) notify_die(DIE_INIT_ENTER, "INIT", regs, (long)&nd, 0, 0);
+       NOTIFY_INIT(DIE_INIT_ENTER, regs, (long)&nd, 0);
 
        mprintk(KERN_INFO "Entered OS INIT handler. PSP=%lx cpu=%d monarch=%ld\n",
                sos->proc_state_param, cpu, sos->monarch);
@@ -1680,17 +1684,15 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
                ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_INIT;
                while (monarch_cpu == -1)
                       cpu_relax();     /* spin until monarch enters */
-               if (notify_die(DIE_INIT_SLAVE_ENTER, "INIT", regs, (long)&nd, 0, 0)
-                               == NOTIFY_STOP)
-                       ia64_mca_spin(__func__);
-               if (notify_die(DIE_INIT_SLAVE_PROCESS, "INIT", regs, (long)&nd, 0, 0)
-                               == NOTIFY_STOP)
-                       ia64_mca_spin(__func__);
+
+               NOTIFY_INIT(DIE_INIT_SLAVE_ENTER, regs, (long)&nd, 1);
+               NOTIFY_INIT(DIE_INIT_SLAVE_PROCESS, regs, (long)&nd, 1);
+
                while (monarch_cpu != -1)
                       cpu_relax();     /* spin until monarch leaves */
-               if (notify_die(DIE_INIT_SLAVE_LEAVE, "INIT", regs, (long)&nd, 0, 0)
-                               == NOTIFY_STOP)
-                       ia64_mca_spin(__func__);
+
+               NOTIFY_INIT(DIE_INIT_SLAVE_LEAVE, regs, (long)&nd, 1);
+
                mprintk("Slave on cpu %d returning to normal service.\n", cpu);
                set_curr_task(cpu, previous_current);
                ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
@@ -1699,9 +1701,7 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
        }
 
        monarch_cpu = cpu;
-       if (notify_die(DIE_INIT_MONARCH_ENTER, "INIT", regs, (long)&nd, 0, 0)
-                       == NOTIFY_STOP)
-               ia64_mca_spin(__func__);
+       NOTIFY_INIT(DIE_INIT_MONARCH_ENTER, regs, (long)&nd, 1);
 
        /*
         * Wait for a bit.  On some machines (e.g., HP's zx2000 and zx6000), INIT can be
@@ -1716,12 +1716,9 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
         * to default_monarch_init_process() above and just print all the
         * tasks.
         */
-       if (notify_die(DIE_INIT_MONARCH_PROCESS, "INIT", regs, (long)&nd, 0, 0)
-                       == NOTIFY_STOP)
-               ia64_mca_spin(__func__);
-       if (notify_die(DIE_INIT_MONARCH_LEAVE, "INIT", regs, (long)&nd, 0, 0)
-                       == NOTIFY_STOP)
-               ia64_mca_spin(__func__);
+       NOTIFY_INIT(DIE_INIT_MONARCH_PROCESS, regs, (long)&nd, 1);
+       NOTIFY_INIT(DIE_INIT_MONARCH_LEAVE, regs, (long)&nd, 1);
+
        mprintk("\nINIT dump complete.  Monarch on cpu %d returning to normal service.\n", cpu);
        atomic_dec(&monarchs);
        set_curr_task(cpu, previous_current);
@@ -1953,7 +1950,7 @@ ia64_mca_init(void)
                        printk(KERN_INFO "Increasing MCA rendezvous timeout from "
                                "%ld to %ld milliseconds\n", timeout, isrv.v0);
                        timeout = isrv.v0;
-                       (void) notify_die(DIE_MCA_NEW_TIMEOUT, "MCA", NULL, timeout, 0, 0);
+                       NOTIFY_MCA(DIE_MCA_NEW_TIMEOUT, NULL, timeout, 0);
                        continue;
                }
                printk(KERN_ERR "Failed to register rendezvous interrupt "
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index d1d24f4..c8e4037 100644
@@ -5511,7 +5511,7 @@ stop_monitoring:
 }
 
 static int
-pfm_do_interrupt_handler(int irq, void *arg, struct pt_regs *regs)
+pfm_do_interrupt_handler(void *arg, struct pt_regs *regs)
 {
        struct task_struct *task;
        pfm_context_t *ctx;
@@ -5591,7 +5591,7 @@ pfm_interrupt_handler(int irq, void *arg)
 
                start_cycles = ia64_get_itc();
 
-               ret = pfm_do_interrupt_handler(irq, arg, regs);
+               ret = pfm_do_interrupt_handler(arg, regs);
 
                total_cycles = ia64_get_itc();
 
diff --git a/arch/ia64/sn/kernel/Makefile b/arch/ia64/sn/kernel/Makefile
index 688a3c2..0591038 100644
@@ -4,7 +4,7 @@
 # License.  See the file "COPYING" in the main directory of this archive
 # for more details.
 #
-# Copyright (C) 1999,2001-2006 Silicon Graphics, Inc.  All Rights Reserved.
+# Copyright (C) 1999,2001-2006,2008 Silicon Graphics, Inc.  All Rights Reserved.
 #
 
 EXTRA_CFLAGS += -Iarch/ia64/sn/include
@@ -15,9 +15,4 @@ obj-y                         += setup.o bte.o bte_error.o irq.o mca.o idle.o \
                                   sn2/
 obj-$(CONFIG_IA64_GENERIC)      += machvec.o
 obj-$(CONFIG_SGI_TIOCX)                += tiocx.o
-obj-$(CONFIG_IA64_SGI_SN_XP)   += xp.o
-xp-y                           := xp_main.o xp_nofault.o
-obj-$(CONFIG_IA64_SGI_SN_XP)   += xpc.o
-xpc-y                          := xpc_main.o xpc_channel.o xpc_partition.o
-obj-$(CONFIG_IA64_SGI_SN_XP)   += xpnet.o
 obj-$(CONFIG_PCI_MSI)          += msi_sn.o
diff --git a/arch/ia64/sn/kernel/huberror.c b/arch/ia64/sn/kernel/huberror.c
index 0101c79..08b0d9b 100644
@@ -187,8 +187,8 @@ void hub_error_init(struct hubdev_info *hubdev_info)
 {
 
        if (request_irq(SGI_II_ERROR, hub_eint_handler, IRQF_SHARED,
-                       "SN_hub_error", (void *)hubdev_info)) {
-               printk("hub_error_init: Failed to request_irq for 0x%p\n",
+                       "SN_hub_error", hubdev_info)) {
+               printk(KERN_ERR "hub_error_init: Failed to request_irq for 0x%p\n",
                    hubdev_info);
                return;
        }
diff --git a/arch/ia64/sn/pci/tioce_provider.c b/arch/ia64/sn/pci/tioce_provider.c
index 9b3c113..94e5845 100644
@@ -655,7 +655,8 @@ tioce_dma(struct pci_dev *pdev, u64 paddr, size_t byte_count, int dma_flags)
  *
  * Simply call tioce_do_dma_map() to create a map with the barrier bit set
  * in the address.
- */ static u64
+ */
+static u64
 tioce_dma_consistent(struct pci_dev *pdev, u64 paddr, size_t byte_count, int dma_flags)
 {
        return tioce_do_dma_map(pdev, paddr, byte_count, 1, dma_flags);
@@ -668,7 +669,8 @@ tioce_dma_consistent(struct pci_dev *pdev, u64 paddr, size_t byte_count, int dma
  *
  * Handle a CE error interrupt.  Simply a wrapper around a SAL call which
  * defers processing to the SGI prom.
- */ static irqreturn_t
+ */
+static irqreturn_t
 tioce_error_intr_handler(int irq, void *arg)
 {
        struct tioce_common *soft = arg;
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index bb94ce7..297a48f 100644
@@ -360,4 +360,16 @@ config ENCLOSURE_SERVICES
          driver (SCSI/ATA) which supports enclosures
          or a SCSI enclosure device (SES) to use these services.
 
+config SGI_XP
+       tristate "Support communication between SGI SSIs"
+       depends on IA64_GENERIC || IA64_SGI_SN2
+       select IA64_UNCACHED_ALLOCATOR if IA64_GENERIC || IA64_SGI_SN2
+       select GENERIC_ALLOCATOR if IA64_GENERIC || IA64_SGI_SN2
+       ---help---
+         An SGI machine can be divided into multiple Single System
+         Images which act independently of each other and have
+         hardware based memory protection from the others.  Enabling
+         this feature will allow for direct communication between SSIs
+         based on a network adapter and DMA messaging.
+
 endif # MISC_DEVICES
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 4581b25..5914da4 100644
@@ -24,3 +24,4 @@ obj-$(CONFIG_EEPROM_93CX6)    += eeprom_93cx6.o
 obj-$(CONFIG_INTEL_MENLOW)     += intel_menlow.o
 obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o
 obj-$(CONFIG_KGDB_TESTS)       += kgdbts.o
+obj-$(CONFIG_SGI_XP)           += sgi-xp/
diff --git a/drivers/misc/sgi-xp/Makefile b/drivers/misc/sgi-xp/Makefile
new file mode 100644
index 0000000..b6e40a7
--- /dev/null
+++ b/drivers/misc/sgi-xp/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile for SGI's XP devices.
+#
+
+obj-$(CONFIG_SGI_XP)           += xp.o
+xp-y                           := xp_main.o xp_nofault.o
+
+obj-$(CONFIG_SGI_XP)           += xpc.o
+xpc-y                          := xpc_main.o xpc_channel.o xpc_partition.o
+
+obj-$(CONFIG_SGI_XP)           += xpnet.o
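With CONFIG_SGI_XP=m, this new Makefile builds the same three modules the
deleted arch/ia64/sn/kernel/Makefile rules produced: xp.ko (xp_main.o plus
the xp_nofault.S stub), xpc.ko (main, channel and partition objects), and
xpnet.ko, now built from drivers/misc/sgi-xp/ under the renamed SGI_XP
option.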
diff --git a/include/asm-ia64/sn/xp.h b/drivers/misc/sgi-xp/xp.h
similarity index 91%
rename from include/asm-ia64/sn/xp.h
rename to drivers/misc/sgi-xp/xp.h
index f7711b3..5515234 100644
@@ -3,18 +3,15 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2004-2005 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2004-2008 Silicon Graphics, Inc. All rights reserved.
  */
 
-
 /*
  * External Cross Partition (XP) structures and defines.
  */
 
-
-#ifndef _ASM_IA64_SN_XP_H
-#define _ASM_IA64_SN_XP_H
-
+#ifndef _DRIVERS_MISC_SGIXP_XP_H
+#define _DRIVERS_MISC_SGIXP_XP_H
 
 #include <linux/cache.h>
 #include <linux/hardirq.h>
 #include <asm/sn/types.h>
 #include <asm/sn/bte.h>
 
-
 #ifdef USE_DBUG_ON
 #define DBUG_ON(condition)     BUG_ON(condition)
 #else
 #define DBUG_ON(condition)
 #endif
 
-
 /*
  * Define the maximum number of logically defined partitions the system
  * can support. It is constrained by the maximum number of hardware
@@ -43,7 +38,6 @@
  */
 #define XP_MAX_PARTITIONS      64
 
-
 /*
  * Define the number of u64s required to represent all the C-brick nasids
  * as a bitmap.  The cross-partition kernel modules deal only with
@@ -54,7 +48,6 @@
 #define XP_NASID_MASK_BYTES    ((XP_MAX_PHYSNODE_ID + 7) / 8)
 #define XP_NASID_MASK_WORDS    ((XP_MAX_PHYSNODE_ID + 63) / 64)
 
-
 /*
 * Wrapper for bte_copy() that, should it return a failure status, will retry
  * the bte_copy() once in the hope that the failure was due to a temporary
@@ -74,7 +67,6 @@ xp_bte_copy(u64 src, u64 vdst, u64 len, u64 mode, void *notification)
        bte_result_t ret;
        u64 pdst = ia64_tpa(vdst);
 
-
        /*
         * Ensure that the physically mapped memory is contiguous.
         *
@@ -87,16 +79,15 @@ xp_bte_copy(u64 src, u64 vdst, u64 len, u64 mode, void *notification)
 
        ret = bte_copy(src, pdst, len, mode, notification);
        if ((ret != BTE_SUCCESS) && BTE_ERROR_RETRY(ret)) {
-               if (!in_interrupt()) {
+               if (!in_interrupt())
                        cond_resched();
-               }
+
                ret = bte_copy(src, pdst, len, mode, notification);
        }
 
        return ret;
 }
 
-
 /*
  * XPC establishes channel connections between the local partition and any
  * other partition that is currently up. Over these channels, kernel-level
@@ -122,7 +113,6 @@ xp_bte_copy(u64 src, u64 vdst, u64 len, u64 mode, void *notification)
 #error XPC_NCHANNELS exceeds MAXIMUM allowed.
 #endif
 
-
 /*
  * The format of an XPC message is as follows:
  *
@@ -160,12 +150,10 @@ struct xpc_msg {
        u64 payload;            /* user defined portion of message */
 };
 
-
 #define XPC_MSG_PAYLOAD_OFFSET (u64) (&((struct xpc_msg *)0)->payload)
 #define XPC_MSG_SIZE(_payload_size) \
                L1_CACHE_ALIGN(XPC_MSG_PAYLOAD_OFFSET + (_payload_size))
 
-
 /*
  * Define the return values and values passed to user's callout functions.
  * (It is important to add new value codes at the end just preceding
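An aside on XPC_MSG_PAYLOAD_OFFSET, defined a few lines up: casting a null
pointer to struct xpc_msg * and taking the address of its payload member is
the traditional hand-rolled offsetof, so it is equivalent to this
illustrative form (assuming the usual <linux/stddef.h> offsetof):

	/* illustrative equivalent, not part of the patch */
	#define XPC_MSG_PAYLOAD_OFFSET ((u64)offsetof(struct xpc_msg, payload))

XPC_MSG_SIZE() then rounds that header plus a caller-chosen payload size up
to a whole L1 cache line.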
@@ -267,10 +255,9 @@ enum xpc_retval {
                                /* 115: BTE end */
        xpcBteSh2End = xpcBteSh2Start + BTEFAIL_SH2_ALL,
 
-       xpcUnknownReason        /* 116: unknown reason -- must be last in list */
+       xpcUnknownReason        /* 116: unknown reason - must be last in enum */
 };
 
-
 /*
  * Define the callout function types used by XPC to update the user on
  * connection activity and state changes (via the user function registered by
@@ -375,12 +362,11 @@ enum xpc_retval {
  * =====================+================================+=====================
  */
 
-typedef void (*xpc_channel_func)(enum xpc_retval reason, partid_t partid,
-               int ch_number, void *data, void *key);
-
-typedef void (*xpc_notify_func)(enum xpc_retval reason, partid_t partid,
-               int ch_number, void *key);
+typedef void (*xpc_channel_func) (enum xpc_retval reason, partid_t partid,
+                                 int ch_number, void *data, void *key);
 
+typedef void (*xpc_notify_func) (enum xpc_retval reason, partid_t partid,
+                                int ch_number, void *key);
 
 /*
  * The following is a registration entry. There is a global array of these,
@@ -398,50 +384,45 @@ typedef void (*xpc_notify_func)(enum xpc_retval reason, partid_t partid,
  */
 struct xpc_registration {
        struct mutex mutex;
-       xpc_channel_func func;          /* function to call */
-       void *key;                      /* pointer to user's key */
-       u16 nentries;                   /* #of msg entries in local msg queue */
-       u16 msg_size;                   /* message queue's message size */
-       u32 assigned_limit;             /* limit on #of assigned kthreads */
-       u32 idle_limit;                 /* limit on #of idle kthreads */
+       xpc_channel_func func;  /* function to call */
+       void *key;              /* pointer to user's key */
+       u16 nentries;           /* #of msg entries in local msg queue */
+       u16 msg_size;           /* message queue's message size */
+       u32 assigned_limit;     /* limit on #of assigned kthreads */
+       u32 idle_limit;         /* limit on #of idle kthreads */
 } ____cacheline_aligned;
 
-
 #define XPC_CHANNEL_REGISTERED(_c)     (xpc_registrations[_c].func != NULL)
 
-
 /* the following are valid xpc_allocate() flags */
-#define XPC_WAIT       0               /* wait flag */
-#define XPC_NOWAIT     1               /* no wait flag */
-
+#define XPC_WAIT       0       /* wait flag */
+#define XPC_NOWAIT     1       /* no wait flag */
 
 struct xpc_interface {
-       void (*connect)(int);
-       void (*disconnect)(int);
-       enum xpc_retval (*allocate)(partid_t, int, u32, void **);
-       enum xpc_retval (*send)(partid_t, int, void *);
-       enum xpc_retval (*send_notify)(partid_t, int, void *,
-                                               xpc_notify_func, void *);
-       void (*received)(partid_t, int, void *);
-       enum xpc_retval (*partid_to_nasids)(partid_t, void *);
+       void (*connect) (int);
+       void (*disconnect) (int);
+       enum xpc_retval (*allocate) (partid_t, int, u32, void **);
+       enum xpc_retval (*send) (partid_t, int, void *);
+       enum xpc_retval (*send_notify) (partid_t, int, void *,
+                                       xpc_notify_func, void *);
+       void (*received) (partid_t, int, void *);
+       enum xpc_retval (*partid_to_nasids) (partid_t, void *);
 };
 
-
 extern struct xpc_interface xpc_interface;
 
 extern void xpc_set_interface(void (*)(int),
-               void (*)(int),
-               enum xpc_retval (*)(partid_t, int, u32, void **),
-               enum xpc_retval (*)(partid_t, int, void *),
-               enum xpc_retval (*)(partid_t, int, void *, xpc_notify_func,
-                                                               void *),
-               void (*)(partid_t, int, void *),
-               enum xpc_retval (*)(partid_t, void *));
+                             void (*)(int),
+                             enum xpc_retval (*)(partid_t, int, u32, void **),
+                             enum xpc_retval (*)(partid_t, int, void *),
+                             enum xpc_retval (*)(partid_t, int, void *,
+                                                 xpc_notify_func, void *),
+                             void (*)(partid_t, int, void *),
+                             enum xpc_retval (*)(partid_t, void *));
 extern void xpc_clear_interface(void);
 
-
 extern enum xpc_retval xpc_connect(int, xpc_channel_func, void *, u16,
-                                               u16, u32, u32);
+                                  u16, u32, u32);
 extern void xpc_disconnect(int);
 
 static inline enum xpc_retval
@@ -458,7 +439,7 @@ xpc_send(partid_t partid, int ch_number, void *payload)
 
 static inline enum xpc_retval
 xpc_send_notify(partid_t partid, int ch_number, void *payload,
-                       xpc_notify_func func, void *key)
+               xpc_notify_func func, void *key)
 {
        return xpc_interface.send_notify(partid, ch_number, payload, func, key);
 }
@@ -475,11 +456,8 @@ xpc_partid_to_nasids(partid_t partid, void *nasids)
        return xpc_interface.partid_to_nasids(partid, nasids);
 }
 
-
 extern u64 xp_nofault_PIOR_target;
 extern int xp_nofault_PIOR(void *);
 extern int xp_error_PIOR(void);
 
-
-#endif /* _ASM_IA64_SN_XP_H */
-
+#endif /* _DRIVERS_MISC_SGIXP_XP_H */
diff --git a/arch/ia64/sn/kernel/xp_main.c b/drivers/misc/sgi-xp/xp_main.c
similarity index 69%
rename from arch/ia64/sn/kernel/xp_main.c
rename to drivers/misc/sgi-xp/xp_main.c
index b7ea466..1fbf99b 100644
@@ -3,10 +3,9 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (c) 2004-2005 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (c) 2004-2008 Silicon Graphics, Inc.  All Rights Reserved.
  */
 
-
 /*
  * Cross Partition (XP) base.
  *
  *
  */
 
-
 #include <linux/kernel.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <asm/sn/intr.h>
 #include <asm/sn/sn_sal.h>
-#include <asm/sn/xp.h>
-
+#include "xp.h"
 
 /*
- * Target of nofault PIO read.
+ * The export of xp_nofault_PIOR needs to happen here since it is defined
+ * in drivers/misc/sgi-xp/xp_nofault.S. The target of the nofault read is
+ * defined here.
  */
-u64 xp_nofault_PIOR_target;
+EXPORT_SYMBOL_GPL(xp_nofault_PIOR);
 
+u64 xp_nofault_PIOR_target;
+EXPORT_SYMBOL_GPL(xp_nofault_PIOR_target);
 
 /*
  * xpc_registrations[] keeps track of xpc_connect()'s done by the kernel-level
  * users of XPC.
  */
 struct xpc_registration xpc_registrations[XPC_NCHANNELS];
-
+EXPORT_SYMBOL_GPL(xpc_registrations);
 
 /*
  * Initialize the XPC interface to indicate that XPC isn't loaded.
  */
-static enum xpc_retval xpc_notloaded(void) { return xpcNotLoaded; }
+static enum xpc_retval
+xpc_notloaded(void)
+{
+       return xpcNotLoaded;
+}
 
 struct xpc_interface xpc_interface = {
-       (void (*)(int)) xpc_notloaded,
-       (void (*)(int)) xpc_notloaded,
-       (enum xpc_retval (*)(partid_t, int, u32, void **)) xpc_notloaded,
-       (enum xpc_retval (*)(partid_t, int, void *)) xpc_notloaded,
-       (enum xpc_retval (*)(partid_t, int, void *, xpc_notify_func, void *))
-                                                       xpc_notloaded,
-       (void (*)(partid_t, int, void *)) xpc_notloaded,
-       (enum xpc_retval (*)(partid_t, void *)) xpc_notloaded
+       (void (*)(int))xpc_notloaded,
+       (void (*)(int))xpc_notloaded,
+       (enum xpc_retval(*)(partid_t, int, u32, void **))xpc_notloaded,
+       (enum xpc_retval(*)(partid_t, int, void *))xpc_notloaded,
+       (enum xpc_retval(*)(partid_t, int, void *, xpc_notify_func, void *))
+           xpc_notloaded,
+       (void (*)(partid_t, int, void *))xpc_notloaded,
+       (enum xpc_retval(*)(partid_t, void *))xpc_notloaded
 };
-
+EXPORT_SYMBOL_GPL(xpc_interface);
 
 /*
  * XPC calls this when it (the XPC module) has been loaded.
  */
 void
-xpc_set_interface(void (*connect)(int),
-               void (*disconnect)(int),
-               enum xpc_retval (*allocate)(partid_t, int, u32, void **),
-               enum xpc_retval (*send)(partid_t, int, void *),
-               enum xpc_retval (*send_notify)(partid_t, int, void *,
-                                               xpc_notify_func, void *),
-               void (*received)(partid_t, int, void *),
-               enum xpc_retval (*partid_to_nasids)(partid_t, void *))
+xpc_set_interface(void (*connect) (int),
+                 void (*disconnect) (int),
+                 enum xpc_retval (*allocate) (partid_t, int, u32, void **),
+                 enum xpc_retval (*send) (partid_t, int, void *),
+                 enum xpc_retval (*send_notify) (partid_t, int, void *,
+                                                 xpc_notify_func, void *),
+                 void (*received) (partid_t, int, void *),
+                 enum xpc_retval (*partid_to_nasids) (partid_t, void *))
 {
        xpc_interface.connect = connect;
        xpc_interface.disconnect = disconnect;
@@ -76,7 +81,7 @@ xpc_set_interface(void (*connect)(int),
        xpc_interface.received = received;
        xpc_interface.partid_to_nasids = partid_to_nasids;
 }
-
+EXPORT_SYMBOL_GPL(xpc_set_interface);
 
 /*
  * XPC calls this when it (the XPC module) is being unloaded.
@@ -84,20 +89,21 @@ xpc_set_interface(void (*connect)(int),
 void
 xpc_clear_interface(void)
 {
-       xpc_interface.connect = (void (*)(int)) xpc_notloaded;
-       xpc_interface.disconnect = (void (*)(int)) xpc_notloaded;
-       xpc_interface.allocate = (enum xpc_retval (*)(partid_t, int, u32,
-                                       void **)) xpc_notloaded;
-       xpc_interface.send = (enum xpc_retval (*)(partid_t, int, void *))
-                                       xpc_notloaded;
-       xpc_interface.send_notify = (enum xpc_retval (*)(partid_t, int, void *,
-                                   xpc_notify_func, void *)) xpc_notloaded;
+       xpc_interface.connect = (void (*)(int))xpc_notloaded;
+       xpc_interface.disconnect = (void (*)(int))xpc_notloaded;
+       xpc_interface.allocate = (enum xpc_retval(*)(partid_t, int, u32,
+                                                    void **))xpc_notloaded;
+       xpc_interface.send = (enum xpc_retval(*)(partid_t, int, void *))
+           xpc_notloaded;
+       xpc_interface.send_notify = (enum xpc_retval(*)(partid_t, int, void *,
+                                                       xpc_notify_func,
+                                                       void *))xpc_notloaded;
        xpc_interface.received = (void (*)(partid_t, int, void *))
-                                       xpc_notloaded;
-       xpc_interface.partid_to_nasids = (enum xpc_retval (*)(partid_t, void *))
-                                       xpc_notloaded;
+           xpc_notloaded;
+       xpc_interface.partid_to_nasids = (enum xpc_retval(*)(partid_t, void *))
+           xpc_notloaded;
 }
-
+EXPORT_SYMBOL_GPL(xpc_clear_interface);
 
 /*
  * Register for automatic establishment of a channel connection whenever
@@ -125,11 +131,10 @@ xpc_clear_interface(void)
  */
 enum xpc_retval
 xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
-               u16 nentries, u32 assigned_limit, u32 idle_limit)
+           u16 nentries, u32 assigned_limit, u32 idle_limit)
 {
        struct xpc_registration *registration;
 
-
        DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS);
        DBUG_ON(payload_size == 0 || nentries == 0);
        DBUG_ON(func == NULL);
@@ -137,9 +142,8 @@ xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
 
        registration = &xpc_registrations[ch_number];
 
-       if (mutex_lock_interruptible(&registration->mutex) != 0) {
+       if (mutex_lock_interruptible(&registration->mutex) != 0)
                return xpcInterrupted;
-       }
 
        /* if XPC_CHANNEL_REGISTERED(ch_number) */
        if (registration->func != NULL) {
@@ -161,7 +165,7 @@ xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
 
        return xpcSuccess;
 }
-
+EXPORT_SYMBOL_GPL(xpc_connect);
 
 /*
  * Remove the registration for automatic connection of the specified channel
@@ -181,7 +185,6 @@ xpc_disconnect(int ch_number)
 {
        struct xpc_registration *registration;
 
-
        DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS);
 
        registration = &xpc_registrations[ch_number];
@@ -213,19 +216,17 @@ xpc_disconnect(int ch_number)
 
        return;
 }
-
+EXPORT_SYMBOL_GPL(xpc_disconnect);
 
 int __init
 xp_init(void)
 {
        int ret, ch_number;
-       u64 func_addr = *(u64 *) xp_nofault_PIOR;
-       u64 err_func_addr = *(u64 *) xp_error_PIOR;
+       u64 func_addr = *(u64 *)xp_nofault_PIOR;
+       u64 err_func_addr = *(u64 *)xp_error_PIOR;
 
-
-       if (!ia64_platform_is("sn2")) {
+       if (!ia64_platform_is("sn2"))
                return -ENODEV;
-       }
 
        /*
         * Register a nofault code region which performs a cross-partition
@@ -236,55 +237,43 @@ xp_init(void)
         * least some CPUs on Shubs <= v1.2, which unfortunately we have to
         * work around).
         */
-       if ((ret = sn_register_nofault_code(func_addr, err_func_addr,
-                                               err_func_addr, 1, 1)) != 0) {
+       ret = sn_register_nofault_code(func_addr, err_func_addr, err_func_addr,
+                                      1, 1);
+       if (ret != 0) {
                printk(KERN_ERR "XP: can't register nofault code, error=%d\n",
-                       ret);
+                      ret);
        }
        /*
         * Setup the nofault PIO read target. (There is no special reason why
         * SH_IPI_ACCESS was selected.)
         */
-       if (is_shub2()) {
+       if (is_shub2())
                xp_nofault_PIOR_target = SH2_IPI_ACCESS0;
-       } else {
+       else
                xp_nofault_PIOR_target = SH1_IPI_ACCESS;
-       }
 
        /* initialize the connection registration mutex */
-       for (ch_number = 0; ch_number < XPC_NCHANNELS; ch_number++) {
+       for (ch_number = 0; ch_number < XPC_NCHANNELS; ch_number++)
                mutex_init(&xpc_registrations[ch_number].mutex);
-       }
 
        return 0;
 }
-module_init(xp_init);
 
+module_init(xp_init);
 
 void __exit
 xp_exit(void)
 {
-       u64 func_addr = *(u64 *) xp_nofault_PIOR;
-       u64 err_func_addr = *(u64 *) xp_error_PIOR;
-
+       u64 func_addr = *(u64 *)xp_nofault_PIOR;
+       u64 err_func_addr = *(u64 *)xp_error_PIOR;
 
        /* unregister the PIO read nofault code region */
-       (void) sn_register_nofault_code(func_addr, err_func_addr,
-                                       err_func_addr, 1, 0);
+       (void)sn_register_nofault_code(func_addr, err_func_addr,
+                                      err_func_addr, 1, 0);
 }
-module_exit(xp_exit);
 
+module_exit(xp_exit);
 
 MODULE_AUTHOR("Silicon Graphics, Inc.");
 MODULE_DESCRIPTION("Cross Partition (XP) base");
 MODULE_LICENSE("GPL");
-
-EXPORT_SYMBOL(xp_nofault_PIOR);
-EXPORT_SYMBOL(xp_nofault_PIOR_target);
-EXPORT_SYMBOL(xpc_registrations);
-EXPORT_SYMBOL(xpc_interface);
-EXPORT_SYMBOL(xpc_clear_interface);
-EXPORT_SYMBOL(xpc_set_interface);
-EXPORT_SYMBOL(xpc_connect);
-EXPORT_SYMBOL(xpc_disconnect);
-
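A detail worth noting from this file: until xpc.ko loads and calls
xpc_set_interface(), every operation in xpc_interface points at
xpc_notloaded() (cast to the matching signature), so the inline wrappers in
xp.h fail soft instead of dereferencing NULL. A sketch of the caller-visible
effect, assuming XPC has not yet registered:

	/* illustrative only; xpc_send() is the inline wrapper from xp.h */
	enum xpc_retval ret = xpc_send(partid, ch_number, payload);
	if (ret == xpcNotLoaded)
		printk(KERN_INFO "XPC module not loaded yet\n");

Note also that the exports move from EXPORT_SYMBOL to EXPORT_SYMBOL_GPL,
restricting these entry points to GPL-compatible modules.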
diff --git a/arch/ia64/sn/kernel/xp_nofault.S b/drivers/misc/sgi-xp/xp_nofault.S
similarity index 95%
rename from arch/ia64/sn/kernel/xp_nofault.S
rename to drivers/misc/sgi-xp/xp_nofault.S
index 98e7c7d..e38d433 100644
@@ -3,10 +3,9 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (c) 2004-2007 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (c) 2004-2008 Silicon Graphics, Inc.  All Rights Reserved.
  */
 
-
 /*
  * The xp_nofault_PIOR function takes a pointer to a remote PIO register
  * and attempts to load and consume a value from it.  This function
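For context on what this file provides: xp_init() in xp_main.c registers
this routine's code region via sn_register_nofault_code(), so a PIO read of
a remote partition's register that faults (for example, because that
partition has gone down) comes back as an error return rather than
escalating into a fatal MCA. A sketch of the calling pattern, where
remote_addr is hypothetical and the nonzero-on-failure convention is
assumed rather than shown in this excerpt:

	/* illustrative; remote_addr stands in for a remote PIO register */
	if (xp_nofault_PIOR((void *)remote_addr) != 0) {
		/* remote side unreachable; recover instead of MCAing */
	}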
diff --git a/include/asm-ia64/sn/xpc.h b/drivers/misc/sgi-xp/xpc.h
similarity index 75%
rename from include/asm-ia64/sn/xpc.h
rename to drivers/misc/sgi-xp/xpc.h
index 3c0900a..9eb6d4a 100644
@@ -3,17 +3,15 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (c) 2004-2007 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (c) 2004-2008 Silicon Graphics, Inc.  All Rights Reserved.
  */
 
-
 /*
  * Cross Partition Communication (XPC) structures and macros.
  */
 
-#ifndef _ASM_IA64_SN_XPC_H
-#define _ASM_IA64_SN_XPC_H
-
+#ifndef _DRIVERS_MISC_SGIXP_XPC_H
+#define _DRIVERS_MISC_SGIXP_XPC_H
 
 #include <linux/interrupt.h>
 #include <linux/sysctl.h>
@@ -27,8 +25,7 @@
 #include <asm/sn/addrs.h>
 #include <asm/sn/mspec.h>
 #include <asm/sn/shub_mmr.h>
-#include <asm/sn/xp.h>
-
+#include "xp.h"
 
 /*
  * XPC Version numbers consist of a major and minor number. XPC can always
@@ -39,7 +36,6 @@
 #define XPC_VERSION_MAJOR(_v)          ((_v) >> 4)
 #define XPC_VERSION_MINOR(_v)          ((_v) & 0xf)
 
-
 /*
  * The next macros define word or bit representations for given
  * C-brick nasid in either the SAL provided bit array representing
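Given the accessors just above, _XPC_VERSION() evidently packs a version as
(major << 4) | minor; the macro itself is outside this excerpt, so that
packing is an assumption. A worked example under it:

	/* assuming _XPC_VERSION(3, 1) == 0x31 */
	XPC_VERSION_MAJOR(0x31);	/* == 3, i.e. 0x31 >> 4  */
	XPC_VERSION_MINOR(0x31);	/* == 1, i.e. 0x31 & 0xf */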
@@ -67,7 +63,6 @@
 /* define the process name of the discovery thread */
 #define XPC_DISCOVERY_THREAD_NAME      "xpc_discovery"
 
-
 /*
  * the reserved page
  *
@@ -115,16 +110,16 @@ struct xpc_rsvd_page {
        u8 partid;              /* SAL: partition ID */
        u8 version;
        u8 pad1[6];             /* align to next u64 in cacheline */
-       volatile u64 vars_pa;
+       u64 vars_pa;            /* physical address of struct xpc_vars */
        struct timespec stamp;  /* time when reserved page was setup by XPC */
        u64 pad2[9];            /* align to last u64 in cacheline */
        u64 nasids_size;        /* SAL: size of each nasid mask in bytes */
 };
 
-#define XPC_RP_VERSION _XPC_VERSION(1,1) /* version 1.1 of the reserved page */
+#define XPC_RP_VERSION _XPC_VERSION(1, 1) /* version 1.1 of the reserved page */
 
 #define XPC_SUPPORTS_RP_STAMP(_version) \
-                       (_version >= _XPC_VERSION(1,1))
+                       (_version >= _XPC_VERSION(1, 1))
 
 /*
  * compare stamps - the return value is:
@@ -138,14 +133,13 @@ xpc_compare_stamps(struct timespec *stamp1, struct timespec *stamp2)
 {
        int ret;
 
-
-       if ((ret = stamp1->tv_sec - stamp2->tv_sec) == 0) {
+       ret = stamp1->tv_sec - stamp2->tv_sec;
+       if (ret == 0)
                ret = stamp1->tv_nsec - stamp2->tv_nsec;
-       }
+
        return ret;
 }
 
-
 /*
  * Define the structures by which XPC variables can be exported to other
  * partitions. (There are two: struct xpc_vars and struct xpc_vars_part)
@@ -172,11 +166,10 @@ struct xpc_vars {
        AMO_t *amos_page;       /* vaddr of page of AMOs from MSPEC driver */
 };
 
-#define XPC_V_VERSION _XPC_VERSION(3,1) /* version 3.1 of the cross vars */
+#define XPC_V_VERSION _XPC_VERSION(3, 1)    /* version 3.1 of the cross vars */
 
 #define XPC_SUPPORTS_DISENGAGE_REQUEST(_version) \
-                       (_version >= _XPC_VERSION(3,1))
-
+                       (_version >= _XPC_VERSION(3, 1))
 
 static inline int
 xpc_hb_allowed(partid_t partid, struct xpc_vars *vars)
@@ -193,7 +186,7 @@ xpc_allow_hb(partid_t partid, struct xpc_vars *vars)
                old_mask = vars->heartbeating_to_mask;
                new_mask = (old_mask | (1UL << partid));
        } while (cmpxchg(&vars->heartbeating_to_mask, old_mask, new_mask) !=
-                                                       old_mask);
+                old_mask);
 }
 
 static inline void
@@ -205,10 +198,9 @@ xpc_disallow_hb(partid_t partid, struct xpc_vars *vars)
                old_mask = vars->heartbeating_to_mask;
                new_mask = (old_mask & ~(1UL << partid));
        } while (cmpxchg(&vars->heartbeating_to_mask, old_mask, new_mask) !=
-                                                       old_mask);
+                old_mask);
 }
 
-
 /*
  * The AMOs page consists of a number of AMO variables which are divided into
 * four groups. The first two groups are used to identify an IRQ's sender.
@@ -222,7 +214,6 @@ xpc_disallow_hb(partid_t partid, struct xpc_vars *vars)
 #define XPC_ENGAGED_PARTITIONS_AMO (XPC_ACTIVATE_IRQ_AMOS + XP_NASID_MASK_WORDS)
 #define XPC_DISENGAGE_REQUEST_AMO  (XPC_ENGAGED_PARTITIONS_AMO + 1)
 
-
 /*
  * The following structure describes the per partition specific variables.
  *
@@ -234,7 +225,7 @@ xpc_disallow_hb(partid_t partid, struct xpc_vars *vars)
  * occupies half a cacheline.
  */
 struct xpc_vars_part {
-       volatile u64 magic;
+       u64 magic;
 
        u64 openclose_args_pa;  /* physical address of open and close args */
        u64 GPs_pa;             /* physical address of Get/Put values */
@@ -257,20 +248,20 @@ struct xpc_vars_part {
 * MAGIC2 indicates that this partition has pulled the remote partition's
  * per partition variables that pertain to this partition.
  */
-#define XPC_VP_MAGIC1  0x0053524156435058L  /* 'XPCVARS\0'L (little endian) */
-#define XPC_VP_MAGIC2  0x0073726176435058L  /* 'XPCvars\0'L (little endian) */
-
+#define XPC_VP_MAGIC1  0x0053524156435058L   /* 'XPCVARS\0'L (little endian) */
+#define XPC_VP_MAGIC2  0x0073726176435058L   /* 'XPCvars\0'L (little endian) */
 
 /* the reserved page sizes and offsets */
 
 #define XPC_RP_HEADER_SIZE     L1_CACHE_ALIGN(sizeof(struct xpc_rsvd_page))
-#define XPC_RP_VARS_SIZE       L1_CACHE_ALIGN(sizeof(struct xpc_vars))
+#define XPC_RP_VARS_SIZE       L1_CACHE_ALIGN(sizeof(struct xpc_vars))
 
-#define XPC_RP_PART_NASIDS(_rp) (u64 *) ((u8 *) _rp + XPC_RP_HEADER_SIZE)
+#define XPC_RP_PART_NASIDS(_rp) ((u64 *)((u8 *)(_rp) + XPC_RP_HEADER_SIZE))
 #define XPC_RP_MACH_NASIDS(_rp) (XPC_RP_PART_NASIDS(_rp) + xp_nasid_mask_words)
-#define XPC_RP_VARS(_rp)       ((struct xpc_vars *) XPC_RP_MACH_NASIDS(_rp) + xp_nasid_mask_words)
-#define XPC_RP_VARS_PART(_rp)  (struct xpc_vars_part *) ((u8 *) XPC_RP_VARS(rp) + XPC_RP_VARS_SIZE)
-
+#define XPC_RP_VARS(_rp)       ((struct xpc_vars *)(XPC_RP_MACH_NASIDS(_rp) + \
+                                   xp_nasid_mask_words))
+#define XPC_RP_VARS_PART(_rp)  ((struct xpc_vars_part *) \
+                                   ((u8 *)XPC_RP_VARS(_rp) + XPC_RP_VARS_SIZE))
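Besides the re-indentation, the rewritten XPC_RP_VARS_PART() fixes a latent
bug visible in the removed line: the old body referenced rp rather than its
_rp parameter, so the macro only compiled where a variable named rp happened
to be in scope. The new definitions also fully parenthesize their arguments.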
 
 /*
  * Functions registered by add_timer() or called by kernel_thread() only
@@ -285,21 +276,17 @@ struct xpc_vars_part {
 #define XPC_UNPACK_ARG1(_args) (((u64) _args) & 0xffffffff)
 #define XPC_UNPACK_ARG2(_args) ((((u64) _args) >> 32) & 0xffffffff)
 
-
-
 /*
  * Define a Get/Put value pair (pointers) used with a message queue.
  */
 struct xpc_gp {
-       volatile s64 get;       /* Get value */
-       volatile s64 put;       /* Put value */
+       s64 get;                /* Get value */
+       s64 put;                /* Put value */
 };
 
 #define XPC_GP_SIZE \
                L1_CACHE_ALIGN(sizeof(struct xpc_gp) * XPC_NCHANNELS)
 
-
-
 /*
  * Define a structure that contains arguments associated with opening and
  * closing a channel.
@@ -315,20 +302,15 @@ struct xpc_openclose_args {
 #define XPC_OPENCLOSE_ARGS_SIZE \
              L1_CACHE_ALIGN(sizeof(struct xpc_openclose_args) * XPC_NCHANNELS)
 
-
-
 /* struct xpc_msg flags */
 
 #define        XPC_M_DONE              0x01    /* msg has been received/consumed */
 #define        XPC_M_READY             0x02    /* msg is ready to be sent */
 #define        XPC_M_INTERRUPT         0x04    /* send interrupt when msg consumed */
 
-
 #define XPC_MSG_ADDRESS(_payload) \
                ((struct xpc_msg *)((u8 *)(_payload) - XPC_MSG_PAYLOAD_OFFSET))
 
-
-
 /*
  * Defines notify entry.
  *
@@ -336,19 +318,17 @@ struct xpc_openclose_args {
  * and consumed by the intended recipient.
  */
 struct xpc_notify {
-       volatile u8 type;               /* type of notification */
+       u8 type;                /* type of notification */
 
        /* the following two fields are only used if type == XPC_N_CALL */
-       xpc_notify_func func;           /* user's notify function */
-       void *key;                      /* pointer to user's key */
+       xpc_notify_func func;   /* user's notify function */
+       void *key;              /* pointer to user's key */
 };
 
 /* struct xpc_notify type of notification */
 
 #define        XPC_N_CALL              0x01    /* notify function provided by user */
 
-
-
 /*
 * Define the structure that manages everything required by a channel. In
 * particular, it is used to manage the messages sent across the channel.
@@ -428,48 +408,48 @@ struct xpc_notify {
  *     messages.
  */
 struct xpc_channel {
-       partid_t partid;                /* ID of remote partition connected */
-       spinlock_t lock;                /* lock for updating this structure */
-       u32 flags;                      /* general flags */
+       partid_t partid;        /* ID of remote partition connected */
+       spinlock_t lock;        /* lock for updating this structure */
+       u32 flags;              /* general flags */
 
-       enum xpc_retval reason;         /* reason why channel is disconnect'g */
-       int reason_line;                /* line# disconnect initiated from */
+       enum xpc_retval reason; /* reason why channel is disconnect'g */
+       int reason_line;        /* line# disconnect initiated from */
 
-       u16 number;                     /* channel # */
+       u16 number;             /* channel # */
 
-       u16 msg_size;                   /* sizeof each msg entry */
-       u16 local_nentries;             /* #of msg entries in local msg queue */
-       u16 remote_nentries;            /* #of msg entries in remote msg queue*/
+       u16 msg_size;           /* sizeof each msg entry */
+       u16 local_nentries;     /* #of msg entries in local msg queue */
+       u16 remote_nentries;    /* #of msg entries in remote msg queue */
 
        void *local_msgqueue_base;      /* base address of kmalloc'd space */
        struct xpc_msg *local_msgqueue; /* local message queue */
        void *remote_msgqueue_base;     /* base address of kmalloc'd space */
-       struct xpc_msg *remote_msgqueue;/* cached copy of remote partition's */
-                                       /* local message queue */
-       u64 remote_msgqueue_pa;         /* phys addr of remote partition's */
-                                       /* local message queue */
+       struct xpc_msg *remote_msgqueue; /* cached copy of remote partition's */
+                                        /* local message queue */
+       u64 remote_msgqueue_pa; /* phys addr of remote partition's */
+                               /* local message queue */
 
-       atomic_t references;            /* #of external references to queues */
+       atomic_t references;    /* #of external references to queues */
 
-       atomic_t n_on_msg_allocate_wq;   /* #on msg allocation wait queue */
-       wait_queue_head_t msg_allocate_wq; /* msg allocation wait queue */
+       atomic_t n_on_msg_allocate_wq;  /* #on msg allocation wait queue */
+       wait_queue_head_t msg_allocate_wq;      /* msg allocation wait queue */
 
-       u8 delayed_IPI_flags;           /* IPI flags received, but delayed */
-                                       /* action until channel disconnected */
+       u8 delayed_IPI_flags;   /* IPI flags received, but delayed */
+                               /* action until channel disconnected */
 
        /* queue of msg senders who want to be notified when msg received */
 
-       atomic_t n_to_notify;           /* #of msg senders to notify */
-       struct xpc_notify *notify_queue;/* notify queue for messages sent */
+       atomic_t n_to_notify;   /* #of msg senders to notify */
+       struct xpc_notify *notify_queue;    /* notify queue for messages sent */
 
-       xpc_channel_func func;          /* user's channel function */
-       void *key;                      /* pointer to user's key */
+       xpc_channel_func func;  /* user's channel function */
+       void *key;              /* pointer to user's key */
 
        struct mutex msg_to_pull_mutex; /* next msg to pull serialization */
-       struct completion wdisconnect_wait; /* wait for channel disconnect */
+       struct completion wdisconnect_wait;    /* wait for channel disconnect */
 
        struct xpc_openclose_args *local_openclose_args; /* args passed on */
-                                       /* opening or closing of channel */
+                                            /* opening or closing of channel */
 
        /* various flavors of local and remote Get/Put values */
 
@@ -477,56 +457,48 @@ struct xpc_channel {
        struct xpc_gp remote_GP;        /* remote Get/Put values */
        struct xpc_gp w_local_GP;       /* working local Get/Put values */
        struct xpc_gp w_remote_GP;      /* working remote Get/Put values */
-       s64 next_msg_to_pull;           /* Put value of next msg to pull */
+       s64 next_msg_to_pull;   /* Put value of next msg to pull */
 
        /* kthread management related fields */
 
-// >>> rethink having kthreads_assigned_limit and kthreads_idle_limit; perhaps
-// >>> allow the assigned limit be unbounded and let the idle limit be dynamic
-// >>> dependent on activity over the last interval of time
        atomic_t kthreads_assigned;     /* #of kthreads assigned to channel */
-       u32 kthreads_assigned_limit;    /* limit on #of kthreads assigned */
-       atomic_t kthreads_idle;         /* #of kthreads idle waiting for work */
+       u32 kthreads_assigned_limit;    /* limit on #of kthreads assigned */
+       atomic_t kthreads_idle; /* #of kthreads idle waiting for work */
        u32 kthreads_idle_limit;        /* limit on #of kthreads idle */
        atomic_t kthreads_active;       /* #of kthreads actively working */
-       // >>> following field is temporary
-       u32 kthreads_created;           /* total #of kthreads created */
 
        wait_queue_head_t idle_wq;      /* idle kthread wait queue */
 
 } ____cacheline_aligned;
 
-
 /* struct xpc_channel flags */
 
-#define        XPC_C_WASCONNECTED      0x00000001 /* channel was connected */
+#define        XPC_C_WASCONNECTED      0x00000001      /* channel was connected */
 
-#define        XPC_C_ROPENREPLY        0x00000002 /* remote open channel reply */
-#define        XPC_C_OPENREPLY         0x00000004 /* local open channel reply */
-#define        XPC_C_ROPENREQUEST      0x00000008 /* remote open channel request */
-#define        XPC_C_OPENREQUEST       0x00000010 /* local open channel request */
+#define        XPC_C_ROPENREPLY        0x00000002      /* remote open channel reply */
+#define        XPC_C_OPENREPLY         0x00000004      /* local open channel reply */
+#define        XPC_C_ROPENREQUEST      0x00000008     /* remote open channel request */
+#define        XPC_C_OPENREQUEST       0x00000010      /* local open channel request */
 
 #define        XPC_C_SETUP             0x00000020 /* channel's msgqueues are alloc'd */
-#define        XPC_C_CONNECTEDCALLOUT  0x00000040 /* connected callout initiated */
+#define        XPC_C_CONNECTEDCALLOUT  0x00000040     /* connected callout initiated */
 #define        XPC_C_CONNECTEDCALLOUT_MADE \
-                               0x00000080 /* connected callout completed */
-#define        XPC_C_CONNECTED         0x00000100 /* local channel is connected */
-#define        XPC_C_CONNECTING        0x00000200 /* channel is being connected */
+                               0x00000080     /* connected callout completed */
+#define        XPC_C_CONNECTED         0x00000100      /* local channel is connected */
+#define        XPC_C_CONNECTING        0x00000200      /* channel is being connected */
 
-#define        XPC_C_RCLOSEREPLY       0x00000400 /* remote close channel reply */
-#define        XPC_C_CLOSEREPLY        0x00000800 /* local close channel reply */
-#define        XPC_C_RCLOSEREQUEST     0x00001000 /* remote close channel request */
-#define        XPC_C_CLOSEREQUEST      0x00002000 /* local close channel request */
+#define        XPC_C_RCLOSEREPLY       0x00000400      /* remote close channel reply */
+#define        XPC_C_CLOSEREPLY        0x00000800      /* local close channel reply */
+#define        XPC_C_RCLOSEREQUEST     0x00001000    /* remote close channel request */
+#define        XPC_C_CLOSEREQUEST      0x00002000     /* local close channel request */
 
-#define        XPC_C_DISCONNECTED      0x00004000 /* channel is disconnected */
-#define        XPC_C_DISCONNECTING     0x00008000 /* channel is being disconnected */
+#define        XPC_C_DISCONNECTED      0x00004000      /* channel is disconnected */
+#define        XPC_C_DISCONNECTING     0x00008000   /* channel is being disconnected */
 #define        XPC_C_DISCONNECTINGCALLOUT \
                                0x00010000 /* disconnecting callout initiated */
 #define        XPC_C_DISCONNECTINGCALLOUT_MADE \
                                0x00020000 /* disconnecting callout completed */
-#define        XPC_C_WDISCONNECT       0x00040000 /* waiting for channel disconnect */
-
-
+#define        XPC_C_WDISCONNECT       0x00040000  /* waiting for channel disconnect */
 
 /*
  * Manages channels on a partition basis. There is one of these structures
@@ -537,33 +509,31 @@ struct xpc_partition {
 
        /* XPC HB infrastructure */
 
-       u8 remote_rp_version;           /* version# of partition's rsvd pg */
-       struct timespec remote_rp_stamp;/* time when rsvd pg was initialized */
-       u64 remote_rp_pa;               /* phys addr of partition's rsvd pg */
-       u64 remote_vars_pa;             /* phys addr of partition's vars */
+       u8 remote_rp_version;   /* version# of partition's rsvd pg */
+       struct timespec remote_rp_stamp; /* time when rsvd pg was initialized */
+       u64 remote_rp_pa;       /* phys addr of partition's rsvd pg */
+       u64 remote_vars_pa;     /* phys addr of partition's vars */
        u64 remote_vars_part_pa;        /* phys addr of partition's vars part */
-       u64 last_heartbeat;             /* HB at last read */
+       u64 last_heartbeat;     /* HB at last read */
        u64 remote_amos_page_pa;        /* phys addr of partition's amos page */
-       int remote_act_nasid;           /* active part's act/deact nasid */
+       int remote_act_nasid;   /* active part's act/deact nasid */
        int remote_act_phys_cpuid;      /* active part's act/deact phys cpuid */
-       u32 act_IRQ_rcvd;               /* IRQs since activation */
-       spinlock_t act_lock;            /* protect updating of act_state */
-       u8 act_state;                   /* from XPC HB viewpoint */
-       u8 remote_vars_version;         /* version# of partition's vars */
-       enum xpc_retval reason;         /* reason partition is deactivating */
-       int reason_line;                /* line# deactivation initiated from */
-       int reactivate_nasid;           /* nasid in partition to reactivate */
-
-       unsigned long disengage_request_timeout; /* timeout in jiffies */
+       u32 act_IRQ_rcvd;       /* IRQs since activation */
+       spinlock_t act_lock;    /* protect updating of act_state */
+       u8 act_state;           /* from XPC HB viewpoint */
+       u8 remote_vars_version; /* version# of partition's vars */
+       enum xpc_retval reason; /* reason partition is deactivating */
+       int reason_line;        /* line# deactivation initiated from */
+       int reactivate_nasid;   /* nasid in partition to reactivate */
+
+       unsigned long disengage_request_timeout;        /* timeout in jiffies */
        struct timer_list disengage_request_timer;
 
-
        /* XPC infrastructure referencing and teardown control */
 
-       volatile u8 setup_state;        /* infrastructure setup state */
+       u8 setup_state;         /* infrastructure setup state */
        wait_queue_head_t teardown_wq;  /* kthread waiting to teardown infra */
-       atomic_t references;            /* #of references to infrastructure */
-
+       atomic_t references;    /* #of references to infrastructure */
 
        /*
         * NONE OF THE PRECEDING FIELDS OF THIS STRUCTURE WILL BE CLEARED WHEN
@@ -572,53 +542,48 @@ struct xpc_partition {
         * 'nchannels' FIELD MUST BE THE FIRST OF THE FIELDS TO BE CLEARED.)
         */
 
-
-       u8 nchannels;              /* #of defined channels supported */
-       atomic_t nchannels_active; /* #of channels that are not DISCONNECTED */
-       atomic_t nchannels_engaged;/* #of channels engaged with remote part */
-       struct xpc_channel *channels;/* array of channel structures */
-
-       void *local_GPs_base;     /* base address of kmalloc'd space */
-       struct xpc_gp *local_GPs; /* local Get/Put values */
-       void *remote_GPs_base;    /* base address of kmalloc'd space */
-       struct xpc_gp *remote_GPs;/* copy of remote partition's local Get/Put */
-                                 /* values */
-       u64 remote_GPs_pa;        /* phys address of remote partition's local */
-                                 /* Get/Put values */
-
+       u8 nchannels;           /* #of defined channels supported */
+       atomic_t nchannels_active;  /* #of channels that are not DISCONNECTED */
+       atomic_t nchannels_engaged;  /* #of channels engaged with remote part */
+       struct xpc_channel *channels;   /* array of channel structures */
+
+       void *local_GPs_base;   /* base address of kmalloc'd space */
+       struct xpc_gp *local_GPs;       /* local Get/Put values */
+       void *remote_GPs_base;  /* base address of kmalloc'd space */
+       struct xpc_gp *remote_GPs;      /* copy of remote partition's local */
+                                       /* Get/Put values */
+       u64 remote_GPs_pa;      /* phys address of remote partition's local */
+                               /* Get/Put values */
 
        /* fields used to pass args when opening or closing a channel */
 
-       void *local_openclose_args_base;  /* base address of kmalloc'd space */
-       struct xpc_openclose_args *local_openclose_args;  /* local's args */
-       void *remote_openclose_args_base; /* base address of kmalloc'd space */
+       void *local_openclose_args_base;   /* base address of kmalloc'd space */
+       struct xpc_openclose_args *local_openclose_args;      /* local's args */
+       void *remote_openclose_args_base;  /* base address of kmalloc'd space */
        struct xpc_openclose_args *remote_openclose_args; /* copy of remote's */
-                                         /* args */
-       u64 remote_openclose_args_pa;     /* phys addr of remote's args */
-
+                                                         /* args */
+       u64 remote_openclose_args_pa;   /* phys addr of remote's args */
 
        /* IPI sending, receiving and handling related fields */
 
-       int remote_IPI_nasid;       /* nasid of where to send IPIs */
-       int remote_IPI_phys_cpuid;  /* phys CPU ID of where to send IPIs */
-       AMO_t *remote_IPI_amo_va;   /* address of remote IPI AMO_t structure */
-
-       AMO_t *local_IPI_amo_va;    /* address of IPI AMO_t structure */
-       u64 local_IPI_amo;          /* IPI amo flags yet to be handled */
-       char IPI_owner[8];          /* IPI owner's name */
-       struct timer_list dropped_IPI_timer; /* dropped IPI timer */
+       int remote_IPI_nasid;   /* nasid of where to send IPIs */
+       int remote_IPI_phys_cpuid;      /* phys CPU ID of where to send IPIs */
+       AMO_t *remote_IPI_amo_va;    /* address of remote IPI AMO_t structure */
 
-       spinlock_t IPI_lock;        /* IPI handler lock */
+       AMO_t *local_IPI_amo_va;        /* address of IPI AMO_t structure */
+       u64 local_IPI_amo;      /* IPI amo flags yet to be handled */
+       char IPI_owner[8];      /* IPI owner's name */
+       struct timer_list dropped_IPI_timer;    /* dropped IPI timer */
 
+       spinlock_t IPI_lock;    /* IPI handler lock */
 
        /* channel manager related fields */
 
        atomic_t channel_mgr_requests;  /* #of requests to activate chan mgr */
-       wait_queue_head_t channel_mgr_wq; /* channel mgr's wait queue */
+       wait_queue_head_t channel_mgr_wq;       /* channel mgr's wait queue */
 
 } ____cacheline_aligned;
 
-
 /* struct xpc_partition act_state values (for XPC HB) */
 
 #define        XPC_P_INACTIVE          0x00    /* partition is not active */
@@ -627,11 +592,9 @@ struct xpc_partition {
 #define XPC_P_ACTIVE           0x03    /* xpc_partition_up() was called */
 #define XPC_P_DEACTIVATING     0x04    /* partition deactivation initiated */
 
-
 #define XPC_DEACTIVATE_PARTITION(_p, _reason) \
                        xpc_deactivate_partition(__LINE__, (_p), (_reason))
 
-
 /* struct xpc_partition setup_state values */
 
 #define XPC_P_UNSET            0x00    /* infrastructure was never setup */
@@ -639,8 +602,6 @@ struct xpc_partition {
 #define XPC_P_WTEARDOWN                0x02    /* waiting to teardown infrastructure */
 #define XPC_P_TORNDOWN         0x03    /* infrastructure is torn down */
 
-
-
 /*
  * struct xpc_partition IPI_timer #of seconds to wait before checking for
  * dropped IPIs. These occur whenever an IPI amo write doesn't complete until
@@ -648,22 +609,17 @@ struct xpc_partition {
  */
 #define XPC_P_DROPPED_IPI_WAIT (0.25 * HZ)
 
-
 /* number of seconds to wait for other partitions to disengage */
 #define XPC_DISENGAGE_REQUEST_DEFAULT_TIMELIMIT        90
 
 /* interval in seconds to print 'waiting disengagement' messages */
 #define XPC_DISENGAGE_PRINTMSG_INTERVAL                10
 
-
 #define XPC_PARTID(_p) ((partid_t) ((_p) - &xpc_partitions[0]))
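
For illustration, XPC_PARTID recovers a partition's number purely by pointer arithmetic against the base of the xpc_partitions array. A minimal standalone equivalent (array and names invented for the sketch):

    #include <stdio.h>

    struct part { int dummy; };
    static struct part parts[4];

    /* index of a slot within the array, as XPC_PARTID does */
    #define PARTID(_p) ((int)((_p) - &parts[0]))

    int main(void)
    {
        printf("partid = %d\n", PARTID(&parts[2])); /* prints 2 */
        return 0;
    }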
 
-
-
 /* found in xp_main.c */
 extern struct xpc_registration xpc_registrations[];
 
-
 /* found in xpc_main.c */
 extern struct device *xpc_part;
 extern struct device *xpc_chan;
@@ -676,7 +632,6 @@ extern void xpc_activate_kthreads(struct xpc_channel *, int);
 extern void xpc_create_kthreads(struct xpc_channel *, int, int);
 extern void xpc_disconnect_wait(int);
 
-
 /* found in xpc_partition.c */
 extern int xpc_exiting;
 extern struct xpc_vars *xpc_vars;
@@ -696,10 +651,9 @@ extern void xpc_mark_partition_inactive(struct xpc_partition *);
 extern void xpc_discovery(void);
 extern void xpc_check_remote_hb(void);
 extern void xpc_deactivate_partition(const int, struct xpc_partition *,
-                                               enum xpc_retval);
+                                    enum xpc_retval);
 extern enum xpc_retval xpc_initiate_partid_to_nasids(partid_t, void *);
 
-
 /* found in xpc_channel.c */
 extern void xpc_initiate_connect(int);
 extern void xpc_initiate_disconnect(int);
@@ -714,23 +668,18 @@ extern void xpc_process_channel_activity(struct xpc_partition *);
 extern void xpc_connected_callout(struct xpc_channel *);
 extern void xpc_deliver_msg(struct xpc_channel *);
 extern void xpc_disconnect_channel(const int, struct xpc_channel *,
-                                       enum xpc_retval, unsigned long *);
+                                  enum xpc_retval, unsigned long *);
 extern void xpc_disconnect_callout(struct xpc_channel *, enum xpc_retval);
 extern void xpc_partition_going_down(struct xpc_partition *, enum xpc_retval);
 extern void xpc_teardown_infrastructure(struct xpc_partition *);
 
-
-
 static inline void
 xpc_wakeup_channel_mgr(struct xpc_partition *part)
 {
-       if (atomic_inc_return(&part->channel_mgr_requests) == 1) {
+       if (atomic_inc_return(&part->channel_mgr_requests) == 1)
                wake_up(&part->channel_mgr_wq);
-       }
 }
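
xpc_wakeup_channel_mgr() only issues a wake-up on the 0 -> 1 transition of channel_mgr_requests, so a burst of concurrent requests costs a single wake-up. A self-contained sketch of that pattern, with C11 atomics standing in for the kernel's atomic_t and a printf standing in for wake_up() (all names invented):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int mgr_requests;

    static void request_channel_mgr(void)
    {
        /* atomic_fetch_add returns the old value, so old + 1 mirrors
         * the kernel's atomic_inc_return(); only the first requester
         * since the manager last ran sees the 0 -> 1 transition */
        if (atomic_fetch_add(&mgr_requests, 1) + 1 == 1)
            printf("wake channel manager\n");
    }

    int main(void)
    {
        request_channel_mgr();  /* wakes the manager */
        request_channel_mgr();  /* batched until it runs */
        return 0;
    }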
 
-
-
 /*
  * These next two inlines are used to keep us from tearing down a channel's
  * msg queues while a thread may be referencing them.
@@ -747,17 +696,13 @@ xpc_msgqueue_deref(struct xpc_channel *ch)
        s32 refs = atomic_dec_return(&ch->references);
 
        DBUG_ON(refs < 0);
-       if (refs == 0) {
+       if (refs == 0)
                xpc_wakeup_channel_mgr(&xpc_partitions[ch->partid]);
-       }
 }
 
-
-
 #define XPC_DISCONNECT_CHANNEL(_ch, _reason, _irqflgs) \
                xpc_disconnect_channel(__LINE__, _ch, _reason, _irqflgs)
 
-
 /*
  * These two inlines are used to keep us from tearing down a partition's
  * setup infrastructure while a thread may be referencing it.
@@ -767,11 +712,9 @@ xpc_part_deref(struct xpc_partition *part)
 {
        s32 refs = atomic_dec_return(&part->references);
 
-
        DBUG_ON(refs < 0);
-       if (refs == 0 && part->setup_state == XPC_P_WTEARDOWN) {
+       if (refs == 0 && part->setup_state == XPC_P_WTEARDOWN)
                wake_up(&part->teardown_wq);
-       }
 }
 
 static inline int
@@ -779,17 +722,14 @@ xpc_part_ref(struct xpc_partition *part)
 {
        int setup;
 
-
        atomic_inc(&part->references);
        setup = (part->setup_state == XPC_P_SETUP);
-       if (!setup) {
+       if (!setup)
                xpc_part_deref(part);
-       }
+
        return setup;
 }
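
xpc_part_ref() takes the reference first and only then checks setup_state, reverting through the deref path on failure; that ordering is what lets xpc_part_deref() safely test "last reference gone while teardown is waiting". A userspace sketch of the same acquire/revert pattern (C11 atomics in place of atomic_t, a printf in place of the teardown_wq wake-up, names invented):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    enum setup_state { P_UNSET, P_SETUP, P_WTEARDOWN, P_TORNDOWN };

    struct part_model {
        atomic_int references;
        enum setup_state setup_state;
    };

    static bool part_ref(struct part_model *p)
    {
        /* optimistically take a reference, then validate the state */
        atomic_fetch_add(&p->references, 1);
        if (p->setup_state == P_SETUP)
            return true;

        /* wrong state: revert; the last holder is the one that would
         * wake the teardown waiter in the real xpc_part_deref() */
        if (atomic_fetch_sub(&p->references, 1) == 1 &&
            p->setup_state == P_WTEARDOWN)
            printf("wake teardown waiter\n");
        return false;
    }

    int main(void)
    {
        struct part_model p = { .setup_state = P_WTEARDOWN };

        printf("got ref? %d\n", part_ref(&p)); /* 0, and prints wake */
        return 0;
    }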
 
-
-
 /*
  * The following macro is to be used for the setting of the reason and
  * reason_line fields in both the struct xpc_channel and struct xpc_partition
@@ -801,8 +741,6 @@ xpc_part_ref(struct xpc_partition *part)
                (_p)->reason_line = _line; \
        }
 
-
-
 /*
  * This next set of inlines is used to keep track of when a partition is
  * potentially engaged in accessing memory belonging to another partition.
@@ -812,23 +750,24 @@ static inline void
 xpc_mark_partition_engaged(struct xpc_partition *part)
 {
        unsigned long irq_flags;
-       AMO_t *amo = (AMO_t *) __va(part->remote_amos_page_pa +
-                               (XPC_ENGAGED_PARTITIONS_AMO * sizeof(AMO_t)));
-
+       AMO_t *amo = (AMO_t *)__va(part->remote_amos_page_pa +
+                                  (XPC_ENGAGED_PARTITIONS_AMO *
+                                   sizeof(AMO_t)));
 
        local_irq_save(irq_flags);
 
        /* set bit corresponding to our partid in remote partition's AMO */
-       FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_OR,
-                                               (1UL << sn_partition_id));
+       FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR,
+                        (1UL << sn_partition_id));
        /*
         * We must always use the nofault function regardless of whether we
         * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
         * didn't, we'd never know that the other partition is down and would
         * keep sending IPIs and AMOs to it until the heartbeat times out.
         */
-       (void) xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo->
-                               variable), xp_nofault_PIOR_target));
+       (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
+                                                              variable),
+                                                    xp_nofault_PIOR_target));
 
        local_irq_restore(irq_flags);
 }
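
The engaged/disengaged inlines maintain one bit per partition (bit position = sn_partition_id) in an AMO word owned by the remote side; xpc_partition_engaged() further below then just ANDs a caller-supplied mask against that word. A toy model with a C11 atomic standing in for the AMO fetchops (the real FETCHOP_OR/FETCHOP_AND go uncached through the Shub, and every remote write is chased with the nofault PIO read shown above):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    static _Atomic uint64_t engaged_amo;    /* stands in for amo->variable */

    static void mark_engaged(int partid)
    {
        atomic_fetch_or(&engaged_amo, UINT64_C(1) << partid);
    }

    static void mark_disengaged(int partid)
    {
        atomic_fetch_and(&engaged_amo, ~(UINT64_C(1) << partid));
    }

    static uint64_t engaged(uint64_t partid_mask)
    {
        return atomic_load(&engaged_amo) & partid_mask;
    }

    int main(void)
    {
        mark_engaged(3);
        printf("engaged? %d\n", engaged(UINT64_C(1) << 3) != 0);
        mark_disengaged(3);
        printf("engaged? %d\n", engaged(UINT64_C(1) << 3) != 0);
        return 0;
    }

The disengage-request AMO manipulated by the next two inlines works the same way, just on a different word.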
@@ -837,23 +776,24 @@ static inline void
 xpc_mark_partition_disengaged(struct xpc_partition *part)
 {
        unsigned long irq_flags;
-       AMO_t *amo = (AMO_t *) __va(part->remote_amos_page_pa +
-                               (XPC_ENGAGED_PARTITIONS_AMO * sizeof(AMO_t)));
-
+       AMO_t *amo = (AMO_t *)__va(part->remote_amos_page_pa +
+                                  (XPC_ENGAGED_PARTITIONS_AMO *
+                                   sizeof(AMO_t)));
 
        local_irq_save(irq_flags);
 
        /* clear bit corresponding to our partid in remote partition's AMO */
-       FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_AND,
-                                               ~(1UL << sn_partition_id));
+       FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
+                        ~(1UL << sn_partition_id));
        /*
         * We must always use the nofault function regardless of whether we
         * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
         * didn't, we'd never know that the other partition is down and would
         * keep sending IPIs and AMOs to it until the heartbeat times out.
         */
-       (void) xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo->
-                               variable), xp_nofault_PIOR_target));
+       (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
+                                                              variable),
+                                                    xp_nofault_PIOR_target));
 
        local_irq_restore(irq_flags);
 }
@@ -862,23 +802,23 @@ static inline void
 xpc_request_partition_disengage(struct xpc_partition *part)
 {
        unsigned long irq_flags;
-       AMO_t *amo = (AMO_t *) __va(part->remote_amos_page_pa +
-                               (XPC_DISENGAGE_REQUEST_AMO * sizeof(AMO_t)));
-
+       AMO_t *amo = (AMO_t *)__va(part->remote_amos_page_pa +
+                                  (XPC_DISENGAGE_REQUEST_AMO * sizeof(AMO_t)));
 
        local_irq_save(irq_flags);
 
        /* set bit corresponding to our partid in remote partition's AMO */
-       FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_OR,
-                                               (1UL << sn_partition_id));
+       FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR,
+                        (1UL << sn_partition_id));
        /*
         * We must always use the nofault function regardless of whether we
         * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
         * didn't, we'd never know that the other partition is down and would
         * keep sending IPIs and AMOs to it until the heartbeat times out.
         */
-       (void) xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo->
-                               variable), xp_nofault_PIOR_target));
+       (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
+                                                              variable),
+                                                    xp_nofault_PIOR_target));
 
        local_irq_restore(irq_flags);
 }
@@ -887,23 +827,23 @@ static inline void
 xpc_cancel_partition_disengage_request(struct xpc_partition *part)
 {
        unsigned long irq_flags;
-       AMO_t *amo = (AMO_t *) __va(part->remote_amos_page_pa +
-                               (XPC_DISENGAGE_REQUEST_AMO * sizeof(AMO_t)));
-
+       AMO_t *amo = (AMO_t *)__va(part->remote_amos_page_pa +
+                                  (XPC_DISENGAGE_REQUEST_AMO * sizeof(AMO_t)));
 
        local_irq_save(irq_flags);
 
        /* clear bit corresponding to our partid in remote partition's AMO */
-       FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_AND,
-                                               ~(1UL << sn_partition_id));
+       FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
+                        ~(1UL << sn_partition_id));
        /*
         * We must always use the nofault function regardless of whether we
         * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
         * didn't, we'd never know that the other partition is down and would
         * keep sending IPIs and AMOs to it until the heartbeat times out.
         */
-       (void) xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo->
-                               variable), xp_nofault_PIOR_target));
+       (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
+                                                              variable),
+                                                    xp_nofault_PIOR_target));
 
        local_irq_restore(irq_flags);
 }
@@ -913,10 +853,9 @@ xpc_partition_engaged(u64 partid_mask)
 {
        AMO_t *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO;
 
-
        /* return our partition's AMO variable ANDed with partid_mask */
-       return (FETCHOP_LOAD_OP(TO_AMO((u64) &amo->variable), FETCHOP_LOAD) &
-                                                               partid_mask);
+       return (FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) &
+               partid_mask);
 }
 
 static inline u64
@@ -924,10 +863,9 @@ xpc_partition_disengage_requested(u64 partid_mask)
 {
        AMO_t *amo = xpc_vars->amos_page + XPC_DISENGAGE_REQUEST_AMO;
 
-
        /* return our partition's AMO variable ANDed with partid_mask */
-       return (FETCHOP_LOAD_OP(TO_AMO((u64) &amo->variable), FETCHOP_LOAD) &
-                                                               partid_mask);
+       return (FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) &
+               partid_mask);
 }
 
 static inline void
@@ -935,10 +873,9 @@ xpc_clear_partition_engaged(u64 partid_mask)
 {
        AMO_t *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO;
 
-
        /* clear bit(s) based on partid_mask in our partition's AMO */
-       FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_AND,
-                                                               ~partid_mask);
+       FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
+                        ~partid_mask);
 }
 
 static inline void
@@ -946,14 +883,11 @@ xpc_clear_partition_disengage_request(u64 partid_mask)
 {
        AMO_t *amo = xpc_vars->amos_page + XPC_DISENGAGE_REQUEST_AMO;
 
-
        /* clear bit(s) based on partid_mask in our partition's AMO */
-       FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_AND,
-                                                               ~partid_mask);
+       FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
+                        ~partid_mask);
 }
 
-
-
 /*
  * The following set of macros and inlines are used for the sending and
  * receiving of IPIs (also known as IRQs). There are two flavors of IPIs,
@@ -964,20 +898,18 @@ xpc_clear_partition_disengage_request(u64 partid_mask)
 static inline u64
 xpc_IPI_receive(AMO_t *amo)
 {
-       return FETCHOP_LOAD_OP(TO_AMO((u64) &amo->variable), FETCHOP_CLEAR);
+       return FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_CLEAR);
 }
 
-
 static inline enum xpc_retval
 xpc_IPI_send(AMO_t *amo, u64 flag, int nasid, int phys_cpuid, int vector)
 {
        int ret = 0;
        unsigned long irq_flags;
 
-
        local_irq_save(irq_flags);
 
-       FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_OR, flag);
+       FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR, flag);
        sn_send_IPI_phys(nasid, phys_cpuid, vector, 0);
 
        /*
@@ -986,15 +918,14 @@ xpc_IPI_send(AMO_t *amo, u64 flag, int nasid, int phys_cpuid, int vector)
         * didn't, we'd never know that the other partition is down and would
         * keep sending IPIs and AMOs to it until the heartbeat times out.
         */
-       ret = xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo->variable),
-                               xp_nofault_PIOR_target));
+       ret = xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->variable),
+                                                    xp_nofault_PIOR_target));
 
        local_irq_restore(irq_flags);
 
        return ((ret == 0) ? xpcSuccess : xpcPioReadError);
 }
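
An IPI here is really two things: the reason flags ORed into the shared AMO word, and the interrupt that prods the remote CPU into looking at it. Because the receiver drains the word with an atomic read-and-clear (the FETCHOP_CLEAR in xpc_IPI_receive() above), flags from several sends that land before one receive are coalesced rather than lost. A host-side model of that pairing (C11 atomics in place of the AMO; the interrupt and the nofault read are elided):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    static _Atomic uint64_t ipi_amo;

    static void ipi_send(uint64_t flag)
    {
        atomic_fetch_or(&ipi_amo, flag);
        /* the real xpc_IPI_send() follows this with sn_send_IPI_phys()
         * and the nofault PIO read that detects a dead partition */
    }

    static uint64_t ipi_receive(void)
    {
        /* read-and-clear, like FETCHOP_CLEAR: no send is ever lost */
        return atomic_exchange(&ipi_amo, 0);
    }

    int main(void)
    {
        ipi_send(0x01);                 /* e.g. a CLOSEREQUEST flag */
        ipi_send(0x10);                 /* e.g. a MSGREQUEST flag */
        printf("flags = 0x%llx\n", (unsigned long long)ipi_receive());
        return 0;
    }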
 
-
 /*
  * IPIs associated with SGI_XPC_ACTIVATE IRQ.
  */
@@ -1004,47 +935,47 @@ xpc_IPI_send(AMO_t *amo, u64 flag, int nasid, int phys_cpuid, int vector)
  */
 static inline void
 xpc_activate_IRQ_send(u64 amos_page_pa, int from_nasid, int to_nasid,
-                       int to_phys_cpuid)
+                     int to_phys_cpuid)
 {
        int w_index = XPC_NASID_W_INDEX(from_nasid);
        int b_index = XPC_NASID_B_INDEX(from_nasid);
-       AMO_t *amos = (AMO_t *) __va(amos_page_pa +
-                               (XPC_ACTIVATE_IRQ_AMOS * sizeof(AMO_t)));
+       AMO_t *amos = (AMO_t *)__va(amos_page_pa +
+                                   (XPC_ACTIVATE_IRQ_AMOS * sizeof(AMO_t)));
 
-
-       (void) xpc_IPI_send(&amos[w_index], (1UL << b_index), to_nasid,
-                               to_phys_cpuid, SGI_XPC_ACTIVATE);
+       (void)xpc_IPI_send(&amos[w_index], (1UL << b_index), to_nasid,
+                          to_phys_cpuid, SGI_XPC_ACTIVATE);
 }
 
 static inline void
 xpc_IPI_send_activate(struct xpc_vars *vars)
 {
        xpc_activate_IRQ_send(vars->amos_page_pa, cnodeid_to_nasid(0),
-                               vars->act_nasid, vars->act_phys_cpuid);
+                             vars->act_nasid, vars->act_phys_cpuid);
 }
 
 static inline void
 xpc_IPI_send_activated(struct xpc_partition *part)
 {
        xpc_activate_IRQ_send(part->remote_amos_page_pa, cnodeid_to_nasid(0),
-                       part->remote_act_nasid, part->remote_act_phys_cpuid);
+                             part->remote_act_nasid,
+                             part->remote_act_phys_cpuid);
 }
 
 static inline void
 xpc_IPI_send_reactivate(struct xpc_partition *part)
 {
        xpc_activate_IRQ_send(xpc_vars->amos_page_pa, part->reactivate_nasid,
-                               xpc_vars->act_nasid, xpc_vars->act_phys_cpuid);
+                             xpc_vars->act_nasid, xpc_vars->act_phys_cpuid);
 }
 
 static inline void
 xpc_IPI_send_disengage(struct xpc_partition *part)
 {
        xpc_activate_IRQ_send(part->remote_amos_page_pa, cnodeid_to_nasid(0),
-                       part->remote_act_nasid, part->remote_act_phys_cpuid);
+                             part->remote_act_nasid,
+                             part->remote_act_phys_cpuid);
 }
 
-
 /*
  * IPIs associated with SGI_XPC_NOTIFY IRQ.
  */
@@ -1058,33 +989,28 @@ xpc_IPI_send_disengage(struct xpc_partition *part)
 
 static inline void
 xpc_notify_IRQ_send(struct xpc_channel *ch, u8 ipi_flag, char *ipi_flag_string,
-                       unsigned long *irq_flags)
+                   unsigned long *irq_flags)
 {
        struct xpc_partition *part = &xpc_partitions[ch->partid];
        enum xpc_retval ret;
 
-
        if (likely(part->act_state != XPC_P_DEACTIVATING)) {
                ret = xpc_IPI_send(part->remote_IPI_amo_va,
-                                       (u64) ipi_flag << (ch->number * 8),
-                                       part->remote_IPI_nasid,
-                                       part->remote_IPI_phys_cpuid,
-                                       SGI_XPC_NOTIFY);
+                                  (u64)ipi_flag << (ch->number * 8),
+                                  part->remote_IPI_nasid,
+                                  part->remote_IPI_phys_cpuid, SGI_XPC_NOTIFY);
                dev_dbg(xpc_chan, "%s sent to partid=%d, channel=%d, ret=%d\n",
                        ipi_flag_string, ch->partid, ch->number, ret);
                if (unlikely(ret != xpcSuccess)) {
-                       if (irq_flags != NULL) {
+                       if (irq_flags != NULL)
                                spin_unlock_irqrestore(&ch->lock, *irq_flags);
-                       }
                        XPC_DEACTIVATE_PARTITION(part, ret);
-                       if (irq_flags != NULL) {
+                       if (irq_flags != NULL)
                                spin_lock_irqsave(&ch->lock, *irq_flags);
-                       }
                }
        }
 }
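
Note the error path above: XPC_DEACTIVATE_PARTITION may block or take other locks, so ch->lock is dropped around it and reacquired before returning, since callers still expect to hold it. The same shape in miniature, with a pthread mutex standing in for the spinlock (names invented for the sketch):

    #include <pthread.h>

    static pthread_mutex_t ch_lock = PTHREAD_MUTEX_INITIALIZER;

    static void deactivate(void)
    {
        /* may sleep or take other locks; must not hold ch_lock */
    }

    static void error_path(int have_lock)
    {
        if (have_lock)
            pthread_mutex_unlock(&ch_lock);
        deactivate();
        if (have_lock)
            pthread_mutex_lock(&ch_lock);  /* caller expects it held */
    }

    int main(void)
    {
        pthread_mutex_lock(&ch_lock);
        error_path(1);                     /* returns with ch_lock held */
        pthread_mutex_unlock(&ch_lock);
        return 0;
    }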
 
-
 /*
  * Make it look like the remote partition, which is associated with the
  * specified channel, sent us an IPI. This faked IPI will be handled
@@ -1095,18 +1021,16 @@ xpc_notify_IRQ_send(struct xpc_channel *ch, u8 ipi_flag, char *ipi_flag_string,
 
 static inline void
 xpc_notify_IRQ_send_local(struct xpc_channel *ch, u8 ipi_flag,
-                               char *ipi_flag_string)
+                         char *ipi_flag_string)
 {
        struct xpc_partition *part = &xpc_partitions[ch->partid];
 
-
-       FETCHOP_STORE_OP(TO_AMO((u64) &part->local_IPI_amo_va->variable),
-                       FETCHOP_OR, ((u64) ipi_flag << (ch->number * 8)));
+       FETCHOP_STORE_OP(TO_AMO((u64)&part->local_IPI_amo_va->variable),
+                        FETCHOP_OR, ((u64)ipi_flag << (ch->number * 8)));
        dev_dbg(xpc_chan, "%s sent local from partid=%d, channel=%d\n",
                ipi_flag_string, ch->partid, ch->number);
 }
 
-
 /*
  * The sending and receiving of IPIs includes the setting of an AMO variable
  * to indicate the reason the IPI was sent. The 64-bit variable is divided
@@ -1121,21 +1045,18 @@ xpc_notify_IRQ_send_local(struct xpc_channel *ch, u8 ipi_flag,
 #define        XPC_IPI_OPENREPLY       0x08
 #define        XPC_IPI_MSGREQUEST      0x10
 
-
 /* given an AMO variable and a channel#, get its associated IPI flags */
 #define XPC_GET_IPI_FLAGS(_amo, _c)    ((u8) (((_amo) >> ((_c) * 8)) & 0xff))
 #define XPC_SET_IPI_FLAGS(_amo, _c, _f)        (_amo) |= ((u64) (_f) << ((_c) * 8))
 
-#define        XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(_amo) ((_amo) & __IA64_UL_CONST(0x0f0f0f0f0f0f0f0f))
-#define XPC_ANY_MSG_IPI_FLAGS_SET(_amo)       ((_amo) & __IA64_UL_CONST(0x1010101010101010))
-
+#define        XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(_amo) ((_amo) & 0x0f0f0f0f0f0f0f0fUL)
+#define XPC_ANY_MSG_IPI_FLAGS_SET(_amo)       ((_amo) & 0x1010101010101010UL)
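
Each channel owns one byte lane of the 64-bit AMO, so channel _c's flags occupy bits _c*8 .. _c*8+7; the two ANY_* masks test the open/close flag bits (0x0f) and the msg-request bit (0x10) of all eight lanes at once. A standalone worked example of the packing (macros re-declared locally for the sketch):

    #include <stdint.h>
    #include <stdio.h>

    #define GET_IPI_FLAGS(_amo, _c)     ((uint8_t)(((_amo) >> ((_c) * 8)) & 0xff))
    #define SET_IPI_FLAGS(_amo, _c, _f) ((_amo) |= ((uint64_t)(_f) << ((_c) * 8)))

    int main(void)
    {
        uint64_t amo = 0;

        SET_IPI_FLAGS(amo, 2, 0x08);    /* OPENREPLY on channel 2 */
        SET_IPI_FLAGS(amo, 5, 0x10);    /* MSGREQUEST on channel 5 */

        printf("ch2 flags = 0x%02x\n", GET_IPI_FLAGS(amo, 2));
        printf("openclose pending? %d\n",
               (amo & UINT64_C(0x0f0f0f0f0f0f0f0f)) != 0);
        printf("msgrequest pending? %d\n",
               (amo & UINT64_C(0x1010101010101010)) != 0);
        return 0;
    }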
 
 static inline void
 xpc_IPI_send_closerequest(struct xpc_channel *ch, unsigned long *irq_flags)
 {
        struct xpc_openclose_args *args = ch->local_openclose_args;
 
-
        args->reason = ch->reason;
 
        XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_CLOSEREQUEST, irq_flags);
@@ -1152,7 +1073,6 @@ xpc_IPI_send_openrequest(struct xpc_channel *ch, unsigned long *irq_flags)
 {
        struct xpc_openclose_args *args = ch->local_openclose_args;
 
-
        args->msg_size = ch->msg_size;
        args->local_nentries = ch->local_nentries;
 
@@ -1164,7 +1084,6 @@ xpc_IPI_send_openreply(struct xpc_channel *ch, unsigned long *irq_flags)
 {
        struct xpc_openclose_args *args = ch->local_openclose_args;
 
-
        args->remote_nentries = ch->remote_nentries;
        args->local_nentries = ch->local_nentries;
        args->local_msgqueue_pa = __pa(ch->local_msgqueue);
@@ -1184,7 +1103,6 @@ xpc_IPI_send_local_msgrequest(struct xpc_channel *ch)
        XPC_NOTIFY_IRQ_SEND_LOCAL(ch, XPC_IPI_MSGREQUEST);
 }
 
-
 /*
  * Memory for XPC's AMO variables is allocated by the MSPEC driver. These
  * pages are located in the lowest granule. The lowest granule uses 4k pages
@@ -1201,13 +1119,10 @@ xpc_IPI_init(int index)
 {
        AMO_t *amo = xpc_vars->amos_page + index;
 
-
-       (void) xpc_IPI_receive(amo);    /* clear AMO variable */
+       (void)xpc_IPI_receive(amo);     /* clear AMO variable */
        return amo;
 }
 
-
-
 static inline enum xpc_retval
 xpc_map_bte_errors(bte_result_t error)
 {
@@ -1220,22 +1135,31 @@ xpc_map_bte_errors(bte_result_t error)
                return xpcBteUnmappedError;
        }
        switch (error) {
-       case BTE_SUCCESS:       return xpcSuccess;
-       case BTEFAIL_DIR:       return xpcBteDirectoryError;
-       case BTEFAIL_POISON:    return xpcBtePoisonError;
-       case BTEFAIL_WERR:      return xpcBteWriteError;
-       case BTEFAIL_ACCESS:    return xpcBteAccessError;
-       case BTEFAIL_PWERR:     return xpcBtePWriteError;
-       case BTEFAIL_PRERR:     return xpcBtePReadError;
-       case BTEFAIL_TOUT:      return xpcBteTimeOutError;
-       case BTEFAIL_XTERR:     return xpcBteXtalkError;
-       case BTEFAIL_NOTAVAIL:  return xpcBteNotAvailable;
-       default:                return xpcBteUnmappedError;
+       case BTE_SUCCESS:
+               return xpcSuccess;
+       case BTEFAIL_DIR:
+               return xpcBteDirectoryError;
+       case BTEFAIL_POISON:
+               return xpcBtePoisonError;
+       case BTEFAIL_WERR:
+               return xpcBteWriteError;
+       case BTEFAIL_ACCESS:
+               return xpcBteAccessError;
+       case BTEFAIL_PWERR:
+               return xpcBtePWriteError;
+       case BTEFAIL_PRERR:
+               return xpcBtePReadError;
+       case BTEFAIL_TOUT:
+               return xpcBteTimeOutError;
+       case BTEFAIL_XTERR:
+               return xpcBteXtalkError;
+       case BTEFAIL_NOTAVAIL:
+               return xpcBteNotAvailable;
+       default:
+               return xpcBteUnmappedError;
        }
 }
 
-
-
 /*
  * Check to see if there is any channel activity to/from the specified
  * partition.
@@ -1246,11 +1170,9 @@ xpc_check_for_channel_activity(struct xpc_partition *part)
        u64 IPI_amo;
        unsigned long irq_flags;
 
-
        IPI_amo = xpc_IPI_receive(part->local_IPI_amo_va);
-       if (IPI_amo == 0) {
+       if (IPI_amo == 0)
                return;
-       }
 
        spin_lock_irqsave(&part->IPI_lock, irq_flags);
        part->local_IPI_amo |= IPI_amo;
@@ -1262,6 +1184,4 @@ xpc_check_for_channel_activity(struct xpc_partition *part)
        xpc_wakeup_channel_mgr(part);
 }
 
-
-#endif /* _ASM_IA64_SN_XPC_H */
-
+#endif /* _DRIVERS_MISC_SGIXP_XPC_H */
similarity index 87%
rename from arch/ia64/sn/kernel/xpc_channel.c
rename to drivers/misc/sgi-xp/xpc_channel.c
index 44ccc0d..bfcb9ea 100644 (file)
@@ -3,10 +3,9 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (c) 2004-2006 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (c) 2004-2008 Silicon Graphics, Inc.  All Rights Reserved.
  */
 
-
 /*
  * Cross Partition Communication (XPC) channel support.
  *
@@ -15,7 +14,6 @@
  *
  */
 
-
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/sched.h>
@@ -25,8 +23,7 @@
 #include <linux/completion.h>
 #include <asm/sn/bte.h>
 #include <asm/sn/sn_sal.h>
-#include <asm/sn/xpc.h>
-
+#include "xpc.h"
 
 /*
  * Guarantee that the kzalloc'd memory is cacheline aligned.
@@ -36,22 +33,21 @@ xpc_kzalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
 {
        /* see if kzalloc will give us cacheline aligned memory by default */
        *base = kzalloc(size, flags);
-       if (*base == NULL) {
+       if (*base == NULL)
                return NULL;
-       }
-       if ((u64) *base == L1_CACHE_ALIGN((u64) *base)) {
+
+       if ((u64)*base == L1_CACHE_ALIGN((u64)*base))
                return *base;
-       }
+
        kfree(*base);
 
        /* nope, we'll have to do it ourselves */
        *base = kzalloc(size + L1_CACHE_BYTES, flags);
-       if (*base == NULL) {
+       if (*base == NULL)
                return NULL;
-       }
-       return (void *) L1_CACHE_ALIGN((u64) *base);
-}
 
+       return (void *)L1_CACHE_ALIGN((u64)*base);
+}
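
The fallback logic: keep whatever kzalloc returns if it happens to be cacheline aligned, otherwise over-allocate by exactly one cache line and round up, handing the raw pointer back through *base so the caller can kfree() it later. A userspace model with calloc in place of kzalloc (the cache-line size is chosen arbitrarily for the sketch):

    #include <stdint.h>
    #include <stdlib.h>
    #include <stdio.h>

    #define CACHE_BYTES    128UL
    #define CACHE_ALIGN(a) (((a) + CACHE_BYTES - 1) & ~(CACHE_BYTES - 1))

    static void *zalloc_cacheline_aligned(size_t size, void **base)
    {
        *base = calloc(1, size);
        if (*base == NULL)
            return NULL;
        if ((uintptr_t)*base == CACHE_ALIGN((uintptr_t)*base))
            return *base;               /* already aligned, done */
        free(*base);

        /* pad by one full cache line and round the result up */
        *base = calloc(1, size + CACHE_BYTES);
        if (*base == NULL)
            return NULL;
        return (void *)CACHE_ALIGN((uintptr_t)*base);
    }

    int main(void)
    {
        void *base;
        void *p = zalloc_cacheline_aligned(1000, &base);

        printf("aligned %p (raw %p)\n", p, base);
        free(base);                     /* free the raw pointer, not p */
        return 0;
    }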
 
 /*
  * Set up the initial values for the XPartition Communication channels.
@@ -62,7 +58,6 @@ xpc_initialize_channels(struct xpc_partition *part, partid_t partid)
        int ch_number;
        struct xpc_channel *ch;
 
-
        for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
                ch = &part->channels[ch_number];
 
@@ -72,7 +67,7 @@ xpc_initialize_channels(struct xpc_partition *part, partid_t partid)
 
                ch->local_GP = &part->local_GPs[ch_number];
                ch->local_openclose_args =
-                                       &part->local_openclose_args[ch_number];
+                   &part->local_openclose_args[ch_number];
 
                atomic_set(&ch->kthreads_assigned, 0);
                atomic_set(&ch->kthreads_idle, 0);
@@ -91,7 +86,6 @@ xpc_initialize_channels(struct xpc_partition *part, partid_t partid)
        }
 }
 
-
 /*
  * Setup the infrastructure necessary to support XPartition Communication
  * between the specified remote partition and the local one.
@@ -103,7 +97,6 @@ xpc_setup_infrastructure(struct xpc_partition *part)
        struct timer_list *timer;
        partid_t partid = XPC_PARTID(part);
 
-
        /*
         * Zero out MOST of the entry for this partition. Only the fields
         * starting with `nchannels' will be zeroed. The preceding fields must
@@ -111,14 +104,14 @@ xpc_setup_infrastructure(struct xpc_partition *part)
         * referenced during this memset() operation.
         */
        memset(&part->nchannels, 0, sizeof(struct xpc_partition) -
-                               offsetof(struct xpc_partition, nchannels));
+              offsetof(struct xpc_partition, nchannels));
 
        /*
         * Allocate all of the channel structures as a contiguous chunk of
         * memory.
         */
        part->channels = kzalloc(sizeof(struct xpc_channel) * XPC_NCHANNELS,
-                                                               GFP_KERNEL);
+                                GFP_KERNEL);
        if (part->channels == NULL) {
                dev_err(xpc_chan, "can't get memory for channels\n");
                return xpcNoMemory;
@@ -126,11 +119,11 @@ xpc_setup_infrastructure(struct xpc_partition *part)
 
        part->nchannels = XPC_NCHANNELS;
 
-
        /* allocate all the required GET/PUT values */
 
        part->local_GPs = xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE,
-                                       GFP_KERNEL, &part->local_GPs_base);
+                                                       GFP_KERNEL,
+                                                       &part->local_GPs_base);
        if (part->local_GPs == NULL) {
                kfree(part->channels);
                part->channels = NULL;
@@ -140,7 +133,9 @@ xpc_setup_infrastructure(struct xpc_partition *part)
        }
 
        part->remote_GPs = xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE,
-                                       GFP_KERNEL, &part->remote_GPs_base);
+                                                        GFP_KERNEL,
+                                                        &part->
+                                                        remote_GPs_base);
        if (part->remote_GPs == NULL) {
                dev_err(xpc_chan, "can't get memory for remote get/put "
                        "values\n");
@@ -151,12 +146,11 @@ xpc_setup_infrastructure(struct xpc_partition *part)
                return xpcNoMemory;
        }
 
-
        /* allocate all the required open and close args */
 
-       part->local_openclose_args = xpc_kzalloc_cacheline_aligned(
-                                       XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL,
-                                       &part->local_openclose_args_base);
+       part->local_openclose_args =
+           xpc_kzalloc_cacheline_aligned(XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL,
+                                         &part->local_openclose_args_base);
        if (part->local_openclose_args == NULL) {
                dev_err(xpc_chan, "can't get memory for local connect args\n");
                kfree(part->remote_GPs_base);
@@ -168,9 +162,9 @@ xpc_setup_infrastructure(struct xpc_partition *part)
                return xpcNoMemory;
        }
 
-       part->remote_openclose_args = xpc_kzalloc_cacheline_aligned(
-                                       XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL,
-                                       &part->remote_openclose_args_base);
+       part->remote_openclose_args =
+           xpc_kzalloc_cacheline_aligned(XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL,
+                                         &part->remote_openclose_args_base);
        if (part->remote_openclose_args == NULL) {
                dev_err(xpc_chan, "can't get memory for remote connect args\n");
                kfree(part->local_openclose_args_base);
@@ -184,13 +178,11 @@ xpc_setup_infrastructure(struct xpc_partition *part)
                return xpcNoMemory;
        }
 
-
        xpc_initialize_channels(part, partid);
 
        atomic_set(&part->nchannels_active, 0);
        atomic_set(&part->nchannels_engaged, 0);
 
-
        /* local_IPI_amo were set to 0 by an earlier memset() */
 
        /* Initialize this partition's AMO_t structure */
@@ -203,7 +195,7 @@ xpc_setup_infrastructure(struct xpc_partition *part)
 
        sprintf(part->IPI_owner, "xpc%02d", partid);
        ret = request_irq(SGI_XPC_NOTIFY, xpc_notify_IRQ_handler, IRQF_SHARED,
-                               part->IPI_owner, (void *) (u64) partid);
+                         part->IPI_owner, (void *)(u64)partid);
        if (ret != 0) {
                dev_err(xpc_chan, "can't register NOTIFY IRQ handler, "
                        "errno=%d\n", -ret);
@@ -223,8 +215,8 @@ xpc_setup_infrastructure(struct xpc_partition *part)
        /* Setup a timer to check for dropped IPIs */
        timer = &part->dropped_IPI_timer;
        init_timer(timer);
-       timer->function = (void (*)(unsigned long)) xpc_dropped_IPI_check;
-       timer->data = (unsigned long) part;
+       timer->function = (void (*)(unsigned long))xpc_dropped_IPI_check;
+       timer->data = (unsigned long)part;
        timer->expires = jiffies + XPC_P_DROPPED_IPI_WAIT;
        add_timer(timer);
 
@@ -234,7 +226,6 @@ xpc_setup_infrastructure(struct xpc_partition *part)
         */
        part->setup_state = XPC_P_SETUP;
 
-
        /*
         * Setup the per partition specific variables required by the
         * remote partition to establish channel connections with us.
@@ -244,7 +235,7 @@ xpc_setup_infrastructure(struct xpc_partition *part)
         */
        xpc_vars_part[partid].GPs_pa = __pa(part->local_GPs);
        xpc_vars_part[partid].openclose_args_pa =
-                                       __pa(part->local_openclose_args);
+           __pa(part->local_openclose_args);
        xpc_vars_part[partid].IPI_amo_pa = __pa(part->local_IPI_amo_va);
        cpuid = raw_smp_processor_id(); /* any CPU in this partition will do */
        xpc_vars_part[partid].IPI_nasid = cpuid_to_nasid(cpuid);
@@ -255,7 +246,6 @@ xpc_setup_infrastructure(struct xpc_partition *part)
        return xpcSuccess;
 }
 
-
 /*
  * Create a wrapper that hides the underlying mechanism for pulling a cacheline
  * (or multiple cachelines) from a remote partition.
@@ -266,24 +256,21 @@ xpc_setup_infrastructure(struct xpc_partition *part)
  */
 static enum xpc_retval
 xpc_pull_remote_cachelines(struct xpc_partition *part, void *dst,
-                               const void *src, size_t cnt)
+                          const void *src, size_t cnt)
 {
        bte_result_t bte_ret;
 
-
-       DBUG_ON((u64) src != L1_CACHE_ALIGN((u64) src));
-       DBUG_ON((u64) dst != L1_CACHE_ALIGN((u64) dst));
+       DBUG_ON((u64)src != L1_CACHE_ALIGN((u64)src));
+       DBUG_ON((u64)dst != L1_CACHE_ALIGN((u64)dst));
        DBUG_ON(cnt != L1_CACHE_ALIGN(cnt));
 
-       if (part->act_state == XPC_P_DEACTIVATING) {
+       if (part->act_state == XPC_P_DEACTIVATING)
                return part->reason;
-       }
 
-       bte_ret = xp_bte_copy((u64) src, (u64) dst, (u64) cnt,
-                                       (BTE_NORMAL | BTE_WACQUIRE), NULL);
-       if (bte_ret == BTE_SUCCESS) {
+       bte_ret = xp_bte_copy((u64)src, (u64)dst, (u64)cnt,
+                             (BTE_NORMAL | BTE_WACQUIRE), NULL);
+       if (bte_ret == BTE_SUCCESS)
                return xpcSuccess;
-       }
 
        dev_dbg(xpc_chan, "xp_bte_copy() from partition %d failed, ret=%d\n",
                XPC_PARTID(part), bte_ret);
@@ -291,7 +278,6 @@ xpc_pull_remote_cachelines(struct xpc_partition *part, void *dst,
        return xpc_map_bte_errors(bte_ret);
 }
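
Since both endpoints of the pull must be cacheline aligned, a caller that wants a single unaligned entry uses a stack buffer of two cache lines: that guarantees one fully aligned line inside it, and the entry's offset within the pulled line is recomputed afterwards, exactly as xpc_pull_remote_vars_part() does below. A standalone sketch of that address arithmetic (the cache-line size and remote address are invented for the example):

    #include <stdint.h>
    #include <stdio.h>

    #define CACHE_BYTES    128UL
    #define CACHE_ALIGN(a) (((a) + CACHE_BYTES - 1) & ~(CACHE_BYTES - 1))

    int main(void)
    {
        uint8_t buffer[CACHE_BYTES * 2];
        uintptr_t line = CACHE_ALIGN((uintptr_t)buffer);
        uintptr_t remote_entry_pa = 0x1234560;  /* hypothetical address */

        /* pull the aligned line containing the entry, then point at
         * the entry's offset inside the local copy of that line */
        uintptr_t line_pa = remote_entry_pa & ~(CACHE_BYTES - 1);
        void *entry = (void *)(line + (remote_entry_pa & (CACHE_BYTES - 1)));

        printf("pull pa 0x%lx into %p, entry at %p\n",
               (unsigned long)line_pa, (void *)line, entry);
        return 0;
    }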
 
-
 /*
  * Pull the remote per partition specific variables from the specified
  * partition.
@@ -301,41 +287,40 @@ xpc_pull_remote_vars_part(struct xpc_partition *part)
 {
        u8 buffer[L1_CACHE_BYTES * 2];
        struct xpc_vars_part *pulled_entry_cacheline =
-                       (struct xpc_vars_part *) L1_CACHE_ALIGN((u64) buffer);
+           (struct xpc_vars_part *)L1_CACHE_ALIGN((u64)buffer);
        struct xpc_vars_part *pulled_entry;
        u64 remote_entry_cacheline_pa, remote_entry_pa;
        partid_t partid = XPC_PARTID(part);
        enum xpc_retval ret;
 
-
        /* pull the cacheline that contains the variables we're interested in */
 
        DBUG_ON(part->remote_vars_part_pa !=
-                               L1_CACHE_ALIGN(part->remote_vars_part_pa));
+               L1_CACHE_ALIGN(part->remote_vars_part_pa));
        DBUG_ON(sizeof(struct xpc_vars_part) != L1_CACHE_BYTES / 2);
 
        remote_entry_pa = part->remote_vars_part_pa +
-                       sn_partition_id * sizeof(struct xpc_vars_part);
+           sn_partition_id * sizeof(struct xpc_vars_part);
 
        remote_entry_cacheline_pa = (remote_entry_pa & ~(L1_CACHE_BYTES - 1));
 
-       pulled_entry = (struct xpc_vars_part *) ((u64) pulled_entry_cacheline +
-                               (remote_entry_pa & (L1_CACHE_BYTES - 1)));
+       pulled_entry = (struct xpc_vars_part *)((u64)pulled_entry_cacheline +
+                                               (remote_entry_pa &
+                                                (L1_CACHE_BYTES - 1)));
 
        ret = xpc_pull_remote_cachelines(part, pulled_entry_cacheline,
-                                       (void *) remote_entry_cacheline_pa,
-                                       L1_CACHE_BYTES);
+                                        (void *)remote_entry_cacheline_pa,
+                                        L1_CACHE_BYTES);
        if (ret != xpcSuccess) {
                dev_dbg(xpc_chan, "failed to pull XPC vars_part from "
                        "partition %d, ret=%d\n", partid, ret);
                return ret;
        }
 
-
        /* see if they've been set up yet */
 
        if (pulled_entry->magic != XPC_VP_MAGIC1 &&
-                               pulled_entry->magic != XPC_VP_MAGIC2) {
+           pulled_entry->magic != XPC_VP_MAGIC2) {
 
                if (pulled_entry->magic != 0) {
                        dev_dbg(xpc_chan, "partition %d's XPC vars_part for "
@@ -353,8 +338,8 @@ xpc_pull_remote_vars_part(struct xpc_partition *part)
                /* validate the variables */
 
                if (pulled_entry->GPs_pa == 0 ||
-                               pulled_entry->openclose_args_pa == 0 ||
-                                       pulled_entry->IPI_amo_pa == 0) {
+                   pulled_entry->openclose_args_pa == 0 ||
+                   pulled_entry->IPI_amo_pa == 0) {
 
                        dev_err(xpc_chan, "partition %d's XPC vars_part for "
                                "partition %d are not valid\n", partid,
@@ -366,29 +351,26 @@ xpc_pull_remote_vars_part(struct xpc_partition *part)
 
                part->remote_GPs_pa = pulled_entry->GPs_pa;
                part->remote_openclose_args_pa =
-                                       pulled_entry->openclose_args_pa;
+                   pulled_entry->openclose_args_pa;
                part->remote_IPI_amo_va =
-                                     (AMO_t *) __va(pulled_entry->IPI_amo_pa);
+                   (AMO_t *)__va(pulled_entry->IPI_amo_pa);
                part->remote_IPI_nasid = pulled_entry->IPI_nasid;
                part->remote_IPI_phys_cpuid = pulled_entry->IPI_phys_cpuid;
 
-               if (part->nchannels > pulled_entry->nchannels) {
+               if (part->nchannels > pulled_entry->nchannels)
                        part->nchannels = pulled_entry->nchannels;
-               }
 
                /* let the other side know that we've pulled their variables */
 
                xpc_vars_part[partid].magic = XPC_VP_MAGIC2;
        }
 
-       if (pulled_entry->magic == XPC_VP_MAGIC1) {
+       if (pulled_entry->magic == XPC_VP_MAGIC1)
                return xpcRetry;
-       }
 
        return xpcSuccess;
 }
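
The magic values drive a two-phase handshake: a side publishes MAGIC1 once its own vars_part entry is valid, and promotes its entry to MAGIC2 after successfully pulling the peer's, so a puller that still sees MAGIC1 must retry until the peer has pulled in the other direction too. A compressed sketch of that state machine (the magic values here are illustrative placeholders, not the real constants):

    #include <stdint.h>
    #include <stdio.h>

    #define VP_MAGIC1 1    /* illustrative: "my entry is initialized" */
    #define VP_MAGIC2 2    /* illustrative: "and I pulled yours too" */

    /* <0 = peer not ready, >0 = pulled but must retry, 0 = done */
    static int pull_peer_vars(uint64_t peer_magic, uint64_t *my_magic)
    {
        if (peer_magic != VP_MAGIC1 && peer_magic != VP_MAGIC2)
            return -1;              /* peer hasn't set up its entry yet */
        *my_magic = VP_MAGIC2;      /* advertise that we pulled theirs */
        return peer_magic == VP_MAGIC1 ? 1 : 0;
    }

    int main(void)
    {
        uint64_t mine = VP_MAGIC1;

        printf("ret=%d\n", pull_peer_vars(VP_MAGIC1, &mine)); /* 1: retry */
        printf("ret=%d\n", pull_peer_vars(VP_MAGIC2, &mine)); /* 0: done */
        return 0;
    }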
 
-
 /*
  * Get the IPI flags and pull the openclose args and/or remote GPs as needed.
  */
@@ -399,23 +381,23 @@ xpc_get_IPI_flags(struct xpc_partition *part)
        u64 IPI_amo;
        enum xpc_retval ret;
 
-
        /*
         * See if there are any IPI flags to be handled.
         */
 
        spin_lock_irqsave(&part->IPI_lock, irq_flags);
-       if ((IPI_amo = part->local_IPI_amo) != 0) {
+       IPI_amo = part->local_IPI_amo;
+       if (IPI_amo != 0)
                part->local_IPI_amo = 0;
-       }
-       spin_unlock_irqrestore(&part->IPI_lock, irq_flags);
 
+       spin_unlock_irqrestore(&part->IPI_lock, irq_flags);
 
        if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(IPI_amo)) {
                ret = xpc_pull_remote_cachelines(part,
-                                       part->remote_openclose_args,
-                                       (void *) part->remote_openclose_args_pa,
-                                       XPC_OPENCLOSE_ARGS_SIZE);
+                                                part->remote_openclose_args,
+                                                (void *)part->
+                                                remote_openclose_args_pa,
+                                                XPC_OPENCLOSE_ARGS_SIZE);
                if (ret != xpcSuccess) {
                        XPC_DEACTIVATE_PARTITION(part, ret);
 
@@ -430,8 +412,8 @@ xpc_get_IPI_flags(struct xpc_partition *part)
 
        if (XPC_ANY_MSG_IPI_FLAGS_SET(IPI_amo)) {
                ret = xpc_pull_remote_cachelines(part, part->remote_GPs,
-                                               (void *) part->remote_GPs_pa,
-                                               XPC_GP_SIZE);
+                                                (void *)part->remote_GPs_pa,
+                                                XPC_GP_SIZE);
                if (ret != xpcSuccess) {
                        XPC_DEACTIVATE_PARTITION(part, ret);
 
@@ -446,7 +428,6 @@ xpc_get_IPI_flags(struct xpc_partition *part)
        return IPI_amo;
 }
 
-
 /*
  * Allocate the local message queue and the notify queue.
  */
@@ -457,20 +438,14 @@ xpc_allocate_local_msgqueue(struct xpc_channel *ch)
        int nentries;
        size_t nbytes;
 
-
-       // >>> may want to check for ch->flags & XPC_C_DISCONNECTING between
-       // >>> iterations of the for-loop, bail if set?
-
-       // >>> should we impose a minimum #of entries? like 4 or 8?
        for (nentries = ch->local_nentries; nentries > 0; nentries--) {
 
                nbytes = nentries * ch->msg_size;
                ch->local_msgqueue = xpc_kzalloc_cacheline_aligned(nbytes,
-                                               GFP_KERNEL,
-                                               &ch->local_msgqueue_base);
-               if (ch->local_msgqueue == NULL) {
+                                                                  GFP_KERNEL,
+                                                     &ch->local_msgqueue_base);
+               if (ch->local_msgqueue == NULL)
                        continue;
-               }
 
                nbytes = nentries * sizeof(struct xpc_notify);
                ch->notify_queue = kzalloc(nbytes, GFP_KERNEL);
@@ -497,7 +472,6 @@ xpc_allocate_local_msgqueue(struct xpc_channel *ch)
        return xpcNoMemory;
 }
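
The allocation loop degrades gracefully: it asks for the configured number of message entries and shrinks the request one entry at a time until the allocator succeeds, recording how many entries were actually obtained (the real code does that bookkeeping under ch->lock). The generic shape, with calloc standing in for the cacheline-aligned kzalloc wrapper:

    #include <stdlib.h>
    #include <stdio.h>

    static void *alloc_queue(size_t entry_size, int want, int *got)
    {
        int n;
        void *q;

        for (n = want; n > 0; n--) {
            q = calloc((size_t)n, entry_size);
            if (q != NULL) {
                *got = n;           /* may be fewer than requested */
                return q;
            }
        }
        *got = 0;
        return NULL;
    }

    int main(void)
    {
        int got;
        void *q = alloc_queue(128, 64, &got);

        printf("allocated %d entries\n", got);
        free(q);
        return 0;
    }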
 
-
 /*
  * Allocate the cached remote message queue.
  */
@@ -508,22 +482,16 @@ xpc_allocate_remote_msgqueue(struct xpc_channel *ch)
        int nentries;
        size_t nbytes;
 
-
        DBUG_ON(ch->remote_nentries <= 0);
 
-       // >>> may want to check for ch->flags & XPC_C_DISCONNECTING between
-       // >>> iterations of the for-loop, bail if set?
-
-       // >>> should we impose a minimum #of entries? like 4 or 8?
        for (nentries = ch->remote_nentries; nentries > 0; nentries--) {
 
                nbytes = nentries * ch->msg_size;
                ch->remote_msgqueue = xpc_kzalloc_cacheline_aligned(nbytes,
-                                               GFP_KERNEL,
-                                               &ch->remote_msgqueue_base);
-               if (ch->remote_msgqueue == NULL) {
+                                                                   GFP_KERNEL,
+                                                    &ch->remote_msgqueue_base);
+               if (ch->remote_msgqueue == NULL)
                        continue;
-               }
 
                spin_lock_irqsave(&ch->lock, irq_flags);
                if (nentries < ch->remote_nentries) {
@@ -542,7 +510,6 @@ xpc_allocate_remote_msgqueue(struct xpc_channel *ch)
        return xpcNoMemory;
 }
 
-
 /*
  * Allocate message queues and other stuff associated with a channel.
  *
@@ -554,14 +521,14 @@ xpc_allocate_msgqueues(struct xpc_channel *ch)
        unsigned long irq_flags;
        enum xpc_retval ret;
 
-
        DBUG_ON(ch->flags & XPC_C_SETUP);
 
-       if ((ret = xpc_allocate_local_msgqueue(ch)) != xpcSuccess) {
+       ret = xpc_allocate_local_msgqueue(ch);
+       if (ret != xpcSuccess)
                return ret;
-       }
 
-       if ((ret = xpc_allocate_remote_msgqueue(ch)) != xpcSuccess) {
+       ret = xpc_allocate_remote_msgqueue(ch);
+       if (ret != xpcSuccess) {
                kfree(ch->local_msgqueue_base);
                ch->local_msgqueue = NULL;
                kfree(ch->notify_queue);
@@ -576,7 +543,6 @@ xpc_allocate_msgqueues(struct xpc_channel *ch)
        return xpcSuccess;
 }
 
-
 /*
  * Process a connect message from a remote partition.
  *
@@ -588,11 +554,10 @@ xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
 {
        enum xpc_retval ret;
 
-
        DBUG_ON(!spin_is_locked(&ch->lock));
 
        if (!(ch->flags & XPC_C_OPENREQUEST) ||
-                               !(ch->flags & XPC_C_ROPENREQUEST)) {
+           !(ch->flags & XPC_C_ROPENREQUEST)) {
                /* nothing more to do for now */
                return;
        }
@@ -603,12 +568,11 @@ xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
                ret = xpc_allocate_msgqueues(ch);
                spin_lock_irqsave(&ch->lock, *irq_flags);
 
-               if (ret != xpcSuccess) {
+               if (ret != xpcSuccess)
                        XPC_DISCONNECT_CHANNEL(ch, ret, irq_flags);
-               }
-               if (ch->flags & (XPC_C_CONNECTED | XPC_C_DISCONNECTING)) {
+
+               if (ch->flags & (XPC_C_CONNECTED | XPC_C_DISCONNECTING))
                        return;
-               }
 
                DBUG_ON(!(ch->flags & XPC_C_SETUP));
                DBUG_ON(ch->local_msgqueue == NULL);
@@ -620,23 +584,21 @@ xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
                xpc_IPI_send_openreply(ch, irq_flags);
        }
 
-       if (!(ch->flags & XPC_C_ROPENREPLY)) {
+       if (!(ch->flags & XPC_C_ROPENREPLY))
                return;
-       }
 
        DBUG_ON(ch->remote_msgqueue_pa == 0);
 
        ch->flags = (XPC_C_CONNECTED | XPC_C_SETUP);    /* clear all else */
 
        dev_info(xpc_chan, "channel %d to partition %d connected\n",
-               ch->number, ch->partid);
+                ch->number, ch->partid);
 
        spin_unlock_irqrestore(&ch->lock, *irq_flags);
        xpc_create_kthreads(ch, 1, 0);
        spin_lock_irqsave(&ch->lock, *irq_flags);
 }
 
-
 /*
  * Notify those who wanted to be notified upon delivery of their message.
  */
@@ -647,7 +609,6 @@ xpc_notify_senders(struct xpc_channel *ch, enum xpc_retval reason, s64 put)
        u8 notify_type;
        s64 get = ch->w_remote_GP.get - 1;
 
-
        while (++get < put && atomic_read(&ch->n_to_notify) > 0) {
 
                notify = &ch->notify_queue[get % ch->local_nentries];
@@ -660,8 +621,7 @@ xpc_notify_senders(struct xpc_channel *ch, enum xpc_retval reason, s64 put)
                 */
                notify_type = notify->type;
                if (notify_type == 0 ||
-                               cmpxchg(&notify->type, notify_type, 0) !=
-                                                               notify_type) {
+                   cmpxchg(&notify->type, notify_type, 0) != notify_type) {
                        continue;
                }
 
@@ -672,20 +632,19 @@ xpc_notify_senders(struct xpc_channel *ch, enum xpc_retval reason, s64 put)
                if (notify->func != NULL) {
                        dev_dbg(xpc_chan, "notify->func() called, notify=0x%p, "
                                "msg_number=%ld, partid=%d, channel=%d\n",
-                               (void *) notify, get, ch->partid, ch->number);
+                               (void *)notify, get, ch->partid, ch->number);
 
                        notify->func(reason, ch->partid, ch->number,
-                                                               notify->key);
+                                    notify->key);
 
                        dev_dbg(xpc_chan, "notify->func() returned, "
                                "notify=0x%p, msg_number=%ld, partid=%d, "
-                               "channel=%d\n", (void *) notify, get,
+                               "channel=%d\n", (void *)notify, get,
                                ch->partid, ch->number);
                }
        }
 }
 
-
 /*
  * Free up message queues and other stuff that were allocated for the specified
  * channel.
@@ -733,7 +692,6 @@ xpc_free_msgqueues(struct xpc_channel *ch)
        }
 }
 
-
 /*
  * spin_lock_irqsave() is expected to be held on entry.
  */
@@ -743,46 +701,41 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
        struct xpc_partition *part = &xpc_partitions[ch->partid];
        u32 channel_was_connected = (ch->flags & XPC_C_WASCONNECTED);
 
-
        DBUG_ON(!spin_is_locked(&ch->lock));
 
-       if (!(ch->flags & XPC_C_DISCONNECTING)) {
+       if (!(ch->flags & XPC_C_DISCONNECTING))
                return;
-       }
 
        DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));
 
        /* make sure all activity has settled down first */
 
        if (atomic_read(&ch->kthreads_assigned) > 0 ||
-                               atomic_read(&ch->references) > 0) {
+           atomic_read(&ch->references) > 0) {
                return;
        }
        DBUG_ON((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
-                       !(ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE));
+               !(ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE));
 
        if (part->act_state == XPC_P_DEACTIVATING) {
                /* can't proceed until the other side disengages from us */
-               if (xpc_partition_engaged(1UL << ch->partid)) {
+               if (xpc_partition_engaged(1UL << ch->partid))
                        return;
-               }
 
        } else {
 
                /* as long as the other side is up do the full protocol */
 
-               if (!(ch->flags & XPC_C_RCLOSEREQUEST)) {
+               if (!(ch->flags & XPC_C_RCLOSEREQUEST))
                        return;
-               }
 
                if (!(ch->flags & XPC_C_CLOSEREPLY)) {
                        ch->flags |= XPC_C_CLOSEREPLY;
                        xpc_IPI_send_closereply(ch, irq_flags);
                }
 
-               if (!(ch->flags & XPC_C_RCLOSEREPLY)) {
+               if (!(ch->flags & XPC_C_RCLOSEREPLY))
                        return;
-               }
        }
 
        /* wake those waiting for notify completion */
@@ -809,7 +762,7 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
 
        if (channel_was_connected) {
                dev_info(xpc_chan, "channel %d to partition %d disconnected, "
-                       "reason=%d\n", ch->number, ch->partid, ch->reason);
+                        "reason=%d\n", ch->number, ch->partid, ch->reason);
        }
 
        if (ch->flags & XPC_C_WDISCONNECT) {
@@ -820,35 +773,32 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
                        /* time to take action on any delayed IPI flags */
                        spin_lock(&part->IPI_lock);
                        XPC_SET_IPI_FLAGS(part->local_IPI_amo, ch->number,
-                                                       ch->delayed_IPI_flags);
+                                         ch->delayed_IPI_flags);
                        spin_unlock(&part->IPI_lock);
                }
                ch->delayed_IPI_flags = 0;
        }
 }
 
-
 /*
  * Process a change in the channel's remote connection state.
  */
 static void
 xpc_process_openclose_IPI(struct xpc_partition *part, int ch_number,
-                               u8 IPI_flags)
+                         u8 IPI_flags)
 {
        unsigned long irq_flags;
        struct xpc_openclose_args *args =
-                               &part->remote_openclose_args[ch_number];
+           &part->remote_openclose_args[ch_number];
        struct xpc_channel *ch = &part->channels[ch_number];
        enum xpc_retval reason;
 
-
-
        spin_lock_irqsave(&ch->lock, irq_flags);
 
 again:
 
        if ((ch->flags & XPC_C_DISCONNECTED) &&
-                                       (ch->flags & XPC_C_WDISCONNECT)) {
+           (ch->flags & XPC_C_WDISCONNECT)) {
                /*
                 * Delay processing IPI flags until thread waiting disconnect
                 * has had a chance to see that the channel is disconnected.
@@ -858,7 +808,6 @@ again:
                return;
        }
 
-
        if (IPI_flags & XPC_IPI_CLOSEREQUEST) {
 
                dev_dbg(xpc_chan, "XPC_IPI_CLOSEREQUEST (reason=%d) received "
@@ -890,13 +839,14 @@ again:
                if (ch->flags & XPC_C_DISCONNECTED) {
                        if (!(IPI_flags & XPC_IPI_OPENREQUEST)) {
                                if ((XPC_GET_IPI_FLAGS(part->local_IPI_amo,
-                                        ch_number) & XPC_IPI_OPENREQUEST)) {
+                                                      ch_number) &
+                                    XPC_IPI_OPENREQUEST)) {
 
                                        DBUG_ON(ch->delayed_IPI_flags != 0);
                                        spin_lock(&part->IPI_lock);
                                        XPC_SET_IPI_FLAGS(part->local_IPI_amo,
-                                                       ch_number,
-                                                       XPC_IPI_CLOSEREQUEST);
+                                                         ch_number,
+                                                         XPC_IPI_CLOSEREQUEST);
                                        spin_unlock(&part->IPI_lock);
                                }
                                spin_unlock_irqrestore(&ch->lock, irq_flags);
@@ -921,11 +871,10 @@ again:
 
                if (!(ch->flags & XPC_C_DISCONNECTING)) {
                        reason = args->reason;
-                       if (reason <= xpcSuccess || reason > xpcUnknownReason) {
+                       if (reason <= xpcSuccess || reason > xpcUnknownReason)
                                reason = xpcUnknownReason;
-                       } else if (reason == xpcUnregistering) {
+                       else if (reason == xpcUnregistering)
                                reason = xpcOtherUnregistering;
-                       }
 
                        XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags);
 
@@ -937,7 +886,6 @@ again:
                xpc_process_disconnect(ch, &irq_flags);
        }
 
-
        if (IPI_flags & XPC_IPI_CLOSEREPLY) {
 
                dev_dbg(xpc_chan, "XPC_IPI_CLOSEREPLY received from partid=%d,"
@@ -953,12 +901,13 @@ again:
 
                if (!(ch->flags & XPC_C_RCLOSEREQUEST)) {
                        if ((XPC_GET_IPI_FLAGS(part->local_IPI_amo, ch_number)
-                                               & XPC_IPI_CLOSEREQUEST)) {
+                            & XPC_IPI_CLOSEREQUEST)) {
 
                                DBUG_ON(ch->delayed_IPI_flags != 0);
                                spin_lock(&part->IPI_lock);
                                XPC_SET_IPI_FLAGS(part->local_IPI_amo,
-                                               ch_number, XPC_IPI_CLOSEREPLY);
+                                                 ch_number,
+                                                 XPC_IPI_CLOSEREPLY);
                                spin_unlock(&part->IPI_lock);
                        }
                        spin_unlock_irqrestore(&ch->lock, irq_flags);
@@ -973,7 +922,6 @@ again:
                }
        }
 
-
        if (IPI_flags & XPC_IPI_OPENREQUEST) {
 
                dev_dbg(xpc_chan, "XPC_IPI_OPENREQUEST (msg_size=%d, "
@@ -982,7 +930,7 @@ again:
                        ch->partid, ch->number);
 
                if (part->act_state == XPC_P_DEACTIVATING ||
-                                       (ch->flags & XPC_C_ROPENREQUEST)) {
+                   (ch->flags & XPC_C_ROPENREQUEST)) {
                        spin_unlock_irqrestore(&ch->lock, irq_flags);
                        return;
                }
@@ -993,9 +941,9 @@ again:
                        return;
                }
                DBUG_ON(!(ch->flags & (XPC_C_DISCONNECTED |
-                                                       XPC_C_OPENREQUEST)));
+                                      XPC_C_OPENREQUEST)));
                DBUG_ON(ch->flags & (XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
-                                       XPC_C_OPENREPLY | XPC_C_CONNECTED));
+                                    XPC_C_OPENREPLY | XPC_C_CONNECTED));
 
                /*
                 * The meaningful OPENREQUEST connection state fields are:
@@ -1011,11 +959,10 @@ again:
                ch->flags |= (XPC_C_ROPENREQUEST | XPC_C_CONNECTING);
                ch->remote_nentries = args->local_nentries;
 
-
                if (ch->flags & XPC_C_OPENREQUEST) {
                        if (args->msg_size != ch->msg_size) {
                                XPC_DISCONNECT_CHANNEL(ch, xpcUnequalMsgSizes,
-                                                               &irq_flags);
+                                                      &irq_flags);
                                spin_unlock_irqrestore(&ch->lock, irq_flags);
                                return;
                        }
@@ -1031,7 +978,6 @@ again:
                xpc_process_connect(ch, &irq_flags);
        }
 
-
        if (IPI_flags & XPC_IPI_OPENREPLY) {
 
                dev_dbg(xpc_chan, "XPC_IPI_OPENREPLY (local_msgqueue_pa=0x%lx, "
@@ -1046,7 +992,7 @@ again:
                }
                if (!(ch->flags & XPC_C_OPENREQUEST)) {
                        XPC_DISCONNECT_CHANNEL(ch, xpcOpenCloseError,
-                                                               &irq_flags);
+                                              &irq_flags);
                        spin_unlock_irqrestore(&ch->lock, irq_flags);
                        return;
                }
@@ -1057,7 +1003,7 @@ again:
                /*
                 * The meaningful OPENREPLY connection state fields are:
                 *      local_msgqueue_pa = physical address of remote
-                *                          partition's local_msgqueue
+                *                          partition's local_msgqueue
                 *      local_nentries = remote partition's local_nentries
                 *      remote_nentries = remote partition's remote_nentries
                 */
@@ -1093,7 +1039,6 @@ again:
        spin_unlock_irqrestore(&ch->lock, irq_flags);
 }
 
-
 /*
  * Attempt to establish a channel connection to a remote partition.
  */
@@ -1103,10 +1048,8 @@ xpc_connect_channel(struct xpc_channel *ch)
        unsigned long irq_flags;
        struct xpc_registration *registration = &xpc_registrations[ch->number];
 
-
-       if (mutex_trylock(&registration->mutex) == 0) {
+       if (mutex_trylock(&registration->mutex) == 0)
                return xpcRetry;
-       }
 
        if (!XPC_CHANNEL_REGISTERED(ch->number)) {
                mutex_unlock(&registration->mutex);
@@ -1124,7 +1067,6 @@ xpc_connect_channel(struct xpc_channel *ch)
                return ch->reason;
        }
 
-
        /* add info from the channel connect registration to the channel */
 
        ch->kthreads_assigned_limit = registration->assigned_limit;
@@ -1154,7 +1096,7 @@ xpc_connect_channel(struct xpc_channel *ch)
                         */
                        mutex_unlock(&registration->mutex);
                        XPC_DISCONNECT_CHANNEL(ch, xpcUnequalMsgSizes,
-                                                               &irq_flags);
+                                              &irq_flags);
                        spin_unlock_irqrestore(&ch->lock, irq_flags);
                        return xpcUnequalMsgSizes;
                }
@@ -1169,7 +1111,6 @@ xpc_connect_channel(struct xpc_channel *ch)
 
        mutex_unlock(&registration->mutex);
 
-
        /* initiate the connection */
 
        ch->flags |= (XPC_C_OPENREQUEST | XPC_C_CONNECTING);
@@ -1182,7 +1123,6 @@ xpc_connect_channel(struct xpc_channel *ch)
        return xpcSuccess;
 }
 
-
 /*
  * Clear some of the msg flags in the local message queue.
  */
@@ -1192,16 +1132,15 @@ xpc_clear_local_msgqueue_flags(struct xpc_channel *ch)
        struct xpc_msg *msg;
        s64 get;
 
-
        get = ch->w_remote_GP.get;
        do {
-               msg = (struct xpc_msg *) ((u64) ch->local_msgqueue +
-                               (get % ch->local_nentries) * ch->msg_size);
+               msg = (struct xpc_msg *)((u64)ch->local_msgqueue +
+                                        (get % ch->local_nentries) *
+                                        ch->msg_size);
                msg->flags = 0;
-       } while (++get < (volatile s64) ch->remote_GP.get);
+       } while (++get < ch->remote_GP.get);
 }
 
-
 /*
  * Clear some of the msg flags in the remote message queue.
  */
@@ -1211,43 +1150,39 @@ xpc_clear_remote_msgqueue_flags(struct xpc_channel *ch)
        struct xpc_msg *msg;
        s64 put;
 
-
        put = ch->w_remote_GP.put;
        do {
-               msg = (struct xpc_msg *) ((u64) ch->remote_msgqueue +
-                               (put % ch->remote_nentries) * ch->msg_size);
+               msg = (struct xpc_msg *)((u64)ch->remote_msgqueue +
+                                        (put % ch->remote_nentries) *
+                                        ch->msg_size);
                msg->flags = 0;
-       } while (++put < (volatile s64) ch->remote_GP.put);
+       } while (++put < ch->remote_GP.put);
 }
 
-
 static void
 xpc_process_msg_IPI(struct xpc_partition *part, int ch_number)
 {
        struct xpc_channel *ch = &part->channels[ch_number];
        int nmsgs_sent;
 
-
        ch->remote_GP = part->remote_GPs[ch_number];
 
-
        /* See what, if anything, has changed for each connected channel */
 
        xpc_msgqueue_ref(ch);
 
        if (ch->w_remote_GP.get == ch->remote_GP.get &&
-                               ch->w_remote_GP.put == ch->remote_GP.put) {
+           ch->w_remote_GP.put == ch->remote_GP.put) {
                /* nothing changed since GPs were last pulled */
                xpc_msgqueue_deref(ch);
                return;
        }
 
-       if (!(ch->flags & XPC_C_CONNECTED)){
+       if (!(ch->flags & XPC_C_CONNECTED)) {
                xpc_msgqueue_deref(ch);
                return;
        }
 
-
        /*
         * First check to see if messages recently sent by us have been
         * received by the other side. (The remote GET value will have
@@ -1269,7 +1204,7 @@ xpc_process_msg_IPI(struct xpc_partition *part, int ch_number)
                         * received and delivered by the other side.
                         */
                        xpc_notify_senders(ch, xpcMsgDelivered,
-                                                       ch->remote_GP.get);
+                                          ch->remote_GP.get);
                }
 
                /*
@@ -1288,12 +1223,10 @@ xpc_process_msg_IPI(struct xpc_partition *part, int ch_number)
                 * If anyone was waiting for message queue entries to become
                 * available, wake them up.
                 */
-               if (atomic_read(&ch->n_on_msg_allocate_wq) > 0) {
+               if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
                        wake_up(&ch->msg_allocate_wq);
-               }
        }
 
-
        /*
         * Now check for newly sent messages by the other side. (The remote
         * PUT value will have changed since we last looked at it.)
@@ -1318,16 +1251,14 @@ xpc_process_msg_IPI(struct xpc_partition *part, int ch_number)
                                "delivered=%d, partid=%d, channel=%d\n",
                                nmsgs_sent, ch->partid, ch->number);
 
-                       if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) {
+                       if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE)
                                xpc_activate_kthreads(ch, nmsgs_sent);
-                       }
                }
        }
 
        xpc_msgqueue_deref(ch);
 }
 
-
 void
 xpc_process_channel_activity(struct xpc_partition *part)
 {
@@ -1337,7 +1268,6 @@ xpc_process_channel_activity(struct xpc_partition *part)
        int ch_number;
        u32 ch_flags;
 
-
        IPI_amo = xpc_get_IPI_flags(part);
 
        /*
@@ -1350,7 +1280,6 @@ xpc_process_channel_activity(struct xpc_partition *part)
        for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
                ch = &part->channels[ch_number];
 
-
                /*
                 * Process any open or close related IPI flags, and then deal
                 * with connecting or disconnecting the channel as required.
@@ -1358,9 +1287,8 @@ xpc_process_channel_activity(struct xpc_partition *part)
 
                IPI_flags = XPC_GET_IPI_FLAGS(IPI_amo, ch_number);
 
-               if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(IPI_flags)) {
+               if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(IPI_flags))
                        xpc_process_openclose_IPI(part, ch_number, IPI_flags);
-               }
 
                ch_flags = ch->flags;   /* need an atomic snapshot of flags */
 
@@ -1371,14 +1299,13 @@ xpc_process_channel_activity(struct xpc_partition *part)
                        continue;
                }
 
-               if (part->act_state == XPC_P_DEACTIVATING) {
+               if (part->act_state == XPC_P_DEACTIVATING)
                        continue;
-               }
 
                if (!(ch_flags & XPC_C_CONNECTED)) {
                        if (!(ch_flags & XPC_C_OPENREQUEST)) {
                                DBUG_ON(ch_flags & XPC_C_SETUP);
-                               (void) xpc_connect_channel(ch);
+                               (void)xpc_connect_channel(ch);
                        } else {
                                spin_lock_irqsave(&ch->lock, irq_flags);
                                xpc_process_connect(ch, &irq_flags);
@@ -1387,20 +1314,17 @@ xpc_process_channel_activity(struct xpc_partition *part)
                        continue;
                }
 
-
                /*
                 * Process any message related IPI flags, this may involve the
                 * activation of kthreads to deliver any pending messages sent
                 * from the other partition.
                 */
 
-               if (XPC_ANY_MSG_IPI_FLAGS_SET(IPI_flags)) {
+               if (XPC_ANY_MSG_IPI_FLAGS_SET(IPI_flags))
                        xpc_process_msg_IPI(part, ch_number);
-               }
        }
 }
 
-
 /*
  * XPC's heartbeat code calls this function to inform XPC that a partition is
  * going down.  XPC responds by tearing down the XPartition Communication
@@ -1417,7 +1341,6 @@ xpc_partition_going_down(struct xpc_partition *part, enum xpc_retval reason)
        int ch_number;
        struct xpc_channel *ch;
 
-
        dev_dbg(xpc_chan, "deactivating partition %d, reason=%d\n",
                XPC_PARTID(part), reason);
 
@@ -1426,7 +1349,6 @@ xpc_partition_going_down(struct xpc_partition *part, enum xpc_retval reason)
                return;
        }
 
-
        /* disconnect channels associated with the partition going down */
 
        for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
@@ -1446,7 +1368,6 @@ xpc_partition_going_down(struct xpc_partition *part, enum xpc_retval reason)
        xpc_part_deref(part);
 }
 
-
 /*
  * Tear down the infrastructure necessary to support XPartition Communication
  * between the specified remote partition and the local one.
@@ -1456,7 +1377,6 @@ xpc_teardown_infrastructure(struct xpc_partition *part)
 {
        partid_t partid = XPC_PARTID(part);
 
-
        /*
         * We start off by making this partition inaccessible to local
         * processes by marking it as no longer setup. Then we make it
@@ -1473,9 +1393,7 @@ xpc_teardown_infrastructure(struct xpc_partition *part)
 
        xpc_vars_part[partid].magic = 0;
 
-
-       free_irq(SGI_XPC_NOTIFY, (void *) (u64) partid);
-
+       free_irq(SGI_XPC_NOTIFY, (void *)(u64)partid);
 
        /*
         * Before proceeding with the teardown we have to wait until all
@@ -1483,7 +1401,6 @@ xpc_teardown_infrastructure(struct xpc_partition *part)
         */
        wait_event(part->teardown_wq, (atomic_read(&part->references) == 0));
 
-
        /* now we can begin tearing down the infrastructure */
 
        part->setup_state = XPC_P_TORNDOWN;
@@ -1504,7 +1421,6 @@ xpc_teardown_infrastructure(struct xpc_partition *part)
        part->local_IPI_amo_va = NULL;
 }
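
The teardown above follows a strict order: the partition is first marked unusable via setup_state, its notify IRQ is freed, and only then does the thread block until every outstanding reference has been dropped before freeing per-partition memory. A minimal sketch of that reference-drain handshake, with hypothetical names and assuming the usual atomic/waitqueue primitives:

    /* sketch only: the real code also rejects new refs once torn down */
    struct part_sketch {
            atomic_t references;
            wait_queue_head_t teardown_wq;
    };

    static void part_sketch_deref(struct part_sketch *p)
    {
            /* the final deref lets the teardown path proceed */
            if (atomic_dec_return(&p->references) == 0)
                    wake_up(&p->teardown_wq);
    }

    static void part_sketch_teardown(struct part_sketch *p)
    {
            /* mark unusable first (elided), then wait for users to drain */
            wait_event(p->teardown_wq, atomic_read(&p->references) == 0);
            /* now safe to free per-partition state */
    }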
 
-
 /*
  * Called by XP at the time of channel connection registration to cause
  * XPC to establish connections to all currently active partitions.
@@ -1516,7 +1432,6 @@ xpc_initiate_connect(int ch_number)
        struct xpc_partition *part;
        struct xpc_channel *ch;
 
-
        DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS);
 
        for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
@@ -1535,7 +1450,6 @@ xpc_initiate_connect(int ch_number)
        }
 }
 
-
 void
 xpc_connected_callout(struct xpc_channel *ch)
 {
@@ -1546,14 +1460,13 @@ xpc_connected_callout(struct xpc_channel *ch)
                        "partid=%d, channel=%d\n", ch->partid, ch->number);
 
                ch->func(xpcConnected, ch->partid, ch->number,
-                               (void *) (u64) ch->local_nentries, ch->key);
+                        (void *)(u64)ch->local_nentries, ch->key);
 
                dev_dbg(xpc_chan, "ch->func() returned, reason=xpcConnected, "
                        "partid=%d, channel=%d\n", ch->partid, ch->number);
        }
 }
 
-
 /*
  * Called by XP at the time of channel connection unregistration to cause
  * XPC to teardown all current connections for the specified channel.
@@ -1575,7 +1488,6 @@ xpc_initiate_disconnect(int ch_number)
        struct xpc_partition *part;
        struct xpc_channel *ch;
 
-
        DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS);
 
        /* initiate the channel disconnect for every active partition */
@@ -1592,7 +1504,7 @@ xpc_initiate_disconnect(int ch_number)
                                ch->flags |= XPC_C_WDISCONNECT;
 
                                XPC_DISCONNECT_CHANNEL(ch, xpcUnregistering,
-                                                               &irq_flags);
+                                                      &irq_flags);
                        }
 
                        spin_unlock_irqrestore(&ch->lock, irq_flags);
@@ -1605,7 +1517,6 @@ xpc_initiate_disconnect(int ch_number)
        xpc_disconnect_wait(ch_number);
 }
 
-
 /*
  * To disconnect a channel, and reflect it back to all who may be waiting.
  *
@@ -1617,16 +1528,15 @@ xpc_initiate_disconnect(int ch_number)
  */
 void
 xpc_disconnect_channel(const int line, struct xpc_channel *ch,
-                       enum xpc_retval reason, unsigned long *irq_flags)
+                      enum xpc_retval reason, unsigned long *irq_flags)
 {
        u32 channel_was_connected = (ch->flags & XPC_C_CONNECTED);
 
-
        DBUG_ON(!spin_is_locked(&ch->lock));
 
-       if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) {
+       if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED))
                return;
-       }
+
        DBUG_ON(!(ch->flags & (XPC_C_CONNECTING | XPC_C_CONNECTED)));
 
        dev_dbg(xpc_chan, "reason=%d, line=%d, partid=%d, channel=%d\n",
@@ -1637,14 +1547,13 @@ xpc_disconnect_channel(const int line, struct xpc_channel *ch,
        ch->flags |= (XPC_C_CLOSEREQUEST | XPC_C_DISCONNECTING);
        /* some of these may not have been set */
        ch->flags &= ~(XPC_C_OPENREQUEST | XPC_C_OPENREPLY |
-                       XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
-                       XPC_C_CONNECTING | XPC_C_CONNECTED);
+                      XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
+                      XPC_C_CONNECTING | XPC_C_CONNECTED);
 
        xpc_IPI_send_closerequest(ch, irq_flags);
 
-       if (channel_was_connected) {
+       if (channel_was_connected)
                ch->flags |= XPC_C_WASCONNECTED;
-       }
 
        spin_unlock_irqrestore(&ch->lock, *irq_flags);
 
@@ -1653,20 +1562,18 @@ xpc_disconnect_channel(const int line, struct xpc_channel *ch,
                wake_up_all(&ch->idle_wq);
 
        } else if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
-                       !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
+                  !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
                /* start a kthread that will do the xpcDisconnecting callout */
                xpc_create_kthreads(ch, 1, 1);
        }
 
        /* wake those waiting to allocate an entry from the local msg queue */
-       if (atomic_read(&ch->n_on_msg_allocate_wq) > 0) {
+       if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
                wake_up(&ch->msg_allocate_wq);
-       }
 
        spin_lock_irqsave(&ch->lock, *irq_flags);
 }
 
-
 void
 xpc_disconnect_callout(struct xpc_channel *ch, enum xpc_retval reason)
 {
@@ -1687,7 +1594,6 @@ xpc_disconnect_callout(struct xpc_channel *ch, enum xpc_retval reason)
        }
 }
 
-
 /*
  * Wait for a message entry to become available for the specified channel,
  * but don't wait any longer than 1 jiffy.
@@ -1697,9 +1603,8 @@ xpc_allocate_msg_wait(struct xpc_channel *ch)
 {
        enum xpc_retval ret;
 
-
        if (ch->flags & XPC_C_DISCONNECTING) {
-               DBUG_ON(ch->reason == xpcInterrupted);  // >>> Is this true?
+               DBUG_ON(ch->reason == xpcInterrupted);
                return ch->reason;
        }
 
@@ -1709,7 +1614,7 @@ xpc_allocate_msg_wait(struct xpc_channel *ch)
 
        if (ch->flags & XPC_C_DISCONNECTING) {
                ret = ch->reason;
-               DBUG_ON(ch->reason == xpcInterrupted);  // >>> Is this true?
+               DBUG_ON(ch->reason == xpcInterrupted);
        } else if (ret == 0) {
                ret = xpcTimeout;
        } else {
@@ -1719,20 +1624,18 @@ xpc_allocate_msg_wait(struct xpc_channel *ch)
        return ret;
 }
 
-
 /*
  * Allocate an entry for a message from the message queue associated with the
  * specified channel.
  */
 static enum xpc_retval
 xpc_allocate_msg(struct xpc_channel *ch, u32 flags,
-                       struct xpc_msg **address_of_msg)
+                struct xpc_msg **address_of_msg)
 {
        struct xpc_msg *msg;
        enum xpc_retval ret;
        s64 put;
 
-
        /* this reference will be dropped in xpc_send_msg() */
        xpc_msgqueue_ref(ch);
 
@@ -1745,7 +1648,6 @@ xpc_allocate_msg(struct xpc_channel *ch, u32 flags,
                return xpcNotConnected;
        }
 
-
        /*
         * Get the next available message entry from the local message queue.
         * If none are available, we'll make sure that we grab the latest
@@ -1755,25 +1657,23 @@ xpc_allocate_msg(struct xpc_channel *ch, u32 flags,
 
        while (1) {
 
-               put = (volatile s64) ch->w_local_GP.put;
-               if (put - (volatile s64) ch->w_remote_GP.get <
-                                                       ch->local_nentries) {
+               put = ch->w_local_GP.put;
+               rmb();  /* guarantee that .put loads before .get */
+               if (put - ch->w_remote_GP.get < ch->local_nentries) {
 
                        /* There are available message entries. We need to try
                         * to secure one for ourselves. We'll do this by trying
                         * to increment w_local_GP.put as long as someone else
                         * doesn't beat us to it. If they do, we'll have to
                         * try again.
-                        */
-                       if (cmpxchg(&ch->w_local_GP.put, put, put + 1) ==
-                                                                       put) {
+                        */
+                       if (cmpxchg(&ch->w_local_GP.put, put, put + 1) == put) {
                                /* we got the entry referenced by put */
                                break;
                        }
                        continue;       /* try again */
                }
 
-
                /*
                 * There aren't any available msg entries at this time.
                 *
@@ -1783,9 +1683,8 @@ xpc_allocate_msg(struct xpc_channel *ch, u32 flags,
                 * that will cause the IPI handler to fetch the latest
                 * GP values as if an IPI was sent by the other side.
                 */
-               if (ret == xpcTimeout) {
+               if (ret == xpcTimeout)
                        xpc_IPI_send_local_msgrequest(ch);
-               }
 
                if (flags & XPC_NOWAIT) {
                        xpc_msgqueue_deref(ch);
@@ -1799,25 +1698,22 @@ xpc_allocate_msg(struct xpc_channel *ch, u32 flags,
                }
        }
 
-
        /* get the message's address and initialize it */
-       msg = (struct xpc_msg *) ((u64) ch->local_msgqueue +
-                               (put % ch->local_nentries) * ch->msg_size);
-
+       msg = (struct xpc_msg *)((u64)ch->local_msgqueue +
+                                (put % ch->local_nentries) * ch->msg_size);
 
        DBUG_ON(msg->flags != 0);
        msg->number = put;
 
        dev_dbg(xpc_chan, "w_local_GP.put changed to %ld; msg=0x%p, "
                "msg_number=%ld, partid=%d, channel=%d\n", put + 1,
-               (void *) msg, msg->number, ch->partid, ch->number);
+               (void *)msg, msg->number, ch->partid, ch->number);
 
        *address_of_msg = msg;
 
        return xpcSuccess;
 }
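
The allocation loop in the hunk above is a lock-free slot reservation: snapshot the cached Put, verify the ring is not full against the cached remote Get, and claim the slot with cmpxchg(), retrying on contention. The rmb() added here orders the .put load before the .get load, so the fullness check never pairs a stale put with a fresher get. A condensed sketch of the same pattern, assuming a 64-bit architecture where cmpxchg() operates on an s64:

    /* returns the reserved message number, or -1 if the ring is full */
    static s64 reserve_slot(s64 *w_put, s64 *w_get, int nentries)
    {
            s64 put;

            for (;;) {
                    put = *w_put;
                    rmb();  /* load .put before .get, as above */
                    if (put - *w_get >= nentries)
                            return -1;      /* currently full */
                    if (cmpxchg(w_put, put, put + 1) == put)
                            return put;     /* we own slot put % nentries */
                    /* another CPU won the race; retry */
            }
    }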
 
-
 /*
  * Allocate an entry for a message from the message queue associated with the
  * specified channel. NOTE that this routine can sleep waiting for a message
@@ -1838,7 +1734,6 @@ xpc_initiate_allocate(partid_t partid, int ch_number, u32 flags, void **payload)
        enum xpc_retval ret = xpcUnknownReason;
        struct xpc_msg *msg = NULL;
 
-
        DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
        DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
 
@@ -1848,15 +1743,13 @@ xpc_initiate_allocate(partid_t partid, int ch_number, u32 flags, void **payload)
                ret = xpc_allocate_msg(&part->channels[ch_number], flags, &msg);
                xpc_part_deref(part);
 
-               if (msg != NULL) {
+               if (msg != NULL)
                        *payload = &msg->payload;
-               }
        }
 
        return ret;
 }
 
-
 /*
  * Now we actually send the messages that are ready to be sent by advancing
  * the local message queue's Put value and then send an IPI to the recipient
@@ -1869,20 +1762,18 @@ xpc_send_msgs(struct xpc_channel *ch, s64 initial_put)
        s64 put = initial_put + 1;
        int send_IPI = 0;
 
-
        while (1) {
 
                while (1) {
-                       if (put == (volatile s64) ch->w_local_GP.put) {
+                       if (put == ch->w_local_GP.put)
                                break;
-                       }
 
-                       msg = (struct xpc_msg *) ((u64) ch->local_msgqueue +
-                              (put % ch->local_nentries) * ch->msg_size);
+                       msg = (struct xpc_msg *)((u64)ch->local_msgqueue +
+                                                (put % ch->local_nentries) *
+                                                ch->msg_size);
 
-                       if (!(msg->flags & XPC_M_READY)) {
+                       if (!(msg->flags & XPC_M_READY))
                                break;
-                       }
 
                        put++;
                }
@@ -1893,9 +1784,9 @@ xpc_send_msgs(struct xpc_channel *ch, s64 initial_put)
                }
 
                if (cmpxchg_rel(&ch->local_GP->put, initial_put, put) !=
-                                                               initial_put) {
+                   initial_put) {
                        /* someone else beat us to it */
-                       DBUG_ON((volatile s64) ch->local_GP->put < initial_put);
+                       DBUG_ON(ch->local_GP->put < initial_put);
                        break;
                }
 
@@ -1914,12 +1805,10 @@ xpc_send_msgs(struct xpc_channel *ch, s64 initial_put)
                initial_put = put;
        }
 
-       if (send_IPI) {
+       if (send_IPI)
                xpc_IPI_send_msgrequest(ch);
-       }
 }
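
xpc_send_msgs() publishes the shared Put only across a run of consecutive slots already flagged XPC_M_READY, and it does so with cmpxchg_rel(), the ia64 release-ordered variant, so the peer cannot observe the advanced Put before the message payloads themselves. A hedged sketch of that publish step (hypothetical ready() helper; the caller guarantees message initial_put is already READY):

    static int publish_ready(s64 *shared_put, s64 initial_put, s64 w_put,
                             int (*ready)(s64 msg_number))
    {
            s64 put = initial_put + 1;

            /* extend over any further consecutive READY messages */
            while (put < w_put && ready(put))
                    put++;
            /*
             * release semantics: the payload stores become visible to
             * the remote partition before the new Put value does
             */
            return cmpxchg_rel(shared_put, initial_put, put) == initial_put;
    }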
 
-
 /*
  * Common code that does the actual sending of the message by advancing the
  * local message queue's Put value and sends an IPI to the partition the
@@ -1927,16 +1816,15 @@ xpc_send_msgs(struct xpc_channel *ch, s64 initial_put)
  */
 static enum xpc_retval
 xpc_send_msg(struct xpc_channel *ch, struct xpc_msg *msg, u8 notify_type,
-                       xpc_notify_func func, void *key)
+            xpc_notify_func func, void *key)
 {
        enum xpc_retval ret = xpcSuccess;
        struct xpc_notify *notify = notify;
        s64 put, msg_number = msg->number;
 
-
        DBUG_ON(notify_type == XPC_N_CALL && func == NULL);
-       DBUG_ON((((u64) msg - (u64) ch->local_msgqueue) / ch->msg_size) !=
-                                       msg_number % ch->local_nentries);
+       DBUG_ON((((u64)msg - (u64)ch->local_msgqueue) / ch->msg_size) !=
+               msg_number % ch->local_nentries);
        DBUG_ON(msg->flags & XPC_M_READY);
 
        if (ch->flags & XPC_C_DISCONNECTING) {
@@ -1959,7 +1847,7 @@ xpc_send_msg(struct xpc_channel *ch, struct xpc_msg *msg, u8 notify_type,
                notify->key = key;
                notify->type = notify_type;
 
-               // >>> is a mb() needed here?
+               /* >>> is a mb() needed here? */
 
                if (ch->flags & XPC_C_DISCONNECTING) {
                        /*
@@ -1970,7 +1858,7 @@ xpc_send_msg(struct xpc_channel *ch, struct xpc_msg *msg, u8 notify_type,
                         * the notify entry.
                         */
                        if (cmpxchg(&notify->type, notify_type, 0) ==
-                                                               notify_type) {
+                           notify_type) {
                                atomic_dec(&ch->n_to_notify);
                                ret = ch->reason;
                        }
@@ -1992,16 +1880,14 @@ xpc_send_msg(struct xpc_channel *ch, struct xpc_msg *msg, u8 notify_type,
        /* see if the message is next in line to be sent, if so send it */
 
        put = ch->local_GP->put;
-       if (put == msg_number) {
+       if (put == msg_number)
                xpc_send_msgs(ch, put);
-       }
 
        /* drop the reference grabbed in xpc_allocate_msg() */
        xpc_msgqueue_deref(ch);
        return ret;
 }
 
-
 /*
  * Send a message previously allocated using xpc_initiate_allocate() on the
  * specified channel connected to the specified partition.
@@ -2029,8 +1915,7 @@ xpc_initiate_send(partid_t partid, int ch_number, void *payload)
        struct xpc_msg *msg = XPC_MSG_ADDRESS(payload);
        enum xpc_retval ret;
 
-
-       dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *) msg,
+       dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *)msg,
                partid, ch_number);
 
        DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
@@ -2042,7 +1927,6 @@ xpc_initiate_send(partid_t partid, int ch_number, void *payload)
        return ret;
 }
 
-
 /*
  * Send a message previously allocated using xpc_initiate_allocate on the
  * specified channel connected to the specified partition.
@@ -2075,14 +1959,13 @@ xpc_initiate_send(partid_t partid, int ch_number, void *payload)
  */
 enum xpc_retval
 xpc_initiate_send_notify(partid_t partid, int ch_number, void *payload,
-                               xpc_notify_func func, void *key)
+                        xpc_notify_func func, void *key)
 {
        struct xpc_partition *part = &xpc_partitions[partid];
        struct xpc_msg *msg = XPC_MSG_ADDRESS(payload);
        enum xpc_retval ret;
 
-
-       dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *) msg,
+       dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *)msg,
                partid, ch_number);
 
        DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
@@ -2091,11 +1974,10 @@ xpc_initiate_send_notify(partid_t partid, int ch_number, void *payload,
        DBUG_ON(func == NULL);
 
        ret = xpc_send_msg(&part->channels[ch_number], msg, XPC_N_CALL,
-                                                               func, key);
+                          func, key);
        return ret;
 }
 
-
 static struct xpc_msg *
 xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)
 {
@@ -2105,7 +1987,6 @@ xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)
        u64 msg_offset;
        enum xpc_retval ret;
 
-
        if (mutex_lock_interruptible(&ch->msg_to_pull_mutex) != 0) {
                /* we were interrupted by a signal */
                return NULL;
@@ -2117,23 +1998,21 @@ xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)
 
                msg_index = ch->next_msg_to_pull % ch->remote_nentries;
 
-               DBUG_ON(ch->next_msg_to_pull >=
-                                       (volatile s64) ch->w_remote_GP.put);
-               nmsgs =  (volatile s64) ch->w_remote_GP.put -
-                                               ch->next_msg_to_pull;
+               DBUG_ON(ch->next_msg_to_pull >= ch->w_remote_GP.put);
+               nmsgs = ch->w_remote_GP.put - ch->next_msg_to_pull;
                if (msg_index + nmsgs > ch->remote_nentries) {
                        /* ignore the ones that wrap the msg queue for now */
                        nmsgs = ch->remote_nentries - msg_index;
                }
 
                msg_offset = msg_index * ch->msg_size;
-               msg = (struct xpc_msg *) ((u64) ch->remote_msgqueue +
-                                                               msg_offset);
-               remote_msg = (struct xpc_msg *) (ch->remote_msgqueue_pa +
-                                                               msg_offset);
+               msg = (struct xpc_msg *)((u64)ch->remote_msgqueue + msg_offset);
+               remote_msg = (struct xpc_msg *)(ch->remote_msgqueue_pa +
+                                               msg_offset);
 
-               if ((ret = xpc_pull_remote_cachelines(part, msg, remote_msg,
-                               nmsgs * ch->msg_size)) != xpcSuccess) {
+               ret = xpc_pull_remote_cachelines(part, msg, remote_msg,
+                                                nmsgs * ch->msg_size);
+               if (ret != xpcSuccess) {
 
                        dev_dbg(xpc_chan, "failed to pull %d msgs starting with"
                                " msg %ld from partition %d, channel=%d, "
@@ -2146,8 +2025,6 @@ xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)
                        return NULL;
                }
 
-               mb();   /* >>> this may not be needed, we're not sure */
-
                ch->next_msg_to_pull += nmsgs;
        }
 
@@ -2155,12 +2032,11 @@ xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)
 
        /* return the message we were looking for */
        msg_offset = (get % ch->remote_nentries) * ch->msg_size;
-       msg = (struct xpc_msg *) ((u64) ch->remote_msgqueue + msg_offset);
+       msg = (struct xpc_msg *)((u64)ch->remote_msgqueue + msg_offset);
 
        return msg;
 }
 
-
 /*
  * Get a message to be delivered.
  */
@@ -2170,23 +2046,21 @@ xpc_get_deliverable_msg(struct xpc_channel *ch)
        struct xpc_msg *msg = NULL;
        s64 get;
 
-
        do {
-               if ((volatile u32) ch->flags & XPC_C_DISCONNECTING) {
+               if (ch->flags & XPC_C_DISCONNECTING)
                        break;
-               }
 
-               get = (volatile s64) ch->w_local_GP.get;
-               if (get == (volatile s64) ch->w_remote_GP.put) {
+               get = ch->w_local_GP.get;
+               rmb();  /* guarantee that .get loads before .put */
+               if (get == ch->w_remote_GP.put)
                        break;
-               }
 
                /* There are messages waiting to be pulled and delivered.
                 * We need to try to secure one for ourselves. We'll do this
                 * by trying to increment w_local_GP.get and hope that no one
                 * else beats us to it. If they do, we'll simply have
                 * to try again for the next one.
-                */
+                */
 
                if (cmpxchg(&ch->w_local_GP.get, get, get + 1) == get) {
                        /* we got the entry referenced by get */
@@ -2211,7 +2085,6 @@ xpc_get_deliverable_msg(struct xpc_channel *ch)
        return msg;
 }
 
-
 /*
  * Deliver a message to its intended recipient.
  */
@@ -2220,8 +2093,8 @@ xpc_deliver_msg(struct xpc_channel *ch)
 {
        struct xpc_msg *msg;
 
-
-       if ((msg = xpc_get_deliverable_msg(ch)) != NULL) {
+       msg = xpc_get_deliverable_msg(ch);
+       if (msg != NULL) {
 
                /*
                 * This ref is taken to protect the payload itself from being
@@ -2235,16 +2108,16 @@ xpc_deliver_msg(struct xpc_channel *ch)
                if (ch->func != NULL) {
                        dev_dbg(xpc_chan, "ch->func() called, msg=0x%p, "
                                "msg_number=%ld, partid=%d, channel=%d\n",
-                               (void *) msg, msg->number, ch->partid,
+                               (void *)msg, msg->number, ch->partid,
                                ch->number);
 
                        /* deliver the message to its intended recipient */
                        ch->func(xpcMsgReceived, ch->partid, ch->number,
-                                       &msg->payload, ch->key);
+                                &msg->payload, ch->key);
 
                        dev_dbg(xpc_chan, "ch->func() returned, msg=0x%p, "
                                "msg_number=%ld, partid=%d, channel=%d\n",
-                               (void *) msg, msg->number, ch->partid,
+                               (void *)msg, msg->number, ch->partid,
                                ch->number);
                }
 
@@ -2252,7 +2125,6 @@ xpc_deliver_msg(struct xpc_channel *ch)
        }
 }
 
-
 /*
  * Now we actually acknowledge the messages that have been delivered and ack'd
  * by advancing the cached remote message queue's Get value and if requested
@@ -2265,20 +2137,18 @@ xpc_acknowledge_msgs(struct xpc_channel *ch, s64 initial_get, u8 msg_flags)
        s64 get = initial_get + 1;
        int send_IPI = 0;
 
-
        while (1) {
 
                while (1) {
-                       if (get == (volatile s64) ch->w_local_GP.get) {
+                       if (get == ch->w_local_GP.get)
                                break;
-                       }
 
-                       msg = (struct xpc_msg *) ((u64) ch->remote_msgqueue +
-                              (get % ch->remote_nentries) * ch->msg_size);
+                       msg = (struct xpc_msg *)((u64)ch->remote_msgqueue +
+                                                (get % ch->remote_nentries) *
+                                                ch->msg_size);
 
-                       if (!(msg->flags & XPC_M_DONE)) {
+                       if (!(msg->flags & XPC_M_DONE))
                                break;
-                       }
 
                        msg_flags |= msg->flags;
                        get++;
@@ -2290,10 +2160,9 @@ xpc_acknowledge_msgs(struct xpc_channel *ch, s64 initial_get, u8 msg_flags)
                }
 
                if (cmpxchg_rel(&ch->local_GP->get, initial_get, get) !=
-                                                               initial_get) {
+                   initial_get) {
                        /* someone else beat us to it */
-                       DBUG_ON((volatile s64) ch->local_GP->get <=
-                                                               initial_get);
+                       DBUG_ON(ch->local_GP->get <= initial_get);
                        break;
                }
 
@@ -2312,12 +2181,10 @@ xpc_acknowledge_msgs(struct xpc_channel *ch, s64 initial_get, u8 msg_flags)
                initial_get = get;
        }
 
-       if (send_IPI) {
+       if (send_IPI)
                xpc_IPI_send_msgrequest(ch);
-       }
 }
 
-
 /*
  * Acknowledge receipt of a delivered message.
  *
@@ -2343,17 +2210,16 @@ xpc_initiate_received(partid_t partid, int ch_number, void *payload)
        struct xpc_msg *msg = XPC_MSG_ADDRESS(payload);
        s64 get, msg_number = msg->number;
 
-
        DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
        DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
 
        ch = &part->channels[ch_number];
 
        dev_dbg(xpc_chan, "msg=0x%p, msg_number=%ld, partid=%d, channel=%d\n",
-               (void *) msg, msg_number, ch->partid, ch->number);
+               (void *)msg, msg_number, ch->partid, ch->number);
 
-       DBUG_ON((((u64) msg - (u64) ch->remote_msgqueue) / ch->msg_size) !=
-                                       msg_number % ch->remote_nentries);
+       DBUG_ON((((u64)msg - (u64)ch->remote_msgqueue) / ch->msg_size) !=
+               msg_number % ch->remote_nentries);
        DBUG_ON(msg->flags & XPC_M_DONE);
 
        msg->flags |= XPC_M_DONE;
@@ -2369,11 +2235,9 @@ xpc_initiate_received(partid_t partid, int ch_number, void *payload)
         * been delivered.
         */
        get = ch->local_GP->get;
-       if (get == msg_number) {
+       if (get == msg_number)
                xpc_acknowledge_msgs(ch, get, msg->flags);
-       }
 
        /* the call to xpc_msgqueue_ref() was done by xpc_deliver_msg()  */
        xpc_msgqueue_deref(ch);
 }
-
similarity index 82%
rename from arch/ia64/sn/kernel/xpc_main.c
rename to drivers/misc/sgi-xp/xpc_main.c
index 9e0b164..f673ba9 100644 (file)
@@ -3,10 +3,9 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (c) 2004-2007 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (c) 2004-2008 Silicon Graphics, Inc.  All Rights Reserved.
  */
 
-
 /*
  * Cross Partition Communication (XPC) support - standard version.
  *
  *
  */
 
-
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/syscalls.h>
 #include <linux/cache.h>
 #include <linux/interrupt.h>
 #include <linux/delay.h>
 #include <linux/reboot.h>
 #include <linux/completion.h>
 #include <linux/kdebug.h>
+#include <linux/kthread.h>
+#include <linux/uaccess.h>
 #include <asm/sn/intr.h>
 #include <asm/sn/sn_sal.h>
-#include <asm/uaccess.h>
-#include <asm/sn/xpc.h>
-
+#include "xpc.h"
 
 /* define two XPC debug device structures to be used with dev_dbg() et al */
 
@@ -81,10 +77,8 @@ struct device xpc_chan_dbg_subname = {
 struct device *xpc_part = &xpc_part_dbg_subname;
 struct device *xpc_chan = &xpc_chan_dbg_subname;
 
-
 static int xpc_kdebug_ignore;
 
-
 /* systune related variables for /proc/sys directories */
 
 static int xpc_hb_interval = XPC_HB_DEFAULT_INTERVAL;
@@ -96,61 +90,56 @@ static int xpc_hb_check_min_interval = 10;
 static int xpc_hb_check_max_interval = 120;
 
 int xpc_disengage_request_timelimit = XPC_DISENGAGE_REQUEST_DEFAULT_TIMELIMIT;
-static int xpc_disengage_request_min_timelimit = 0;
+static int xpc_disengage_request_min_timelimit;        /* = 0 */
 static int xpc_disengage_request_max_timelimit = 120;
 
 static ctl_table xpc_sys_xpc_hb_dir[] = {
        {
-               .ctl_name       = CTL_UNNUMBERED,
-               .procname       = "hb_interval",
-               .data           = &xpc_hb_interval,
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = &proc_dointvec_minmax,
-               .strategy       = &sysctl_intvec,
-               .extra1         = &xpc_hb_min_interval,
-               .extra2         = &xpc_hb_max_interval
-       },
+        .ctl_name = CTL_UNNUMBERED,
+        .procname = "hb_interval",
+        .data = &xpc_hb_interval,
+        .maxlen = sizeof(int),
+        .mode = 0644,
+        .proc_handler = &proc_dointvec_minmax,
+        .strategy = &sysctl_intvec,
+        .extra1 = &xpc_hb_min_interval,
+        .extra2 = &xpc_hb_max_interval},
        {
-               .ctl_name       = CTL_UNNUMBERED,
-               .procname       = "hb_check_interval",
-               .data           = &xpc_hb_check_interval,
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = &proc_dointvec_minmax,
-               .strategy       = &sysctl_intvec,
-               .extra1         = &xpc_hb_check_min_interval,
-               .extra2         = &xpc_hb_check_max_interval
-       },
+        .ctl_name = CTL_UNNUMBERED,
+        .procname = "hb_check_interval",
+        .data = &xpc_hb_check_interval,
+        .maxlen = sizeof(int),
+        .mode = 0644,
+        .proc_handler = &proc_dointvec_minmax,
+        .strategy = &sysctl_intvec,
+        .extra1 = &xpc_hb_check_min_interval,
+        .extra2 = &xpc_hb_check_max_interval},
        {}
 };
 static ctl_table xpc_sys_xpc_dir[] = {
        {
-               .ctl_name       = CTL_UNNUMBERED,
-               .procname       = "hb",
-               .mode           = 0555,
-               .child          = xpc_sys_xpc_hb_dir
-       },
+        .ctl_name = CTL_UNNUMBERED,
+        .procname = "hb",
+        .mode = 0555,
+        .child = xpc_sys_xpc_hb_dir},
        {
-               .ctl_name       = CTL_UNNUMBERED,
-               .procname       = "disengage_request_timelimit",
-               .data           = &xpc_disengage_request_timelimit,
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = &proc_dointvec_minmax,
-               .strategy       = &sysctl_intvec,
-               .extra1         = &xpc_disengage_request_min_timelimit,
-               .extra2         = &xpc_disengage_request_max_timelimit
-       },
+        .ctl_name = CTL_UNNUMBERED,
+        .procname = "disengage_request_timelimit",
+        .data = &xpc_disengage_request_timelimit,
+        .maxlen = sizeof(int),
+        .mode = 0644,
+        .proc_handler = &proc_dointvec_minmax,
+        .strategy = &sysctl_intvec,
+        .extra1 = &xpc_disengage_request_min_timelimit,
+        .extra2 = &xpc_disengage_request_max_timelimit},
        {}
 };
 static ctl_table xpc_sys_dir[] = {
        {
-               .ctl_name       = CTL_UNNUMBERED,
-               .procname       = "xpc",
-               .mode           = 0555,
-               .child          = xpc_sys_xpc_dir
-       },
+        .ctl_name = CTL_UNNUMBERED,
+        .procname = "xpc",
+        .mode = 0555,
+        .child = xpc_sys_xpc_dir},
        {}
 };
 static struct ctl_table_header *xpc_sysctl;
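
The nested ctl_table trees above describe /proc/sys/xpc/hb/hb_interval, hb_check_interval, and disengage_request_timelimit; they only take effect once the root table is registered, and the returned header is kept for later unregistration. A minimal sketch, assuming the single-argument register_sysctl_table() of this kernel era:

    static int __init xpc_sysctl_sketch(void)
    {
            xpc_sysctl = register_sysctl_table(xpc_sys_dir);
            return xpc_sysctl ? 0 : -ENOMEM;
    }

    static void __exit xpc_sysctl_sketch_exit(void)
    {
            if (xpc_sysctl)
                    unregister_sysctl_table(xpc_sysctl);
    }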
@@ -172,13 +161,10 @@ static DECLARE_COMPLETION(xpc_hb_checker_exited);
 /* notification that the xpc_discovery thread has exited */
 static DECLARE_COMPLETION(xpc_discovery_exited);
 
-
 static struct timer_list xpc_hb_timer;
 
-
 static void xpc_kthread_waitmsgs(struct xpc_partition *, struct xpc_channel *);
 
-
 static int xpc_system_reboot(struct notifier_block *, unsigned long, void *);
 static struct notifier_block xpc_reboot_notifier = {
        .notifier_call = xpc_system_reboot,
@@ -189,25 +175,22 @@ static struct notifier_block xpc_die_notifier = {
        .notifier_call = xpc_system_die,
 };
 
-
 /*
  * Timer function to enforce the timelimit on the partition disengage request.
  */
 static void
 xpc_timeout_partition_disengage_request(unsigned long data)
 {
-       struct xpc_partition *part = (struct xpc_partition *) data;
-
+       struct xpc_partition *part = (struct xpc_partition *)data;
 
        DBUG_ON(time_before(jiffies, part->disengage_request_timeout));
 
-       (void) xpc_partition_disengaged(part);
+       (void)xpc_partition_disengaged(part);
 
        DBUG_ON(part->disengage_request_timeout != 0);
        DBUG_ON(xpc_partition_engaged(1UL << XPC_PARTID(part)) != 0);
 }
 
-
 /*
  * Notify the heartbeat check thread that an IRQ has been received.
  */
@@ -219,7 +202,6 @@ xpc_act_IRQ_handler(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
-
 /*
  * Timer to produce the heartbeat.  The timer structure's function is
  * already set when this is initially called.  A tunable is used to
@@ -230,15 +212,13 @@ xpc_hb_beater(unsigned long dummy)
 {
        xpc_vars->heartbeat++;
 
-       if (time_after_eq(jiffies, xpc_hb_check_timeout)) {
+       if (time_after_eq(jiffies, xpc_hb_check_timeout))
                wake_up_interruptible(&xpc_act_IRQ_wq);
-       }
 
        xpc_hb_timer.expires = jiffies + (xpc_hb_interval * HZ);
        add_timer(&xpc_hb_timer);
 }
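
xpc_hb_beater() is a self-rearming timer: each firing bumps the heartbeat, wakes the checker thread if the check interval has lapsed, and schedules its own next expiry, so no dedicated thread is needed just to tick. The idiom, reduced to a sketch with hypothetical names (classic pre-timer_setup API):

    static struct timer_list beat_timer;
    static int beat_interval = 5;       /* seconds, hypothetical */

    static void beat_fn(unsigned long dummy)
    {
            /* periodic work here, then re-arm for the next tick */
            mod_timer(&beat_timer, jiffies + beat_interval * HZ);
    }

    static void beat_start(void)
    {
            setup_timer(&beat_timer, beat_fn, 0);
            beat_fn(0);     /* prime the work and arm the timer */
    }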
 
-
 /*
  * This thread is responsible for nearly all of the partition
  * activation/deactivation.
@@ -248,27 +228,23 @@ xpc_hb_checker(void *ignore)
 {
        int last_IRQ_count = 0;
        int new_IRQ_count;
-       int force_IRQ=0;
-
+       int force_IRQ = 0;
 
        /* this thread was marked active by xpc_hb_init() */
 
-       daemonize(XPC_HB_CHECK_THREAD_NAME);
-
        set_cpus_allowed(current, cpumask_of_cpu(XPC_HB_CHECK_CPU));
 
        /* set our heartbeating to other partitions into motion */
        xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ);
        xpc_hb_beater(0);
 
-       while (!(volatile int) xpc_exiting) {
+       while (!xpc_exiting) {
 
                dev_dbg(xpc_part, "woke up with %d ticks rem; %d IRQs have "
                        "been received\n",
-                       (int) (xpc_hb_check_timeout - jiffies),
+                       (int)(xpc_hb_check_timeout - jiffies),
                        atomic_read(&xpc_act_IRQ_rcvd) - last_IRQ_count);
 
-
                /* checking of remote heartbeats is skewed by IRQ handling */
                if (time_after_eq(jiffies, xpc_hb_check_timeout)) {
                        dev_dbg(xpc_part, "checking remote heartbeats\n");
@@ -282,7 +258,6 @@ xpc_hb_checker(void *ignore)
                        force_IRQ = 1;
                }
 
-
                /* check for outstanding IRQs */
                new_IRQ_count = atomic_read(&xpc_act_IRQ_rcvd);
                if (last_IRQ_count < new_IRQ_count || force_IRQ != 0) {
@@ -294,30 +269,30 @@ xpc_hb_checker(void *ignore)
                        last_IRQ_count += xpc_identify_act_IRQ_sender();
                        if (last_IRQ_count < new_IRQ_count) {
                                /* retry once to help avoid missing AMO */
-                               (void) xpc_identify_act_IRQ_sender();
+                               (void)xpc_identify_act_IRQ_sender();
                        }
                        last_IRQ_count = new_IRQ_count;
 
                        xpc_hb_check_timeout = jiffies +
-                                          (xpc_hb_check_interval * HZ);
+                           (xpc_hb_check_interval * HZ);
                }
 
                /* wait for IRQ or timeout */
-               (void) wait_event_interruptible(xpc_act_IRQ_wq,
-                           (last_IRQ_count < atomic_read(&xpc_act_IRQ_rcvd) ||
-                                       time_after_eq(jiffies, xpc_hb_check_timeout) ||
-                                               (volatile int) xpc_exiting));
+               (void)wait_event_interruptible(xpc_act_IRQ_wq,
+                                              (last_IRQ_count <
+                                               atomic_read(&xpc_act_IRQ_rcvd)
+                                               || time_after_eq(jiffies,
+                                                       xpc_hb_check_timeout) ||
+                                               xpc_exiting));
        }
 
        dev_dbg(xpc_part, "heartbeat checker is exiting\n");
 
-
        /* mark this thread as having exited */
        complete(&xpc_hb_checker_exited);
        return 0;
 }
 
-
 /*
  * This thread will attempt to discover other partitions to activate
  * based on info provided by SAL. This new thread is short lived and
@@ -326,8 +301,6 @@ xpc_hb_checker(void *ignore)
 static int
 xpc_initiate_discovery(void *ignore)
 {
-       daemonize(XPC_DISCOVERY_THREAD_NAME);
-
        xpc_discovery();
 
        dev_dbg(xpc_part, "discovery thread is exiting\n");
@@ -337,7 +310,6 @@ xpc_initiate_discovery(void *ignore)
        return 0;
 }
 
-
 /*
  * Establish first contact with the remote partition. This involves pulling
  * the XPC per partition variables from the remote partition and waiting for
@@ -348,7 +320,6 @@ xpc_make_first_contact(struct xpc_partition *part)
 {
        enum xpc_retval ret;
 
-
        while ((ret = xpc_pull_remote_vars_part(part)) != xpcSuccess) {
                if (ret != xpcRetry) {
                        XPC_DEACTIVATE_PARTITION(part, ret);
@@ -359,17 +330,15 @@ xpc_make_first_contact(struct xpc_partition *part)
                        "partition %d\n", XPC_PARTID(part));
 
                /* wait a 1/4 of a second or so */
-               (void) msleep_interruptible(250);
+               (void)msleep_interruptible(250);
 
-               if (part->act_state == XPC_P_DEACTIVATING) {
+               if (part->act_state == XPC_P_DEACTIVATING)
                        return part->reason;
-               }
        }
 
        return xpc_mark_partition_active(part);
 }
 
-
 /*
  * The first kthread assigned to a newly activated partition is the one
  * created by XPC HB with which it calls xpc_partition_up(). XPC hangs on to
@@ -386,12 +355,11 @@ static void
 xpc_channel_mgr(struct xpc_partition *part)
 {
        while (part->act_state != XPC_P_DEACTIVATING ||
-                       atomic_read(&part->nchannels_active) > 0 ||
-                                       !xpc_partition_disengaged(part)) {
+              atomic_read(&part->nchannels_active) > 0 ||
+              !xpc_partition_disengaged(part)) {
 
                xpc_process_channel_activity(part);
 
-
                /*
                 * Wait until we've been requested to activate kthreads or
                 * all of the channel's message queues have been torn down or
@@ -406,21 +374,16 @@ xpc_channel_mgr(struct xpc_partition *part)
                 * wake him up.
                 */
                atomic_dec(&part->channel_mgr_requests);
-               (void) wait_event_interruptible(part->channel_mgr_wq,
+               (void)wait_event_interruptible(part->channel_mgr_wq,
                                (atomic_read(&part->channel_mgr_requests) > 0 ||
-                               (volatile u64) part->local_IPI_amo != 0 ||
-                               ((volatile u8) part->act_state ==
-                                                       XPC_P_DEACTIVATING &&
-                               atomic_read(&part->nchannels_active) == 0 &&
-                               xpc_partition_disengaged(part))));
+                                part->local_IPI_amo != 0 ||
+                                (part->act_state == XPC_P_DEACTIVATING &&
+                                atomic_read(&part->nchannels_active) == 0 &&
+                                xpc_partition_disengaged(part))));
                atomic_set(&part->channel_mgr_requests, 1);
-
-               // >>> Does it need to wakeup periodically as well? In case we
-               // >>> miscalculated the #of kthreads to wakeup or create?
        }
 }
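
The manager loop above parks on a request counter: it decrements channel_mgr_requests, sleeps until someone bumps the counter (or an IPI amo / deactivation makes progress possible), then resets it to 1 so the next pass through the loop runs at least once before sleeping again. A stripped-down sketch of that counter handshake, with hypothetical names:

    static void mgr_wait(atomic_t *requests, wait_queue_head_t *wq)
    {
            atomic_dec(requests);
            (void)wait_event_interruptible(*wq, atomic_read(requests) > 0);
            atomic_set(requests, 1);
    }

    static void mgr_kick(atomic_t *requests, wait_queue_head_t *wq)
    {
            atomic_inc(requests);   /* make the wait condition true... */
            wake_up(wq);            /* ...then wake the manager */
    }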
 
-
 /*
  * When XPC HB determines that a partition has come up, it will create a new
  * kthread and that kthread will call this function to attempt to set up the
@@ -443,9 +406,8 @@ xpc_partition_up(struct xpc_partition *part)
 
        dev_dbg(xpc_chan, "activating partition %d\n", XPC_PARTID(part));
 
-       if (xpc_setup_infrastructure(part) != xpcSuccess) {
+       if (xpc_setup_infrastructure(part) != xpcSuccess)
                return;
-       }
 
        /*
         * The kthread that XPC HB called us with will become the
@@ -454,27 +416,22 @@ xpc_partition_up(struct xpc_partition *part)
         * has been dismantled.
         */
 
-       (void) xpc_part_ref(part);      /* this will always succeed */
+       (void)xpc_part_ref(part);       /* this will always succeed */
 
-       if (xpc_make_first_contact(part) == xpcSuccess) {
+       if (xpc_make_first_contact(part) == xpcSuccess)
                xpc_channel_mgr(part);
-       }
 
        xpc_part_deref(part);
 
        xpc_teardown_infrastructure(part);
 }
 
-
 static int
 xpc_activating(void *__partid)
 {
-       partid_t partid = (u64) __partid;
+       partid_t partid = (u64)__partid;
        struct xpc_partition *part = &xpc_partitions[partid];
        unsigned long irq_flags;
-       struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
-       int ret;
-
 
        DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
 
@@ -496,21 +453,6 @@ xpc_activating(void *__partid)
 
        dev_dbg(xpc_part, "bringing partition %d up\n", partid);
 
-       daemonize("xpc%02d", partid);
-
-       /*
-        * This thread needs to run at a realtime priority to prevent a
-        * significant performance degradation.
-        */
-       ret = sched_setscheduler(current, SCHED_FIFO, &param);
-       if (ret != 0) {
-               dev_warn(xpc_part, "unable to set pid %d to a realtime "
-                       "priority, ret=%d\n", current->pid, ret);
-       }
-
-       /* allow this thread and its children to run on any CPU */
-       set_cpus_allowed(current, CPU_MASK_ALL);
-
        /*
         * Register the remote partition's AMOs with SAL so it can handle
         * and cleanup errors within that address range should the remote
@@ -522,9 +464,9 @@ xpc_activating(void *__partid)
         * reloads and system reboots.
         */
        if (sn_register_xp_addr_region(part->remote_amos_page_pa,
-                                                       PAGE_SIZE, 1) < 0) {
+                                      PAGE_SIZE, 1) < 0) {
                dev_warn(xpc_part, "xpc_partition_up(%d) failed to register "
-                       "xp_addr region\n", partid);
+                        "xp_addr region\n", partid);
 
                spin_lock_irqsave(&part->act_lock, irq_flags);
                part->act_state = XPC_P_INACTIVE;
@@ -537,12 +479,11 @@ xpc_activating(void *__partid)
        xpc_allow_hb(partid, xpc_vars);
        xpc_IPI_send_activated(part);
 
-
        /*
         * xpc_partition_up() holds this thread and marks this partition as
         * XPC_P_ACTIVE by calling xpc_hb_mark_active().
         */
-       (void) xpc_partition_up(part);
+       (void)xpc_partition_up(part);
 
        xpc_disallow_hb(partid, xpc_vars);
        xpc_mark_partition_inactive(part);
@@ -555,14 +496,12 @@ xpc_activating(void *__partid)
        return 0;
 }
 
-
 void
 xpc_activate_partition(struct xpc_partition *part)
 {
        partid_t partid = XPC_PARTID(part);
        unsigned long irq_flags;
-       pid_t pid;
-
+       struct task_struct *kthread;
 
        spin_lock_irqsave(&part->act_lock, irq_flags);
 
@@ -573,9 +512,9 @@ xpc_activate_partition(struct xpc_partition *part)
 
        spin_unlock_irqrestore(&part->act_lock, irq_flags);
 
-       pid = kernel_thread(xpc_activating, (void *) ((u64) partid), 0);
-
-       if (unlikely(pid <= 0)) {
+       kthread = kthread_run(xpc_activating, (void *)((u64)partid), "xpc%02d",
+                             partid);
+       if (IS_ERR(kthread)) {
                spin_lock_irqsave(&part->act_lock, irq_flags);
                part->act_state = XPC_P_INACTIVE;
                XPC_SET_REASON(part, xpcCloneKThreadFailed, __LINE__);
@@ -583,12 +522,11 @@ xpc_activate_partition(struct xpc_partition *part)
        }
 }
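
The hunk above is the heart of this patch's thread-model change: kernel_thread() returned a pid and required the new thread to daemonize() and configure its own scheduling, while kthread_run() returns a struct task_struct, names the thread itself, and reports failure through IS_ERR(). A minimal sketch of the new pattern, using hypothetical demo_* names rather than XPC's:

	#include <linux/kthread.h>
	#include <linux/sched.h>
	#include <linux/err.h>

	static int demo_thread_fn(void *data)
	{
		/* runs in its own kthread; no daemonize() is needed */
		while (!kthread_should_stop()) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(HZ);
		}
		return 0;
	}

	static int demo_start(void)
	{
		struct task_struct *kthread;

		kthread = kthread_run(demo_thread_fn, NULL, "demo%02d", 1);
		if (IS_ERR(kthread))
			return PTR_ERR(kthread); /* error pointer, not a pid */
		return 0;
	}

XPC's activation threads exit on their own, so no kthread_stop() call appears in the hunks above; the stop check here belongs only to the generic pattern.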
 
-
 /*
  * Handle the receipt of a SGI_XPC_NOTIFY IRQ by seeing whether the specified
  * partition actually sent it. Since SGI_XPC_NOTIFY IRQs may be shared by more
  * than one partition, we use an AMO_t structure per partition to indicate
- * whether a partition has sent an IPI or not.  >>> If it has, then wake up the
+ * whether a partition has sent an IPI or not.  If it has, then wake up the
  * associated kthread to handle it.
  *
  * All SGI_XPC_NOTIFY IRQs received by XPC are the result of IPIs sent by XPC
@@ -603,10 +541,9 @@ xpc_activate_partition(struct xpc_partition *part)
 irqreturn_t
 xpc_notify_IRQ_handler(int irq, void *dev_id)
 {
-       partid_t partid = (partid_t) (u64) dev_id;
+       partid_t partid = (partid_t)(u64)dev_id;
        struct xpc_partition *part = &xpc_partitions[partid];
 
-
        DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
 
        if (xpc_part_ref(part)) {
@@ -617,7 +554,6 @@ xpc_notify_IRQ_handler(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
-
 /*
  * Check to see if xpc_notify_IRQ_handler() dropped any IPIs on the floor
  * because the write to their associated IPI amo completed after the IRQ/IPI
@@ -630,13 +566,12 @@ xpc_dropped_IPI_check(struct xpc_partition *part)
                xpc_check_for_channel_activity(part);
 
                part->dropped_IPI_timer.expires = jiffies +
-                                                       XPC_P_DROPPED_IPI_WAIT;
+                   XPC_P_DROPPED_IPI_WAIT;
                add_timer(&part->dropped_IPI_timer);
                xpc_part_deref(part);
        }
 }
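
xpc_dropped_IPI_check() is driven by a self-rearming timer: the callback does its work, pushes expires forward, and calls add_timer() again, exactly as shown above. A sketch of that idiom with the 2.6-era timer API used throughout this file (the demo_* names are hypothetical):

	#include <linux/timer.h>
	#include <linux/jiffies.h>

	static struct timer_list demo_timer;

	static void demo_timer_fn(unsigned long data)
	{
		/* ... do the periodic check ... */

		/* re-arm for the next interval (here, 1/4 second) */
		demo_timer.expires = jiffies + HZ / 4;
		add_timer(&demo_timer);
	}

	static void demo_timer_start(void)
	{
		init_timer(&demo_timer);
		demo_timer.function = demo_timer_fn;
		demo_timer.data = 0;
		demo_timer.expires = jiffies + HZ / 4;
		add_timer(&demo_timer);
	}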
 
-
 void
 xpc_activate_kthreads(struct xpc_channel *ch, int needed)
 {
@@ -644,7 +579,6 @@ xpc_activate_kthreads(struct xpc_channel *ch, int needed)
        int assigned = atomic_read(&ch->kthreads_assigned);
        int wakeup;
 
-
        DBUG_ON(needed <= 0);
 
        if (idle > 0) {
@@ -658,16 +592,13 @@ xpc_activate_kthreads(struct xpc_channel *ch, int needed)
                wake_up_nr(&ch->idle_wq, wakeup);
        }
 
-       if (needed <= 0) {
+       if (needed <= 0)
                return;
-       }
 
        if (needed + assigned > ch->kthreads_assigned_limit) {
                needed = ch->kthreads_assigned_limit - assigned;
-               // >>>should never be less than 0
-               if (needed <= 0) {
+               if (needed <= 0)
                        return;
-               }
        }
 
        dev_dbg(xpc_chan, "create %d new kthreads, partid=%d, channel=%d\n",
@@ -676,7 +607,6 @@ xpc_activate_kthreads(struct xpc_channel *ch, int needed)
        xpc_create_kthreads(ch, needed, 0);
 }
 
-
 /*
  * This function is where XPC's kthreads wait for messages to deliver.
  */
@@ -686,15 +616,13 @@ xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch)
        do {
                /* deliver messages to their intended recipients */
 
-               while ((volatile s64) ch->w_local_GP.get <
-                               (volatile s64) ch->w_remote_GP.put &&
-                                       !((volatile u32) ch->flags &
-                                               XPC_C_DISCONNECTING)) {
+               while (ch->w_local_GP.get < ch->w_remote_GP.put &&
+                      !(ch->flags & XPC_C_DISCONNECTING)) {
                        xpc_deliver_msg(ch);
                }
 
                if (atomic_inc_return(&ch->kthreads_idle) >
-                                               ch->kthreads_idle_limit) {
+                   ch->kthreads_idle_limit) {
                        /* too many idle kthreads on this channel */
                        atomic_dec(&ch->kthreads_idle);
                        break;
@@ -703,20 +631,17 @@ xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch)
                dev_dbg(xpc_chan, "idle kthread calling "
                        "wait_event_interruptible_exclusive()\n");
 
-               (void) wait_event_interruptible_exclusive(ch->idle_wq,
-                               ((volatile s64) ch->w_local_GP.get <
-                                       (volatile s64) ch->w_remote_GP.put ||
-                               ((volatile u32) ch->flags &
-                                               XPC_C_DISCONNECTING)));
+               (void)wait_event_interruptible_exclusive(ch->idle_wq,
+                               (ch->w_local_GP.get < ch->w_remote_GP.put ||
+                                (ch->flags & XPC_C_DISCONNECTING)));
 
                atomic_dec(&ch->kthreads_idle);
 
-       } while (!((volatile u32) ch->flags & XPC_C_DISCONNECTING));
+       } while (!(ch->flags & XPC_C_DISCONNECTING));
 }
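
The rewritten wait above also shows why the volatile casts could be dropped: wait_event_interruptible_exclusive() re-evaluates its condition expression around every wakeup, with the necessary barriers, so the variables it examines need no volatile qualification. A reduced sketch of the idle-worker handshake (demo_* names are hypothetical):

	#include <linux/wait.h>
	#include <linux/sched.h>

	static DECLARE_WAIT_QUEUE_HEAD(demo_idle_wq);
	static int demo_work_available;

	static void demo_wait_for_work(void)
	{
		/* the condition is re-read after each wakeup */
		(void)wait_event_interruptible_exclusive(demo_idle_wq,
						demo_work_available != 0);
	}

	static void demo_post_work(void)
	{
		demo_work_available = 1;
		wake_up_nr(&demo_idle_wq, 1);	/* wake one exclusive waiter */
	}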
 
-
 static int
-xpc_daemonize_kthread(void *args)
+xpc_kthread_start(void *args)
 {
        partid_t partid = XPC_UNPACK_ARG1(args);
        u16 ch_number = XPC_UNPACK_ARG2(args);
@@ -725,9 +650,6 @@ xpc_daemonize_kthread(void *args)
        int n_needed;
        unsigned long irq_flags;
 
-
-       daemonize("xpc%02dc%d", partid, ch_number);
-
        dev_dbg(xpc_chan, "kthread starting, partid=%d, channel=%d\n",
                partid, ch_number);
 
@@ -756,10 +678,9 @@ xpc_daemonize_kthread(void *args)
                         * need one less than total #of messages to deliver.
                         */
                        n_needed = ch->w_remote_GP.put - ch->w_local_GP.get - 1;
-                       if (n_needed > 0 &&
-                                       !(ch->flags & XPC_C_DISCONNECTING)) {
+                       if (n_needed > 0 && !(ch->flags & XPC_C_DISCONNECTING))
                                xpc_activate_kthreads(ch, n_needed);
-                       }
+
                } else {
                        spin_unlock_irqrestore(&ch->lock, irq_flags);
                }
@@ -771,7 +692,7 @@ xpc_daemonize_kthread(void *args)
 
        spin_lock_irqsave(&ch->lock, irq_flags);
        if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
-                       !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
+           !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
                ch->flags |= XPC_C_DISCONNECTINGCALLOUT;
                spin_unlock_irqrestore(&ch->lock, irq_flags);
 
@@ -798,7 +719,6 @@ xpc_daemonize_kthread(void *args)
        return 0;
 }
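
xpc_kthread_start() receives both a partid and a channel number through the single void * thread argument: xpc.h packs them into one u64 with XPC_PACK_ARGS(), and the XPC_UNPACK_ARG*() macros recover them, as seen at the top of the function. A sketch of that packing technique (the real macros live in xpc.h, which this hunk does not show; DEMO_* names are hypothetical):

	/* pack two 32-bit values into the one u64 a thread argument carries */
	#define DEMO_PACK_ARGS(_a1, _a2) \
		((((u64)(_a1)) & 0xffffffffUL) | \
		 ((((u64)(_a2)) & 0xffffffffUL) << 32))
	#define DEMO_UNPACK_ARG1(_args)	(((u64)(_args)) & 0xffffffffUL)
	#define DEMO_UNPACK_ARG2(_args)	((((u64)(_args)) >> 32) & 0xffffffffUL)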
 
-
 /*
  * For each partition that XPC has established communications with, there is
  * a minimum of one kernel thread assigned to perform any operation that
@@ -813,13 +733,12 @@ xpc_daemonize_kthread(void *args)
  */
 void
 xpc_create_kthreads(struct xpc_channel *ch, int needed,
-                       int ignore_disconnecting)
+                   int ignore_disconnecting)
 {
        unsigned long irq_flags;
-       pid_t pid;
        u64 args = XPC_PACK_ARGS(ch->partid, ch->number);
        struct xpc_partition *part = &xpc_partitions[ch->partid];
-
+       struct task_struct *kthread;
 
        while (needed-- > 0) {
 
@@ -832,7 +751,7 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed,
                        if (!atomic_inc_not_zero(&ch->kthreads_assigned)) {
                                /* kthreads assigned had gone to zero */
                                BUG_ON(!(ch->flags &
-                                       XPC_C_DISCONNECTINGCALLOUT_MADE));
+                                        XPC_C_DISCONNECTINGCALLOUT_MADE));
                                break;
                        }
 
@@ -843,11 +762,12 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed,
                        if (atomic_inc_return(&part->nchannels_engaged) == 1)
                                xpc_mark_partition_engaged(part);
                }
-               (void) xpc_part_ref(part);
+               (void)xpc_part_ref(part);
                xpc_msgqueue_ref(ch);
 
-               pid = kernel_thread(xpc_daemonize_kthread, (void *) args, 0);
-               if (pid < 0) {
+               kthread = kthread_run(xpc_kthread_start, (void *)args,
+                                     "xpc%02dc%d", ch->partid, ch->number);
+               if (IS_ERR(kthread)) {
                        /* the fork failed */
 
                        /*
@@ -857,7 +777,7 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed,
                         * to this channel are blocked in the channel's
                         * registerer, because the only thing that will unblock
                         * them is the xpcDisconnecting callout that this
-                        * failed kernel_thread would have made.
+                        * failed kthread_run() would have made.
                         */
 
                        if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
@@ -869,7 +789,7 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed,
                        xpc_part_deref(part);
 
                        if (atomic_read(&ch->kthreads_assigned) <
-                                               ch->kthreads_idle_limit) {
+                           ch->kthreads_idle_limit) {
                                /*
                                 * Flag this as an error only if we have an
                                 * insufficient #of kthreads for the channel
@@ -877,17 +797,14 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed,
                                 */
                                spin_lock_irqsave(&ch->lock, irq_flags);
                                XPC_DISCONNECT_CHANNEL(ch, xpcLackOfResources,
-                                                               &irq_flags);
+                                                      &irq_flags);
                                spin_unlock_irqrestore(&ch->lock, irq_flags);
                        }
                        break;
                }
-
-               ch->kthreads_created++; // >>> temporary debug only!!!
        }
 }
 
-
 void
 xpc_disconnect_wait(int ch_number)
 {
@@ -897,14 +814,12 @@ xpc_disconnect_wait(int ch_number)
        struct xpc_channel *ch;
        int wakeup_channel_mgr;
 
-
        /* now wait for all callouts to the caller's function to cease */
        for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
                part = &xpc_partitions[partid];
 
-               if (!xpc_part_ref(part)) {
+               if (!xpc_part_ref(part))
                        continue;
-               }
 
                ch = &part->channels[ch_number];
 
@@ -923,7 +838,8 @@ xpc_disconnect_wait(int ch_number)
                        if (part->act_state != XPC_P_DEACTIVATING) {
                                spin_lock(&part->IPI_lock);
                                XPC_SET_IPI_FLAGS(part->local_IPI_amo,
-                                       ch->number, ch->delayed_IPI_flags);
+                                                 ch->number,
+                                                 ch->delayed_IPI_flags);
                                spin_unlock(&part->IPI_lock);
                                wakeup_channel_mgr = 1;
                        }
@@ -933,15 +849,13 @@ xpc_disconnect_wait(int ch_number)
                ch->flags &= ~XPC_C_WDISCONNECT;
                spin_unlock_irqrestore(&ch->lock, irq_flags);
 
-               if (wakeup_channel_mgr) {
+               if (wakeup_channel_mgr)
                        xpc_wakeup_channel_mgr(part);
-               }
 
                xpc_part_deref(part);
        }
 }
 
-
 static void
 xpc_do_exit(enum xpc_retval reason)
 {
@@ -950,7 +864,6 @@ xpc_do_exit(enum xpc_retval reason)
        struct xpc_partition *part;
        unsigned long printmsg_time, disengage_request_timeout = 0;
 
-
        /* a 'rmmod XPC' and a 'reboot' cannot both end up here together */
        DBUG_ON(xpc_exiting == 1);
 
@@ -971,10 +884,8 @@ xpc_do_exit(enum xpc_retval reason)
        /* wait for the heartbeat checker thread to exit */
        wait_for_completion(&xpc_hb_checker_exited);
 
-
        /* sleep for a 1/3 of a second or so */
-       (void) msleep_interruptible(300);
-
+       (void)msleep_interruptible(300);
 
        /* wait for all partitions to become inactive */
 
@@ -988,7 +899,7 @@ xpc_do_exit(enum xpc_retval reason)
                        part = &xpc_partitions[partid];
 
                        if (xpc_partition_disengaged(part) &&
-                                       part->act_state == XPC_P_INACTIVE) {
+                           part->act_state == XPC_P_INACTIVE) {
                                continue;
                        }
 
@@ -997,47 +908,46 @@ xpc_do_exit(enum xpc_retval reason)
                        XPC_DEACTIVATE_PARTITION(part, reason);
 
                        if (part->disengage_request_timeout >
-                                               disengage_request_timeout) {
+                           disengage_request_timeout) {
                                disengage_request_timeout =
-                                               part->disengage_request_timeout;
+                                   part->disengage_request_timeout;
                        }
                }
 
                if (xpc_partition_engaged(-1UL)) {
                        if (time_after(jiffies, printmsg_time)) {
                                dev_info(xpc_part, "waiting for remote "
-                                       "partitions to disengage, timeout in "
-                                       "%ld seconds\n",
-                                       (disengage_request_timeout - jiffies)
-                                                                       / HZ);
+                                        "partitions to disengage, timeout in "
+                                        "%ld seconds\n",
+                                        (disengage_request_timeout - jiffies)
+                                        / HZ);
                                printmsg_time = jiffies +
-                                       (XPC_DISENGAGE_PRINTMSG_INTERVAL * HZ);
+                                   (XPC_DISENGAGE_PRINTMSG_INTERVAL * HZ);
                                printed_waiting_msg = 1;
                        }
 
                } else if (active_part_count > 0) {
                        if (printed_waiting_msg) {
                                dev_info(xpc_part, "waiting for local partition"
-                                       " to disengage\n");
+                                        " to disengage\n");
                                printed_waiting_msg = 0;
                        }
 
                } else {
                        if (!xpc_disengage_request_timedout) {
                                dev_info(xpc_part, "all partitions have "
-                                       "disengaged\n");
+                                        "disengaged\n");
                        }
                        break;
                }
 
                /* sleep for a 1/3 of a second or so */
-               (void) msleep_interruptible(300);
+               (void)msleep_interruptible(300);
 
        } while (1);
 
        DBUG_ON(xpc_partition_engaged(-1UL));
 
-
        /* indicate to others that our reserved page is uninitialized */
        xpc_rsvd_page->vars_pa = 0;
 
@@ -1047,27 +957,24 @@ xpc_do_exit(enum xpc_retval reason)
 
        if (reason == xpcUnloading) {
                /* take ourselves off of the reboot_notifier_list */
-               (void) unregister_reboot_notifier(&xpc_reboot_notifier);
+               (void)unregister_reboot_notifier(&xpc_reboot_notifier);
 
                /* take ourselves off of the die_notifier list */
-               (void) unregister_die_notifier(&xpc_die_notifier);
+               (void)unregister_die_notifier(&xpc_die_notifier);
        }
 
        /* close down protections for IPI operations */
        xpc_restrict_IPI_ops();
 
-
        /* clear the interface to XPC's functions */
        xpc_clear_interface();
 
-       if (xpc_sysctl) {
+       if (xpc_sysctl)
                unregister_sysctl_table(xpc_sysctl);
-       }
 
        kfree(xpc_remote_copy_buffer_base);
 }
 
-
 /*
  * This function is called when the system is being rebooted.
  */
@@ -1076,7 +983,6 @@ xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused)
 {
        enum xpc_retval reason;
 
-
        switch (event) {
        case SYS_RESTART:
                reason = xpcSystemReboot;
@@ -1095,7 +1001,6 @@ xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused)
        return NOTIFY_DONE;
 }
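
xpc_system_reboot() is wired up through the standard notifier mechanism; the xpc_reboot_notifier block registered in xpc_init() below simply points its notifier_call at this function. The general shape, with a hypothetical demo notifier:

	#include <linux/notifier.h>
	#include <linux/reboot.h>

	static int demo_reboot_event(struct notifier_block *nb,
				     unsigned long event, void *unused)
	{
		/* event is SYS_RESTART, SYS_HALT or SYS_POWER_OFF */
		return NOTIFY_DONE;
	}

	static struct notifier_block demo_reboot_notifier = {
		.notifier_call = demo_reboot_event,
	};

	static int __init demo_init(void)
	{
		return register_reboot_notifier(&demo_reboot_notifier);
	}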
 
-
 /*
  * Notify other partitions to disengage from all references to our memory.
  */
@@ -1107,17 +1012,16 @@ xpc_die_disengage(void)
        unsigned long engaged;
        long time, printmsg_time, disengage_request_timeout;
 
-
        /* keep xpc_hb_checker thread from doing anything (just in case) */
        xpc_exiting = 1;
 
-       xpc_vars->heartbeating_to_mask = 0;  /* indicate we're deactivated */
+       xpc_vars->heartbeating_to_mask = 0;     /* indicate we're deactivated */
 
        for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
                part = &xpc_partitions[partid];
 
                if (!XPC_SUPPORTS_DISENGAGE_REQUEST(part->
-                                                       remote_vars_version)) {
+                   remote_vars_version)) {
 
                        /* just in case it was left set by an earlier XPC */
                        xpc_clear_partition_engaged(1UL << partid);
@@ -1125,7 +1029,7 @@ xpc_die_disengage(void)
                }
 
                if (xpc_partition_engaged(1UL << partid) ||
-                                       part->act_state != XPC_P_INACTIVE) {
+                   part->act_state != XPC_P_INACTIVE) {
                        xpc_request_partition_disengage(part);
                        xpc_mark_partition_disengaged(part);
                        xpc_IPI_send_disengage(part);
@@ -1134,9 +1038,9 @@ xpc_die_disengage(void)
 
        time = rtc_time();
        printmsg_time = time +
-               (XPC_DISENGAGE_PRINTMSG_INTERVAL * sn_rtc_cycles_per_second);
+           (XPC_DISENGAGE_PRINTMSG_INTERVAL * sn_rtc_cycles_per_second);
        disengage_request_timeout = time +
-               (xpc_disengage_request_timelimit * sn_rtc_cycles_per_second);
+           (xpc_disengage_request_timelimit * sn_rtc_cycles_per_second);
 
        /* wait for all other partitions to disengage from us */
 
@@ -1152,8 +1056,8 @@ xpc_die_disengage(void)
                        for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
                                if (engaged & (1UL << partid)) {
                                        dev_info(xpc_part, "disengage from "
-                                               "remote partition %d timed "
-                                               "out\n", partid);
+                                                "remote partition %d timed "
+                                                "out\n", partid);
                                }
                        }
                        break;
@@ -1161,17 +1065,16 @@ xpc_die_disengage(void)
 
                if (time >= printmsg_time) {
                        dev_info(xpc_part, "waiting for remote partitions to "
-                               "disengage, timeout in %ld seconds\n",
-                               (disengage_request_timeout - time) /
-                                               sn_rtc_cycles_per_second);
+                                "disengage, timeout in %ld seconds\n",
+                                (disengage_request_timeout - time) /
+                                sn_rtc_cycles_per_second);
                        printmsg_time = time +
-                                       (XPC_DISENGAGE_PRINTMSG_INTERVAL *
-                                               sn_rtc_cycles_per_second);
+                           (XPC_DISENGAGE_PRINTMSG_INTERVAL *
+                            sn_rtc_cycles_per_second);
                }
        }
 }
 
-
 /*
  * This function is called when the system is being restarted or halted due
  * to some sort of system failure. If this is the case we need to notify the
@@ -1191,9 +1094,9 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused)
 
        case DIE_KDEBUG_ENTER:
                /* Should lack of heartbeat be ignored by other partitions? */
-               if (!xpc_kdebug_ignore) {
+               if (!xpc_kdebug_ignore)
                        break;
-               }
+
                /* fall through */
        case DIE_MCA_MONARCH_ENTER:
        case DIE_INIT_MONARCH_ENTER:
@@ -1203,9 +1106,9 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused)
 
        case DIE_KDEBUG_LEAVE:
                /* Is lack of heartbeat being ignored by other partitions? */
-               if (!xpc_kdebug_ignore) {
+               if (!xpc_kdebug_ignore)
                        break;
-               }
+
                /* fall through */
        case DIE_MCA_MONARCH_LEAVE:
        case DIE_INIT_MONARCH_LEAVE:
@@ -1217,26 +1120,23 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused)
        return NOTIFY_DONE;
 }
 
-
 int __init
 xpc_init(void)
 {
        int ret;
        partid_t partid;
        struct xpc_partition *part;
-       pid_t pid;
+       struct task_struct *kthread;
        size_t buf_size;
 
-
-       if (!ia64_platform_is("sn2")) {
+       if (!ia64_platform_is("sn2"))
                return -ENODEV;
-       }
-
 
        buf_size = max(XPC_RP_VARS_SIZE,
-                               XPC_RP_HEADER_SIZE + XP_NASID_MASK_BYTES);
+                      XPC_RP_HEADER_SIZE + XP_NASID_MASK_BYTES);
        xpc_remote_copy_buffer = xpc_kmalloc_cacheline_aligned(buf_size,
-                                    GFP_KERNEL, &xpc_remote_copy_buffer_base);
+                                                              GFP_KERNEL,
+                                                 &xpc_remote_copy_buffer_base);
        if (xpc_remote_copy_buffer == NULL)
                return -ENOMEM;
 
@@ -1256,7 +1156,7 @@ xpc_init(void)
        for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
                part = &xpc_partitions[partid];
 
-               DBUG_ON((u64) part != L1_CACHE_ALIGN((u64) part));
+               DBUG_ON((u64)part != L1_CACHE_ALIGN((u64)part));
 
                part->act_IRQ_rcvd = 0;
                spin_lock_init(&part->act_lock);
@@ -1265,8 +1165,8 @@ xpc_init(void)
 
                init_timer(&part->disengage_request_timer);
                part->disengage_request_timer.function =
-                               xpc_timeout_partition_disengage_request;
-               part->disengage_request_timer.data = (unsigned long) part;
+                   xpc_timeout_partition_disengage_request;
+               part->disengage_request_timer.data = (unsigned long)part;
 
                part->setup_state = XPC_P_UNSET;
                init_waitqueue_head(&part->teardown_wq);
@@ -1292,16 +1192,15 @@ xpc_init(void)
         * but rather immediately process the interrupt.
         */
        ret = request_irq(SGI_XPC_ACTIVATE, xpc_act_IRQ_handler, 0,
-                                                       "xpc hb", NULL);
+                         "xpc hb", NULL);
        if (ret != 0) {
                dev_err(xpc_part, "can't register ACTIVATE IRQ handler, "
                        "errno=%d\n", -ret);
 
                xpc_restrict_IPI_ops();
 
-               if (xpc_sysctl) {
+               if (xpc_sysctl)
                        unregister_sysctl_table(xpc_sysctl);
-               }
 
                kfree(xpc_remote_copy_buffer_base);
                return -EBUSY;
@@ -1319,26 +1218,22 @@ xpc_init(void)
                free_irq(SGI_XPC_ACTIVATE, NULL);
                xpc_restrict_IPI_ops();
 
-               if (xpc_sysctl) {
+               if (xpc_sysctl)
                        unregister_sysctl_table(xpc_sysctl);
-               }
 
                kfree(xpc_remote_copy_buffer_base);
                return -EBUSY;
        }
 
-
        /* add ourselves to the reboot_notifier_list */
        ret = register_reboot_notifier(&xpc_reboot_notifier);
-       if (ret != 0) {
+       if (ret != 0)
                dev_warn(xpc_part, "can't register reboot notifier\n");
-       }
 
        /* add ourselves to the die_notifier list */
        ret = register_die_notifier(&xpc_die_notifier);
-       if (ret != 0) {
+       if (ret != 0)
                dev_warn(xpc_part, "can't register die notifier\n");
-       }
 
        init_timer(&xpc_hb_timer);
        xpc_hb_timer.function = xpc_hb_beater;
@@ -1347,39 +1242,38 @@ xpc_init(void)
         * The real work-horse behind xpc.  This processes incoming
         * interrupts and monitors remote heartbeats.
         */
-       pid = kernel_thread(xpc_hb_checker, NULL, 0);
-       if (pid < 0) {
+       kthread = kthread_run(xpc_hb_checker, NULL, XPC_HB_CHECK_THREAD_NAME);
+       if (IS_ERR(kthread)) {
                dev_err(xpc_part, "failed while forking hb check thread\n");
 
                /* indicate to others that our reserved page is uninitialized */
                xpc_rsvd_page->vars_pa = 0;
 
                /* take ourselves off of the reboot_notifier_list */
-               (void) unregister_reboot_notifier(&xpc_reboot_notifier);
+               (void)unregister_reboot_notifier(&xpc_reboot_notifier);
 
                /* take ourselves off of the die_notifier list */
-               (void) unregister_die_notifier(&xpc_die_notifier);
+               (void)unregister_die_notifier(&xpc_die_notifier);
 
                del_timer_sync(&xpc_hb_timer);
                free_irq(SGI_XPC_ACTIVATE, NULL);
                xpc_restrict_IPI_ops();
 
-               if (xpc_sysctl) {
+               if (xpc_sysctl)
                        unregister_sysctl_table(xpc_sysctl);
-               }
 
                kfree(xpc_remote_copy_buffer_base);
                return -EBUSY;
        }
 
-
        /*
         * Startup a thread that will attempt to discover other partitions to
         * activate based on info provided by SAL. This new thread is short
         * lived and will exit once discovery is complete.
         */
-       pid = kernel_thread(xpc_initiate_discovery, NULL, 0);
-       if (pid < 0) {
+       kthread = kthread_run(xpc_initiate_discovery, NULL,
+                             XPC_DISCOVERY_THREAD_NAME);
+       if (IS_ERR(kthread)) {
                dev_err(xpc_part, "failed while forking discovery thread\n");
 
                /* mark this new thread as a non-starter */
@@ -1389,7 +1283,6 @@ xpc_init(void)
                return -EBUSY;
        }
 
-
        /* set the interface to point at XPC's functions */
        xpc_set_interface(xpc_initiate_connect, xpc_initiate_disconnect,
                          xpc_initiate_allocate, xpc_initiate_send,
@@ -1398,16 +1291,16 @@ xpc_init(void)
 
        return 0;
 }
-module_init(xpc_init);
 
+module_init(xpc_init);
 
 void __exit
 xpc_exit(void)
 {
        xpc_do_exit(xpcUnloading);
 }
-module_exit(xpc_exit);
 
+module_exit(xpc_exit);
 
 MODULE_AUTHOR("Silicon Graphics, Inc.");
 MODULE_DESCRIPTION("Cross Partition Communication (XPC) support");
@@ -1415,17 +1308,16 @@ MODULE_LICENSE("GPL");
 
 module_param(xpc_hb_interval, int, 0);
 MODULE_PARM_DESC(xpc_hb_interval, "Number of seconds between "
-               "heartbeat increments.");
+                "heartbeat increments.");
 
 module_param(xpc_hb_check_interval, int, 0);
 MODULE_PARM_DESC(xpc_hb_check_interval, "Number of seconds between "
-               "heartbeat checks.");
+                "heartbeat checks.");
 
 module_param(xpc_disengage_request_timelimit, int, 0);
 MODULE_PARM_DESC(xpc_disengage_request_timelimit, "Number of seconds to wait "
-               "for disengage request to complete.");
+                "for disengage request to complete.");
 
 module_param(xpc_kdebug_ignore, int, 0);
 MODULE_PARM_DESC(xpc_kdebug_ignore, "Should lack of heartbeat be ignored by "
-               "other partitions when dropping into kdebug.");
-
+                "other partitions when dropping into kdebug.");
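
All four parameters above use permission 0, so they have no sysfs entry and can only be set when the module is loaded, for example:

	modprobe xpc xpc_hb_interval=5 xpc_hb_check_interval=20

(the values shown are illustrative, not the driver's defaults).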
similarity index 79%
rename from arch/ia64/sn/kernel/xpc_partition.c
rename to drivers/misc/sgi-xp/xpc_partition.c
index 9e97c26..27e200e 100644 (file)
@@ -3,10 +3,9 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (c) 2004-2006 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (c) 2004-2008 Silicon Graphics, Inc.  All Rights Reserved.
  */
 
-
 /*
  * Cross Partition Communication (XPC) partition support.
  *
@@ -16,7 +15,6 @@
  *
  */
 
-
 #include <linux/kernel.h>
 #include <linux/sysctl.h>
 #include <linux/cache.h>
 #include <asm/sn/sn_sal.h>
 #include <asm/sn/nodepda.h>
 #include <asm/sn/addrs.h>
-#include <asm/sn/xpc.h>
-
+#include "xpc.h"
 
 /* XPC is exiting flag */
 int xpc_exiting;
 
-
 /* SH_IPI_ACCESS shub register value on startup */
 static u64 xpc_sh1_IPI_access;
 static u64 xpc_sh2_IPI_access0;
@@ -42,11 +38,9 @@ static u64 xpc_sh2_IPI_access1;
 static u64 xpc_sh2_IPI_access2;
 static u64 xpc_sh2_IPI_access3;
 
-
 /* original protection values for each node */
 u64 xpc_prot_vec[MAX_NUMNODES];
 
-
 /* this partition's reserved page pointers */
 struct xpc_rsvd_page *xpc_rsvd_page;
 static u64 *xpc_part_nasids;
@@ -57,7 +51,6 @@ struct xpc_vars_part *xpc_vars_part;
 static int xp_nasid_mask_bytes;        /* actual size in bytes of nasid mask */
 static int xp_nasid_mask_words;        /* actual size in words of nasid mask */
 
-
 /*
  * For performance reasons, each entry of xpc_partitions[] is cacheline
  * aligned. And xpc_partitions[] is padded with an additional entry at the
@@ -66,7 +59,6 @@ static int xp_nasid_mask_words;       /* actual size in words of nasid mask */
  */
 struct xpc_partition xpc_partitions[XP_MAX_PARTITIONS + 1];
 
-
 /*
  * Generic buffer used to store a local copy of portions of a remote
  * partition's reserved page (either its header and part_nasids mask,
@@ -75,7 +67,6 @@ struct xpc_partition xpc_partitions[XP_MAX_PARTITIONS + 1];
 char *xpc_remote_copy_buffer;
 void *xpc_remote_copy_buffer_base;
 
-
 /*
  * Guarantee that the kmalloc'd memory is cacheline aligned.
  */
@@ -84,22 +75,21 @@ xpc_kmalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
 {
        /* see if kmalloc will give us cacheline aligned memory by default */
        *base = kmalloc(size, flags);
-       if (*base == NULL) {
+       if (*base == NULL)
                return NULL;
-       }
-       if ((u64) *base == L1_CACHE_ALIGN((u64) *base)) {
+
+       if ((u64)*base == L1_CACHE_ALIGN((u64)*base))
                return *base;
-       }
+
        kfree(*base);
 
        /* nope, we'll have to do it ourselves */
        *base = kmalloc(size + L1_CACHE_BYTES, flags);
-       if (*base == NULL) {
+       if (*base == NULL)
                return NULL;
-       }
-       return (void *) L1_CACHE_ALIGN((u64) *base);
-}
 
+       return (void *)L1_CACHE_ALIGN((u64)*base);
+}
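
The important part of this helper's contract, visible in its callers above, is that the returned pointer may sit inside a larger allocation: the caller must keep *base and kfree() that, never the aligned pointer itself. A usage sketch (size and names hypothetical):

	static int demo_alloc(void)
	{
		void *buf_base;
		char *buf;

		buf = xpc_kmalloc_cacheline_aligned(1024, GFP_KERNEL,
						    &buf_base);
		if (buf == NULL)
			return -ENOMEM;

		/* ... use the cacheline-aligned buf ... */

		kfree(buf_base);   /* free the underlying allocation, not buf */
		return 0;
	}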
 
 /*
  * Given a nasid, get the physical address of the  partition's reserved page
@@ -117,25 +107,24 @@ xpc_get_rsvd_page_pa(int nasid)
        u64 buf_len = 0;
        void *buf_base = NULL;
 
-
        while (1) {
 
                status = sn_partition_reserved_page_pa(buf, &cookie, &rp_pa,
-                                                               &len);
+                                                      &len);
 
                dev_dbg(xpc_part, "SAL returned with status=%li, cookie="
                        "0x%016lx, address=0x%016lx, len=0x%016lx\n",
                        status, cookie, rp_pa, len);
 
-               if (status != SALRET_MORE_PASSES) {
+               if (status != SALRET_MORE_PASSES)
                        break;
-               }
 
                if (L1_CACHE_ALIGN(len) > buf_len) {
                        kfree(buf_base);
                        buf_len = L1_CACHE_ALIGN(len);
-                       buf = (u64) xpc_kmalloc_cacheline_aligned(buf_len,
-                                                       GFP_KERNEL, &buf_base);
+                       buf = (u64)xpc_kmalloc_cacheline_aligned(buf_len,
+                                                                GFP_KERNEL,
+                                                                &buf_base);
                        if (buf_base == NULL) {
                                dev_err(xpc_part, "unable to kmalloc "
                                        "len=0x%016lx\n", buf_len);
@@ -145,7 +134,7 @@ xpc_get_rsvd_page_pa(int nasid)
                }
 
                bte_res = xp_bte_copy(rp_pa, buf, buf_len,
-                                       (BTE_NOTIFY | BTE_WACQUIRE), NULL);
+                                     (BTE_NOTIFY | BTE_WACQUIRE), NULL);
                if (bte_res != BTE_SUCCESS) {
                        dev_dbg(xpc_part, "xp_bte_copy failed %i\n", bte_res);
                        status = SALRET_ERROR;
@@ -155,14 +144,13 @@ xpc_get_rsvd_page_pa(int nasid)
 
        kfree(buf_base);
 
-       if (status != SALRET_OK) {
+       if (status != SALRET_OK)
                rp_pa = 0;
-       }
+
        dev_dbg(xpc_part, "reserved page at phys address 0x%016lx\n", rp_pa);
        return rp_pa;
 }
 
-
 /*
  * Fill the partition reserved page with the information needed by
  * other partitions to discover we are alive and establish initial
@@ -176,7 +164,6 @@ xpc_rsvd_page_init(void)
        u64 rp_pa, nasid_array = 0;
        int i, ret;
 
-
        /* get the local reserved page's address */
 
        preempt_disable();
@@ -186,7 +173,7 @@ xpc_rsvd_page_init(void)
                dev_err(xpc_part, "SAL failed to locate the reserved page\n");
                return NULL;
        }
-       rp = (struct xpc_rsvd_page *) __va(rp_pa);
+       rp = (struct xpc_rsvd_page *)__va(rp_pa);
 
        if (rp->partid != sn_partition_id) {
                dev_err(xpc_part, "the reserved page's partid of %d should be "
@@ -222,8 +209,9 @@ xpc_rsvd_page_init(void)
         * on subsequent loads of XPC. This AMO page is never freed, and its
         * memory protections are never restricted.
         */
-       if ((amos_page = xpc_vars->amos_page) == NULL) {
-               amos_page = (AMO_t *) TO_AMO(uncached_alloc_page(0));
+       amos_page = xpc_vars->amos_page;
+       if (amos_page == NULL) {
+               amos_page = (AMO_t *)TO_AMO(uncached_alloc_page(0));
                if (amos_page == NULL) {
                        dev_err(xpc_part, "can't allocate page of AMOs\n");
                        return NULL;
@@ -234,30 +222,31 @@ xpc_rsvd_page_init(void)
                 * when xpc_allow_IPI_ops() is called via xpc_hb_init().
                 */
                if (!enable_shub_wars_1_1()) {
-                       ret = sn_change_memprotect(ia64_tpa((u64) amos_page),
-                                       PAGE_SIZE, SN_MEMPROT_ACCESS_CLASS_1,
-                                       &nasid_array);
+                       ret = sn_change_memprotect(ia64_tpa((u64)amos_page),
+                                                  PAGE_SIZE,
+                                                  SN_MEMPROT_ACCESS_CLASS_1,
+                                                  &nasid_array);
                        if (ret != 0) {
                                dev_err(xpc_part, "can't change memory "
                                        "protections\n");
                                uncached_free_page(__IA64_UNCACHED_OFFSET |
-                                                  TO_PHYS((u64) amos_page));
+                                                  TO_PHYS((u64)amos_page));
                                return NULL;
                        }
                }
-       } else if (!IS_AMO_ADDRESS((u64) amos_page)) {
+       } else if (!IS_AMO_ADDRESS((u64)amos_page)) {
                /*
                 * EFI's XPBOOT can also set amos_page in the reserved page,
                 * but it happens to leave it as an uncached physical address
                 * and we need it to be an uncached virtual, so we'll have to
                 * convert it.
                 */
-               if (!IS_AMO_PHYS_ADDRESS((u64) amos_page)) {
+               if (!IS_AMO_PHYS_ADDRESS((u64)amos_page)) {
                        dev_err(xpc_part, "previously used amos_page address "
-                               "is bad = 0x%p\n", (void *) amos_page);
+                               "is bad = 0x%p\n", (void *)amos_page);
                        return NULL;
                }
-               amos_page = (AMO_t *) TO_AMO((u64) amos_page);
+               amos_page = (AMO_t *)TO_AMO((u64)amos_page);
        }
 
        /* clear xpc_vars */
@@ -267,22 +256,20 @@ xpc_rsvd_page_init(void)
        xpc_vars->act_nasid = cpuid_to_nasid(0);
        xpc_vars->act_phys_cpuid = cpu_physical_id(0);
        xpc_vars->vars_part_pa = __pa(xpc_vars_part);
-       xpc_vars->amos_page_pa = ia64_tpa((u64) amos_page);
-       xpc_vars->amos_page = amos_page;  /* save for next load of XPC */
-
+       xpc_vars->amos_page_pa = ia64_tpa((u64)amos_page);
+       xpc_vars->amos_page = amos_page;        /* save for next load of XPC */
 
        /* clear xpc_vars_part */
-       memset((u64 *) xpc_vars_part, 0, sizeof(struct xpc_vars_part) *
-                                                       XP_MAX_PARTITIONS);
+       memset((u64 *)xpc_vars_part, 0, sizeof(struct xpc_vars_part) *
+              XP_MAX_PARTITIONS);
 
        /* initialize the activate IRQ related AMO variables */
-       for (i = 0; i < xp_nasid_mask_words; i++) {
-               (void) xpc_IPI_init(XPC_ACTIVATE_IRQ_AMOS + i);
-       }
+       for (i = 0; i < xp_nasid_mask_words; i++)
+               (void)xpc_IPI_init(XPC_ACTIVATE_IRQ_AMOS + i);
 
        /* initialize the engaged remote partitions related AMO variables */
-       (void) xpc_IPI_init(XPC_ENGAGED_PARTITIONS_AMO);
-       (void) xpc_IPI_init(XPC_DISENGAGE_REQUEST_AMO);
+       (void)xpc_IPI_init(XPC_ENGAGED_PARTITIONS_AMO);
+       (void)xpc_IPI_init(XPC_DISENGAGE_REQUEST_AMO);
 
        /* timestamp of when reserved page was setup by XPC */
        rp->stamp = CURRENT_TIME;
@@ -296,7 +283,6 @@ xpc_rsvd_page_init(void)
        return rp;
 }
 
-
 /*
  * Change protections to allow IPI operations (and AMO operations on
  * Shub 1.1 systems).
@@ -307,39 +293,38 @@ xpc_allow_IPI_ops(void)
        int node;
        int nasid;
 
-
-       // >>> Change SH_IPI_ACCESS code to use SAL call once it is available.
+       /* >>> Change SH_IPI_ACCESS code to use SAL call once it is available */
 
        if (is_shub2()) {
                xpc_sh2_IPI_access0 =
-                       (u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS0));
+                   (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS0));
                xpc_sh2_IPI_access1 =
-                       (u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS1));
+                   (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS1));
                xpc_sh2_IPI_access2 =
-                       (u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS2));
+                   (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS2));
                xpc_sh2_IPI_access3 =
-                       (u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS3));
+                   (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS3));
 
                for_each_online_node(node) {
                        nasid = cnodeid_to_nasid(node);
-                       HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0),
-                                                               -1UL);
-                       HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1),
-                                                               -1UL);
-                       HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2),
-                                                               -1UL);
-                       HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3),
-                                                               -1UL);
+                       HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0),
+                             -1UL);
+                       HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1),
+                             -1UL);
+                       HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2),
+                             -1UL);
+                       HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3),
+                             -1UL);
                }
 
        } else {
                xpc_sh1_IPI_access =
-                       (u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH1_IPI_ACCESS));
+                   (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH1_IPI_ACCESS));
 
                for_each_online_node(node) {
                        nasid = cnodeid_to_nasid(node);
-                       HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS),
-                                                               -1UL);
+                       HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS),
+                             -1UL);
 
                        /*
                         * Since the BIST collides with memory operations on
@@ -347,21 +332,23 @@ xpc_allow_IPI_ops(void)
                         */
                        if (enable_shub_wars_1_1()) {
                                /* open up everything */
-                               xpc_prot_vec[node] = (u64) HUB_L((u64 *)
-                                               GLOBAL_MMR_ADDR(nasid,
-                                               SH1_MD_DQLP_MMR_DIR_PRIVEC0));
-                               HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid,
-                                               SH1_MD_DQLP_MMR_DIR_PRIVEC0),
-                                                               -1UL);
-                               HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid,
-                                               SH1_MD_DQRP_MMR_DIR_PRIVEC0),
-                                                               -1UL);
+                               xpc_prot_vec[node] = (u64)HUB_L((u64 *)
+                                                               GLOBAL_MMR_ADDR
+                                                               (nasid,
+                                                 SH1_MD_DQLP_MMR_DIR_PRIVEC0));
+                               HUB_S((u64 *)
+                                     GLOBAL_MMR_ADDR(nasid,
+                                                  SH1_MD_DQLP_MMR_DIR_PRIVEC0),
+                                     -1UL);
+                               HUB_S((u64 *)
+                                     GLOBAL_MMR_ADDR(nasid,
+                                                  SH1_MD_DQRP_MMR_DIR_PRIVEC0),
+                                     -1UL);
                        }
                }
        }
 }
 
-
 /*
  * Restrict protections to disallow IPI operations (and AMO operations on
  * Shub 1.1 systems).
@@ -372,43 +359,41 @@ xpc_restrict_IPI_ops(void)
        int node;
        int nasid;
 
-
-       // >>> Change SH_IPI_ACCESS code to use SAL call once it is available.
+       /* >>> Change SH_IPI_ACCESS code to use SAL call once it is available */
 
        if (is_shub2()) {
 
                for_each_online_node(node) {
                        nasid = cnodeid_to_nasid(node);
-                       HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0),
-                                                       xpc_sh2_IPI_access0);
-                       HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1),
-                                                       xpc_sh2_IPI_access1);
-                       HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2),
-                                                       xpc_sh2_IPI_access2);
-                       HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3),
-                                                       xpc_sh2_IPI_access3);
+                       HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0),
+                             xpc_sh2_IPI_access0);
+                       HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1),
+                             xpc_sh2_IPI_access1);
+                       HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2),
+                             xpc_sh2_IPI_access2);
+                       HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3),
+                             xpc_sh2_IPI_access3);
                }
 
        } else {
 
                for_each_online_node(node) {
                        nasid = cnodeid_to_nasid(node);
-                       HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS),
-                                                       xpc_sh1_IPI_access);
+                       HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS),
+                             xpc_sh1_IPI_access);
 
                        if (enable_shub_wars_1_1()) {
-                               HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid,
-                                               SH1_MD_DQLP_MMR_DIR_PRIVEC0),
-                                                       xpc_prot_vec[node]);
-                               HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid,
-                                               SH1_MD_DQRP_MMR_DIR_PRIVEC0),
-                                                       xpc_prot_vec[node]);
+                               HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid,
+                                                  SH1_MD_DQLP_MMR_DIR_PRIVEC0),
+                                     xpc_prot_vec[node]);
+                               HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid,
+                                                  SH1_MD_DQRP_MMR_DIR_PRIVEC0),
+                                     xpc_prot_vec[node]);
                        }
                }
        }
 }
 
-
 /*
  * At periodic intervals, scan through all active partitions and ensure
  * their heartbeat is still active.  If not, the partition is deactivated.
@@ -421,34 +406,31 @@ xpc_check_remote_hb(void)
        partid_t partid;
        bte_result_t bres;
 
-
-       remote_vars = (struct xpc_vars *) xpc_remote_copy_buffer;
+       remote_vars = (struct xpc_vars *)xpc_remote_copy_buffer;
 
        for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
 
-               if (xpc_exiting) {
+               if (xpc_exiting)
                        break;
-               }
 
-               if (partid == sn_partition_id) {
+               if (partid == sn_partition_id)
                        continue;
-               }
 
                part = &xpc_partitions[partid];
 
                if (part->act_state == XPC_P_INACTIVE ||
-                               part->act_state == XPC_P_DEACTIVATING) {
+                   part->act_state == XPC_P_DEACTIVATING) {
                        continue;
                }
 
                /* pull the remote_hb cache line */
                bres = xp_bte_copy(part->remote_vars_pa,
-                                       (u64) remote_vars,
-                                       XPC_RP_VARS_SIZE,
-                                       (BTE_NOTIFY | BTE_WACQUIRE), NULL);
+                                  (u64)remote_vars,
+                                  XPC_RP_VARS_SIZE,
+                                  (BTE_NOTIFY | BTE_WACQUIRE), NULL);
                if (bres != BTE_SUCCESS) {
                        XPC_DEACTIVATE_PARTITION(part,
-                                               xpc_map_bte_errors(bres));
+                                                xpc_map_bte_errors(bres));
                        continue;
                }
 
@@ -459,8 +441,8 @@ xpc_check_remote_hb(void)
                        remote_vars->heartbeating_to_mask);
 
                if (((remote_vars->heartbeat == part->last_heartbeat) &&
-                       (remote_vars->heartbeat_offline == 0)) ||
-                            !xpc_hb_allowed(sn_partition_id, remote_vars)) {
+                    (remote_vars->heartbeat_offline == 0)) ||
+                   !xpc_hb_allowed(sn_partition_id, remote_vars)) {
 
                        XPC_DEACTIVATE_PARTITION(part, xpcNoHeartbeat);
                        continue;
@@ -470,7 +452,6 @@ xpc_check_remote_hb(void)
        }
 }
 
-
 /*
  * Get a copy of a portion of the remote partition's rsvd page.
  *
@@ -480,59 +461,48 @@ xpc_check_remote_hb(void)
  */
 static enum xpc_retval
 xpc_get_remote_rp(int nasid, u64 *discovered_nasids,
-               struct xpc_rsvd_page *remote_rp, u64 *remote_rp_pa)
+                 struct xpc_rsvd_page *remote_rp, u64 *remote_rp_pa)
 {
        int bres, i;
 
-
        /* get the reserved page's physical address */
 
        *remote_rp_pa = xpc_get_rsvd_page_pa(nasid);
-       if (*remote_rp_pa == 0) {
+       if (*remote_rp_pa == 0)
                return xpcNoRsvdPageAddr;
-       }
-
 
        /* pull over the reserved page header and part_nasids mask */
-       bres = xp_bte_copy(*remote_rp_pa, (u64) remote_rp,
-                               XPC_RP_HEADER_SIZE + xp_nasid_mask_bytes,
-                               (BTE_NOTIFY | BTE_WACQUIRE), NULL);
-       if (bres != BTE_SUCCESS) {
+       bres = xp_bte_copy(*remote_rp_pa, (u64)remote_rp,
+                          XPC_RP_HEADER_SIZE + xp_nasid_mask_bytes,
+                          (BTE_NOTIFY | BTE_WACQUIRE), NULL);
+       if (bres != BTE_SUCCESS)
                return xpc_map_bte_errors(bres);
-       }
-
 
        if (discovered_nasids != NULL) {
                u64 *remote_part_nasids = XPC_RP_PART_NASIDS(remote_rp);
 
-
-               for (i = 0; i < xp_nasid_mask_words; i++) {
+               for (i = 0; i < xp_nasid_mask_words; i++)
                        discovered_nasids[i] |= remote_part_nasids[i];
-               }
        }
 
-
        /* check that the partid is for another partition */
 
        if (remote_rp->partid < 1 ||
-                               remote_rp->partid > (XP_MAX_PARTITIONS - 1)) {
+           remote_rp->partid > (XP_MAX_PARTITIONS - 1)) {
                return xpcInvalidPartid;
        }
 
-       if (remote_rp->partid == sn_partition_id) {
+       if (remote_rp->partid == sn_partition_id)
                return xpcLocalPartid;
-       }
-
 
        if (XPC_VERSION_MAJOR(remote_rp->version) !=
-                                       XPC_VERSION_MAJOR(XPC_RP_VERSION)) {
+           XPC_VERSION_MAJOR(XPC_RP_VERSION)) {
                return xpcBadVersion;
        }
 
        return xpcSuccess;
 }
 
-
 /*
  * Get a copy of the remote partition's XPC variables from the reserved page.
  *
@@ -544,34 +514,30 @@ xpc_get_remote_vars(u64 remote_vars_pa, struct xpc_vars *remote_vars)
 {
        int bres;
 
-
-       if (remote_vars_pa == 0) {
+       if (remote_vars_pa == 0)
                return xpcVarsNotSet;
-       }
 
        /* pull over the cross partition variables */
-       bres = xp_bte_copy(remote_vars_pa, (u64) remote_vars, XPC_RP_VARS_SIZE,
-                               (BTE_NOTIFY | BTE_WACQUIRE), NULL);
-       if (bres != BTE_SUCCESS) {
+       bres = xp_bte_copy(remote_vars_pa, (u64)remote_vars, XPC_RP_VARS_SIZE,
+                          (BTE_NOTIFY | BTE_WACQUIRE), NULL);
+       if (bres != BTE_SUCCESS)
                return xpc_map_bte_errors(bres);
-       }
 
        if (XPC_VERSION_MAJOR(remote_vars->version) !=
-                                       XPC_VERSION_MAJOR(XPC_V_VERSION)) {
+           XPC_VERSION_MAJOR(XPC_V_VERSION)) {
                return xpcBadVersion;
        }
 
        return xpcSuccess;
 }
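
Both xpc_get_remote_rp() and xpc_get_remote_vars() follow the same pull-over-BTE shape: copy a region from the remote partition's physical address into a local buffer with xp_bte_copy(), then translate any BTE failure into an xpc_retval. Factored out as a sketch (demo_pull is hypothetical, but every call in it appears in the code above and comes from this driver's headers):

	static enum xpc_retval
	demo_pull(u64 remote_pa, void *local_buf, u64 len)
	{
		bte_result_t bres;

		bres = xp_bte_copy(remote_pa, (u64)local_buf, len,
				   (BTE_NOTIFY | BTE_WACQUIRE), NULL);
		if (bres != BTE_SUCCESS)
			return xpc_map_bte_errors(bres);

		return xpcSuccess;
	}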
 
-
 /*
  * Update the remote partition's info.
  */
 static void
 xpc_update_partition_info(struct xpc_partition *part, u8 remote_rp_version,
-               struct timespec *remote_rp_stamp, u64 remote_rp_pa,
-               u64 remote_vars_pa, struct xpc_vars *remote_vars)
+                         struct timespec *remote_rp_stamp, u64 remote_rp_pa,
+                         u64 remote_vars_pa, struct xpc_vars *remote_vars)
 {
        part->remote_rp_version = remote_rp_version;
        dev_dbg(xpc_part, "  remote_rp_version = 0x%016x\n",
@@ -613,7 +579,6 @@ xpc_update_partition_info(struct xpc_partition *part, u8 remote_rp_version,
                part->remote_vars_version);
 }
 
-
 /*
  * Prior code has determined the nasid which generated an IPI.  Inspect
  * that nasid to determine if its partition needs to be activated or
@@ -643,54 +608,51 @@ xpc_identify_act_IRQ_req(int nasid)
        struct xpc_partition *part;
        enum xpc_retval ret;
 
-
        /* pull over the reserved page structure */
 
-       remote_rp = (struct xpc_rsvd_page *) xpc_remote_copy_buffer;
+       remote_rp = (struct xpc_rsvd_page *)xpc_remote_copy_buffer;
 
        ret = xpc_get_remote_rp(nasid, NULL, remote_rp, &remote_rp_pa);
        if (ret != xpcSuccess) {
                dev_warn(xpc_part, "unable to get reserved page from nasid %d, "
-                       "which sent interrupt, reason=%d\n", nasid, ret);
+                        "which sent interrupt, reason=%d\n", nasid, ret);
                return;
        }
 
        remote_vars_pa = remote_rp->vars_pa;
        remote_rp_version = remote_rp->version;
-       if (XPC_SUPPORTS_RP_STAMP(remote_rp_version)) {
+       if (XPC_SUPPORTS_RP_STAMP(remote_rp_version))
                remote_rp_stamp = remote_rp->stamp;
-       }
+
        partid = remote_rp->partid;
        part = &xpc_partitions[partid];
 
-
        /* pull over the cross partition variables */
 
-       remote_vars = (struct xpc_vars *) xpc_remote_copy_buffer;
+       remote_vars = (struct xpc_vars *)xpc_remote_copy_buffer;
 
        ret = xpc_get_remote_vars(remote_vars_pa, remote_vars);
        if (ret != xpcSuccess) {
 
                dev_warn(xpc_part, "unable to get XPC variables from nasid %d, "
-                       "which sent interrupt, reason=%d\n", nasid, ret);
+                        "which sent interrupt, reason=%d\n", nasid, ret);
 
                XPC_DEACTIVATE_PARTITION(part, ret);
                return;
        }
 
-
        part->act_IRQ_rcvd++;
 
        dev_dbg(xpc_part, "partid for nasid %d is %d; IRQs = %d; HB = "
-               "%ld:0x%lx\n", (int) nasid, (int) partid, part->act_IRQ_rcvd,
+               "%ld:0x%lx\n", (int)nasid, (int)partid, part->act_IRQ_rcvd,
                remote_vars->heartbeat, remote_vars->heartbeating_to_mask);
 
        if (xpc_partition_disengaged(part) &&
-                                       part->act_state == XPC_P_INACTIVE) {
+           part->act_state == XPC_P_INACTIVE) {
 
                xpc_update_partition_info(part, remote_rp_version,
-                                       &remote_rp_stamp, remote_rp_pa,
-                                       remote_vars_pa, remote_vars);
+                                         &remote_rp_stamp, remote_rp_pa,
+                                         remote_vars_pa, remote_vars);
 
                if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version)) {
                        if (xpc_partition_disengage_requested(1UL << partid)) {
@@ -714,16 +676,15 @@ xpc_identify_act_IRQ_req(int nasid)
 
        if (!XPC_SUPPORTS_RP_STAMP(part->remote_rp_version)) {
                DBUG_ON(XPC_SUPPORTS_DISENGAGE_REQUEST(part->
-                                                       remote_vars_version));
+                                                      remote_vars_version));
 
                if (!XPC_SUPPORTS_RP_STAMP(remote_rp_version)) {
                        DBUG_ON(XPC_SUPPORTS_DISENGAGE_REQUEST(remote_vars->
-                                                               version));
+                                                              version));
                        /* see if the other side rebooted */
                        if (part->remote_amos_page_pa ==
-                               remote_vars->amos_page_pa &&
-                                       xpc_hb_allowed(sn_partition_id,
-                                                               remote_vars)) {
+                           remote_vars->amos_page_pa &&
+                           xpc_hb_allowed(sn_partition_id, remote_vars)) {
                                /* doesn't look that way, so ignore the IPI */
                                return;
                        }
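A gloss on the reboot test above (assuming xpc_hb_allowed() checks whether our partition id is still set in the remote partition's heartbeating_to_mask):

			/*
			 * remote_amos_page_pa unchanged  &&  remote still
			 * heartbeating to us
			 *   => the remote partition never restarted, so the
			 *      activate IRQ is stale and can be dropped
			 */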
@@ -735,8 +696,8 @@ xpc_identify_act_IRQ_req(int nasid)
                 */
 
                xpc_update_partition_info(part, remote_rp_version,
-                                               &remote_rp_stamp, remote_rp_pa,
-                                               remote_vars_pa, remote_vars);
+                                         &remote_rp_stamp, remote_rp_pa,
+                                         remote_vars_pa, remote_vars);
                part->reactivate_nasid = nasid;
                XPC_DEACTIVATE_PARTITION(part, xpcReactivating);
                return;
@@ -756,15 +717,15 @@ xpc_identify_act_IRQ_req(int nasid)
                xpc_clear_partition_disengage_request(1UL << partid);
 
                xpc_update_partition_info(part, remote_rp_version,
-                                               &remote_rp_stamp, remote_rp_pa,
-                                               remote_vars_pa, remote_vars);
+                                         &remote_rp_stamp, remote_rp_pa,
+                                         remote_vars_pa, remote_vars);
                reactivate = 1;
 
        } else {
                DBUG_ON(!XPC_SUPPORTS_DISENGAGE_REQUEST(remote_vars->version));
 
                stamp_diff = xpc_compare_stamps(&part->remote_rp_stamp,
-                                                       &remote_rp_stamp);
+                                               &remote_rp_stamp);
                if (stamp_diff != 0) {
                        DBUG_ON(stamp_diff >= 0);
 
@@ -775,17 +736,18 @@ xpc_identify_act_IRQ_req(int nasid)
 
                        DBUG_ON(xpc_partition_engaged(1UL << partid));
                        DBUG_ON(xpc_partition_disengage_requested(1UL <<
-                                                               partid));
+                                                                 partid));
 
                        xpc_update_partition_info(part, remote_rp_version,
-                                               &remote_rp_stamp, remote_rp_pa,
-                                               remote_vars_pa, remote_vars);
+                                                 &remote_rp_stamp,
+                                                 remote_rp_pa, remote_vars_pa,
+                                                 remote_vars);
                        reactivate = 1;
                }
        }
 
        if (part->disengage_request_timeout > 0 &&
-                                       !xpc_partition_disengaged(part)) {
+           !xpc_partition_disengaged(part)) {
                /* still waiting on other side to disengage from us */
                return;
        }
@@ -795,12 +757,11 @@ xpc_identify_act_IRQ_req(int nasid)
                XPC_DEACTIVATE_PARTITION(part, xpcReactivating);
 
        } else if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version) &&
-                       xpc_partition_disengage_requested(1UL << partid)) {
+                  xpc_partition_disengage_requested(1UL << partid)) {
                XPC_DEACTIVATE_PARTITION(part, xpcOtherGoingDown);
        }
 }
 
-
 /*
  * Loop through the activation AMO variables and process any bits
  * which are set.  Each bit indicates a nasid sending a partition
@@ -813,20 +774,17 @@ xpc_identify_act_IRQ_sender(void)
 {
        int word, bit;
        u64 nasid_mask;
-       u64 nasid;                      /* remote nasid */
+       u64 nasid;              /* remote nasid */
        int n_IRQs_detected = 0;
        AMO_t *act_amos;
 
-
        act_amos = xpc_vars->amos_page + XPC_ACTIVATE_IRQ_AMOS;
 
-
        /* scan through act AMO variable looking for non-zero entries */
        for (word = 0; word < xp_nasid_mask_words; word++) {
 
-               if (xpc_exiting) {
+               if (xpc_exiting)
                        break;
-               }
 
                nasid_mask = xpc_IPI_receive(&act_amos[word]);
                if (nasid_mask == 0) {
@@ -837,7 +795,6 @@ xpc_identify_act_IRQ_sender(void)
                dev_dbg(xpc_part, "AMO[%d] gave back 0x%lx\n", word,
                        nasid_mask);
 
-
                /*
                 * If this nasid has been added to the machine since
                 * our partition was reset, this will retain the
@@ -846,7 +803,6 @@ xpc_identify_act_IRQ_sender(void)
                 */
                xpc_mach_nasids[word] |= nasid_mask;
 
-
                /* locate the nasid(s) which sent interrupts */
 
                for (bit = 0; bit < (8 * sizeof(u64)); bit++) {
@@ -862,7 +818,6 @@ xpc_identify_act_IRQ_sender(void)
        return n_IRQs_detected;
 }
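For reference, the word/bit coordinates above map to the sender's nasid; a minimal sketch, assuming the even-only nasid numbering described in xpc_discovery() below (the in-tree code wraps this in a helper macro):

	/* set bit b of AMO mask word w  =>  remote nasid 2 * (w * 64 + b) */
	nasid = 2 * ((u64)word * (8 * sizeof(u64)) + bit);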
 
-
 /*
  * See if the other side has responded to a partition disengage request
  * from us.
@@ -873,11 +828,11 @@ xpc_partition_disengaged(struct xpc_partition *part)
        partid_t partid = XPC_PARTID(part);
        int disengaged;
 
-
        disengaged = (xpc_partition_engaged(1UL << partid) == 0);
        if (part->disengage_request_timeout) {
                if (!disengaged) {
-                       if (time_before(jiffies, part->disengage_request_timeout)) {
+                       if (time_before(jiffies,
+                           part->disengage_request_timeout)) {
                                /* timelimit hasn't been reached yet */
                                return 0;
                        }
@@ -888,7 +843,7 @@ xpc_partition_disengaged(struct xpc_partition *part)
                         */
 
                        dev_info(xpc_part, "disengage from remote partition %d "
-                               "timed out\n", partid);
+                                "timed out\n", partid);
                        xpc_disengage_request_timedout = 1;
                        xpc_clear_partition_engaged(1UL << partid);
                        disengaged = 1;
@@ -898,23 +853,20 @@ xpc_partition_disengaged(struct xpc_partition *part)
                /* cancel the timer function, provided it's not us */
                if (!in_interrupt()) {
                        del_singleshot_timer_sync(&part->
-                                                     disengage_request_timer);
+                                                 disengage_request_timer);
                }
 
                DBUG_ON(part->act_state != XPC_P_DEACTIVATING &&
-                                       part->act_state != XPC_P_INACTIVE);
-               if (part->act_state != XPC_P_INACTIVE) {
+                       part->act_state != XPC_P_INACTIVE);
+               if (part->act_state != XPC_P_INACTIVE)
                        xpc_wakeup_channel_mgr(part);
-               }
 
-               if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version)) {
+               if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version))
                        xpc_cancel_partition_disengage_request(part);
-               }
        }
        return disengaged;
 }
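The time limit consulted here is armed in xpc_deactivate_partition() below; condensed, the pattern is:

	/* arm (xpc_deactivate_partition): */
	part->disengage_request_timeout = jiffies +
	    (xpc_disengage_request_timelimit * HZ);

	/* check (here): time_before() is safe across jiffies wraparound */
	if (time_before(jiffies, part->disengage_request_timeout))
		return 0;	/* timelimit hasn't been reached yet */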
 
-
 /*
  * Mark specified partition as active.
  */
@@ -924,7 +876,6 @@ xpc_mark_partition_active(struct xpc_partition *part)
        unsigned long irq_flags;
        enum xpc_retval ret;
 
-
        dev_dbg(xpc_part, "setting partition %d to ACTIVE\n", XPC_PARTID(part));
 
        spin_lock_irqsave(&part->act_lock, irq_flags);
@@ -940,17 +891,15 @@ xpc_mark_partition_active(struct xpc_partition *part)
        return ret;
 }
 
-
 /*
  * Notify XPC that the partition is down.
  */
 void
 xpc_deactivate_partition(const int line, struct xpc_partition *part,
-                               enum xpc_retval reason)
+                        enum xpc_retval reason)
 {
        unsigned long irq_flags;
 
-
        spin_lock_irqsave(&part->act_lock, irq_flags);
 
        if (part->act_state == XPC_P_INACTIVE) {
@@ -964,7 +913,7 @@ xpc_deactivate_partition(const int line, struct xpc_partition *part,
        }
        if (part->act_state == XPC_P_DEACTIVATING) {
                if ((part->reason == xpcUnloading && reason != xpcUnloading) ||
-                                       reason == xpcReactivating) {
+                   reason == xpcReactivating) {
                        XPC_SET_REASON(part, reason, line);
                }
                spin_unlock_irqrestore(&part->act_lock, irq_flags);
@@ -982,9 +931,9 @@ xpc_deactivate_partition(const int line, struct xpc_partition *part,
 
                /* set a timelimit on the disengage request */
                part->disengage_request_timeout = jiffies +
-                                       (xpc_disengage_request_timelimit * HZ);
+                   (xpc_disengage_request_timelimit * HZ);
                part->disengage_request_timer.expires =
-                                       part->disengage_request_timeout;
+                   part->disengage_request_timeout;
                add_timer(&part->disengage_request_timer);
        }
 
@@ -994,7 +943,6 @@ xpc_deactivate_partition(const int line, struct xpc_partition *part,
        xpc_partition_going_down(part, reason);
 }
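Callers reach this function through the XPC_DEACTIVATE_PARTITION() wrapper used throughout this file; the macro itself sits outside this diff, but presumably expands along these lines so XPC_SET_REASON() can record the call site:

	/* assumed expansion of the wrapper used above */
	#define XPC_DEACTIVATE_PARTITION(_p, _reason) \
		xpc_deactivate_partition(__LINE__, (_p), (_reason))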
 
-
 /*
  * Mark specified partition as inactive.
  */
@@ -1003,7 +951,6 @@ xpc_mark_partition_inactive(struct xpc_partition *part)
 {
        unsigned long irq_flags;
 
-
        dev_dbg(xpc_part, "setting partition %d to INACTIVE\n",
                XPC_PARTID(part));
 
@@ -1013,7 +960,6 @@ xpc_mark_partition_inactive(struct xpc_partition *part)
        part->remote_rp_pa = 0;
 }
 
-
 /*
  * SAL has provided a partition and machine mask.  The partition mask
  * contains a bit for each even nasid in our partition.  The machine
@@ -1041,24 +987,22 @@ xpc_discovery(void)
        u64 *discovered_nasids;
        enum xpc_retval ret;
 
-
        remote_rp = xpc_kmalloc_cacheline_aligned(XPC_RP_HEADER_SIZE +
-                                               xp_nasid_mask_bytes,
-                                               GFP_KERNEL, &remote_rp_base);
-       if (remote_rp == NULL) {
+                                                 xp_nasid_mask_bytes,
+                                                 GFP_KERNEL, &remote_rp_base);
+       if (remote_rp == NULL)
                return;
-       }
-       remote_vars = (struct xpc_vars *) remote_rp;
 
+       remote_vars = (struct xpc_vars *)remote_rp;
 
        discovered_nasids = kzalloc(sizeof(u64) * xp_nasid_mask_words,
-                                                       GFP_KERNEL);
+                                   GFP_KERNEL);
        if (discovered_nasids == NULL) {
                kfree(remote_rp_base);
                return;
        }
 
-       rp = (struct xpc_rsvd_page *) xpc_rsvd_page;
+       rp = (struct xpc_rsvd_page *)xpc_rsvd_page;
 
        /*
         * The term 'region' in this context refers to the minimum number of
@@ -1081,23 +1025,19 @@ xpc_discovery(void)
 
        for (region = 0; region < max_regions; region++) {
 
-               if ((volatile int) xpc_exiting) {
+               if (xpc_exiting)
                        break;
-               }
 
                dev_dbg(xpc_part, "searching region %d\n", region);
 
                for (nasid = (region * region_size * 2);
-                    nasid < ((region + 1) * region_size * 2);
-                    nasid += 2) {
+                    nasid < ((region + 1) * region_size * 2); nasid += 2) {
 
-                       if ((volatile int) xpc_exiting) {
+                       if (xpc_exiting)
                                break;
-                       }
 
                        dev_dbg(xpc_part, "checking nasid %d\n", nasid);
 
-
                        if (XPC_NASID_IN_ARRAY(nasid, xpc_part_nasids)) {
                                dev_dbg(xpc_part, "PROM indicates Nasid %d is "
                                        "part of the local partition; skipping "
@@ -1119,19 +1059,18 @@ xpc_discovery(void)
                                continue;
                        }
 
-
                        /* pull over the reserved page structure */
 
                        ret = xpc_get_remote_rp(nasid, discovered_nasids,
-                                             remote_rp, &remote_rp_pa);
+                                               remote_rp, &remote_rp_pa);
                        if (ret != xpcSuccess) {
                                dev_dbg(xpc_part, "unable to get reserved page "
                                        "from nasid %d, reason=%d\n", nasid,
                                        ret);
 
-                               if (ret == xpcLocalPartid) {
+                               if (ret == xpcLocalPartid)
                                        break;
-                               }
+
                                continue;
                        }
 
@@ -1140,7 +1079,6 @@ xpc_discovery(void)
                        partid = remote_rp->partid;
                        part = &xpc_partitions[partid];
 
-
                        /* pull over the cross partition variables */
 
                        ret = xpc_get_remote_vars(remote_vars_pa, remote_vars);
@@ -1171,15 +1109,15 @@ xpc_discovery(void)
                         * get the same page for remote_act_amos_pa after
                         * module reloads and system reboots.
                         */
-                       if (sn_register_xp_addr_region(
-                                           remote_vars->amos_page_pa,
-                                                       PAGE_SIZE, 1) < 0) {
-                               dev_dbg(xpc_part, "partition %d failed to "
+                       if (sn_register_xp_addr_region
+                           (remote_vars->amos_page_pa, PAGE_SIZE, 1) < 0) {
+                               dev_dbg(xpc_part,
+                                       "partition %d failed to "
                                        "register xp_addr region 0x%016lx\n",
                                        partid, remote_vars->amos_page_pa);
 
                                XPC_SET_REASON(part, xpcPhysAddrRegFailed,
-                                               __LINE__);
+                                              __LINE__);
                                break;
                        }
 
@@ -1195,9 +1133,9 @@ xpc_discovery(void)
                                remote_vars->act_phys_cpuid);
 
                        if (XPC_SUPPORTS_DISENGAGE_REQUEST(remote_vars->
-                                                               version)) {
+                                                          version)) {
                                part->remote_amos_page_pa =
-                                               remote_vars->amos_page_pa;
+                                   remote_vars->amos_page_pa;
                                xpc_mark_partition_disengaged(part);
                                xpc_cancel_partition_disengage_request(part);
                        }
@@ -1209,7 +1147,6 @@ xpc_discovery(void)
        kfree(remote_rp_base);
 }
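A worked example of the region scan above, taking a hypothetical region_size of 64:

	/*
	 * region 0 probes even nasids   0,   2, ..., 126
	 * region 1 probes even nasids 128, 130, ..., 254
	 * i.e. region r covers [r * 128, (r + 1) * 128) in steps of 2
	 */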
 
-
 /*
  * Given a partid, get the nasids owned by that partition from the
  * remote partition's reserved page.
@@ -1221,19 +1158,17 @@ xpc_initiate_partid_to_nasids(partid_t partid, void *nasid_mask)
        u64 part_nasid_pa;
        int bte_res;
 
-
        part = &xpc_partitions[partid];
-       if (part->remote_rp_pa == 0) {
+       if (part->remote_rp_pa == 0)
                return xpcPartitionDown;
-       }
 
        memset(nasid_mask, 0, XP_NASID_MASK_BYTES);
 
-       part_nasid_pa = (u64) XPC_RP_PART_NASIDS(part->remote_rp_pa);
+       part_nasid_pa = (u64)XPC_RP_PART_NASIDS(part->remote_rp_pa);
 
-       bte_res = xp_bte_copy(part_nasid_pa, (u64) nasid_mask,
-                       xp_nasid_mask_bytes, (BTE_NOTIFY | BTE_WACQUIRE), NULL);
+       bte_res = xp_bte_copy(part_nasid_pa, (u64)nasid_mask,
+                             xp_nasid_mask_bytes, (BTE_NOTIFY | BTE_WACQUIRE),
+                             NULL);
 
        return xpc_map_bte_errors(bte_res);
 }
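A hedged reading of the flags in the xp_bte_copy() call above (standard SN2 Block Transfer Engine semantics):

	/*
	 * BTE_WACQUIRE - block until a free transfer engine is acquired
	 * BTE_NOTIFY   - request completion notification; paired with a
	 *                NULL notification pointer, the call spins until
	 *                the DMA finishes, making the copy synchronous
	 */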
-
similarity index 89%
rename from arch/ia64/sn/kernel/xpnet.c
rename to drivers/misc/sgi-xp/xpnet.c
index a5df672..a9543c6 100644 (file)
@@ -3,10 +3,9 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 1999,2001-2005 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1999-2008 Silicon Graphics, Inc. All rights reserved.
  */
 
-
 /*
  * Cross Partition Network Interface (XPNET) support
  *
@@ -21,8 +20,8 @@
  *
  */
 
-
 #include <linux/module.h>
+#include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/ioport.h>
 #include <asm/sn/bte.h>
 #include <asm/sn/io.h>
 #include <asm/sn/sn_sal.h>
-#include <asm/types.h>
 #include <asm/atomic.h>
-#include <asm/sn/xp.h>
-
+#include "xp.h"
 
 /*
  * The message payload transferred by XPC.
@@ -79,7 +76,6 @@ struct xpnet_message {
 #define XPNET_MSG_ALIGNED_SIZE (L1_CACHE_ALIGN(XPNET_MSG_SIZE))
 #define XPNET_MSG_NENTRIES     (PAGE_SIZE / XPNET_MSG_ALIGNED_SIZE)
 
-
 #define XPNET_MAX_KTHREADS     (XPNET_MSG_NENTRIES + 1)
 #define XPNET_MAX_IDLE_KTHREADS        (XPNET_MSG_NENTRIES + 1)
 
@@ -91,9 +87,9 @@ struct xpnet_message {
 #define XPNET_VERSION_MAJOR(_v)                ((_v) >> 4)
 #define XPNET_VERSION_MINOR(_v)                ((_v) & 0xf)
 
-#define        XPNET_VERSION _XPNET_VERSION(1,0)               /* version 1.0 */
-#define        XPNET_VERSION_EMBED _XPNET_VERSION(1,1)         /* version 1.1 */
-#define XPNET_MAGIC    0x88786984 /* "XNET" */
+#define        XPNET_VERSION _XPNET_VERSION(1, 0)      /* version 1.0 */
+#define        XPNET_VERSION_EMBED _XPNET_VERSION(1, 1)        /* version 1.1 */
+#define XPNET_MAGIC    0x88786984      /* "XNET" */
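To illustrate the packing (assuming _XPNET_VERSION(_maj, _min) expands to ((_maj) << 4) | (_min); that define sits outside this hunk):

	/*
	 * XPNET_VERSION_EMBED == 0x11
	 *   XPNET_VERSION_MAJOR(0x11) == 0x11 >> 4  == 1
	 *   XPNET_VERSION_MINOR(0x11) == 0x11 & 0xf == 1
	 */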
 
 #define XPNET_VALID_MSG(_m)                                                 \
    ((XPNET_VERSION_MAJOR(_m->version) == XPNET_VERSION_MAJOR(XPNET_VERSION)) \
@@ -101,7 +97,6 @@ struct xpnet_message {
 
 #define XPNET_DEVICE_NAME              "xp0"
 
-
 /*
  * When messages are queued with xpc_send_notify, a kmalloc'd buffer
  * of the following type is passed as a notification cookie.  When the
@@ -145,7 +140,6 @@ static DEFINE_SPINLOCK(xpnet_broadcast_lock);
 /* 32KB has been determined to be the ideal */
 #define XPNET_DEF_MTU (0x8000UL)
 
-
 /*
  * The partition id is encapsulated in the MAC address.  The following
  * define locates the octet the partid is in.
@@ -153,7 +147,6 @@ static DEFINE_SPINLOCK(xpnet_broadcast_lock);
 #define XPNET_PARTID_OCTET     1
 #define XPNET_LICENSE_OCTET    2
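Taken together with the license loop in xpnet_init() below, the XPNET MAC address carries its routing data in-band; a sketch of the layout (octet 0 is set up outside this diff):

	/*
	 * octet 1     - destination partid (0xff = broadcast to all)
	 * octets 2-5  - partition license/serial number
	 *
	 * the transmit path reads the partid straight from the Ethernet
	 * header at the head of the skb:
	 *	second_mac_octet = skb->data[XPNET_PARTID_OCTET];
	 */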
 
-
 /*
  * Define the XPNET debug device structure that is to be used with dev_dbg(),
  * dev_err(), dev_warn(), and dev_info().
@@ -163,7 +156,7 @@ struct device_driver xpnet_dbg_name = {
 };
 
 struct device xpnet_dbg_subname = {
-       .bus_id = {0},                  /* set to "" */
+       .bus_id = {0},          /* set to "" */
        .driver = &xpnet_dbg_name
 };
 
@@ -178,14 +171,13 @@ xpnet_receive(partid_t partid, int channel, struct xpnet_message *msg)
        struct sk_buff *skb;
        bte_result_t bret;
        struct xpnet_dev_private *priv =
-               (struct xpnet_dev_private *) xpnet_device->priv;
-
+           (struct xpnet_dev_private *)xpnet_device->priv;
 
        if (!XPNET_VALID_MSG(msg)) {
                /*
                 * Packet with a different XPC version.  Ignore.
                 */
-               xpc_received(partid, channel, (void *) msg);
+               xpc_received(partid, channel, (void *)msg);
 
                priv->stats.rx_errors++;
 
@@ -194,14 +186,13 @@ xpnet_receive(partid_t partid, int channel, struct xpnet_message *msg)
        dev_dbg(xpnet, "received 0x%lx, %d, %d, %d\n", msg->buf_pa, msg->size,
                msg->leadin_ignore, msg->tailout_ignore);
 
-
        /* reserve an extra cache line */
        skb = dev_alloc_skb(msg->size + L1_CACHE_BYTES);
        if (!skb) {
                dev_err(xpnet, "failed on dev_alloc_skb(%d)\n",
                        msg->size + L1_CACHE_BYTES);
 
-               xpc_received(partid, channel, (void *) msg);
+               xpc_received(partid, channel, (void *)msg);
 
                priv->stats.rx_errors++;
 
@@ -227,12 +218,13 @@ xpnet_receive(partid_t partid, int channel, struct xpnet_message *msg)
         * Move the data over from the other side.
         */
        if ((XPNET_VERSION_MINOR(msg->version) == 1) &&
-                                               (msg->embedded_bytes != 0)) {
+           (msg->embedded_bytes != 0)) {
                dev_dbg(xpnet, "copying embedded message. memcpy(0x%p, 0x%p, "
                        "%lu)\n", skb->data, &msg->data,
-                       (size_t) msg->embedded_bytes);
+                       (size_t)msg->embedded_bytes);
 
-               skb_copy_to_linear_data(skb, &msg->data, (size_t)msg->embedded_bytes);
+               skb_copy_to_linear_data(skb, &msg->data,
+                                       (size_t)msg->embedded_bytes);
        } else {
                dev_dbg(xpnet, "transferring buffer to the skb->data area;\n\t"
                        "bte_copy(0x%p, 0x%p, %hu)\n", (void *)msg->buf_pa,
@@ -244,16 +236,18 @@ xpnet_receive(partid_t partid, int channel, struct xpnet_message *msg)
                                msg->size, (BTE_NOTIFY | BTE_WACQUIRE), NULL);
 
                if (bret != BTE_SUCCESS) {
-                       // >>> Need better way of cleaning skb.  Currently skb
-                       // >>> appears in_use and we can't just call
-                       // >>> dev_kfree_skb.
+                       /*
+                        * >>> Need better way of cleaning skb.  Currently skb
+                        * >>> appears in_use and we can't just call
+                        * >>> dev_kfree_skb.
+                        */
                        dev_err(xpnet, "bte_copy(0x%p, 0x%p, 0x%hx) returned "
                                "error=0x%x\n", (void *)msg->buf_pa,
                                (void *)__pa((u64)skb->data &
-                                                       ~(L1_CACHE_BYTES - 1)),
+                                            ~(L1_CACHE_BYTES - 1)),
                                msg->size, bret);
 
-                       xpc_received(partid, channel, (void *) msg);
+                       xpc_received(partid, channel, (void *)msg);
 
                        priv->stats.rx_errors++;
 
@@ -262,7 +256,7 @@ xpnet_receive(partid_t partid, int channel, struct xpnet_message *msg)
        }
 
        dev_dbg(xpnet, "<skb->head=0x%p skb->data=0x%p skb->tail=0x%p "
-               "skb->end=0x%p skb->len=%d\n", (void *) skb->head,
+               "skb->end=0x%p skb->len=%d\n", (void *)skb->head,
                (void *)skb->data, skb_tail_pointer(skb), skb_end_pointer(skb),
                skb->len);
 
@@ -275,16 +269,14 @@ xpnet_receive(partid_t partid, int channel, struct xpnet_message *msg)
                (void *)skb->head, (void *)skb->data, skb_tail_pointer(skb),
                skb_end_pointer(skb), skb->len);
 
-
        xpnet_device->last_rx = jiffies;
        priv->stats.rx_packets++;
        priv->stats.rx_bytes += skb->len + ETH_HLEN;
 
        netif_rx_ni(skb);
-       xpc_received(partid, channel, (void *) msg);
+       xpc_received(partid, channel, (void *)msg);
 }
 
-
 /*
  * This is the handler which XPC calls during any sort of change in
  * state or message reception on a connection.
@@ -295,20 +287,19 @@ xpnet_connection_activity(enum xpc_retval reason, partid_t partid, int channel,
 {
        long bp;
 
-
        DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
        DBUG_ON(channel != XPC_NET_CHANNEL);
 
-       switch(reason) {
+       switch (reason) {
        case xpcMsgReceived:    /* message received */
                DBUG_ON(data == NULL);
 
-               xpnet_receive(partid, channel, (struct xpnet_message *) data);
+               xpnet_receive(partid, channel, (struct xpnet_message *)data);
                break;
 
        case xpcConnected:      /* connection completed to a partition */
                spin_lock_bh(&xpnet_broadcast_lock);
-               xpnet_broadcast_partitions |= 1UL << (partid -1 );
+               xpnet_broadcast_partitions |= 1UL << (partid - 1);
                bp = xpnet_broadcast_partitions;
                spin_unlock_bh(&xpnet_broadcast_lock);
 
@@ -321,13 +312,12 @@ xpnet_connection_activity(enum xpc_retval reason, partid_t partid, int channel,
 
        default:
                spin_lock_bh(&xpnet_broadcast_lock);
-               xpnet_broadcast_partitions &= ~(1UL << (partid -1 ));
+               xpnet_broadcast_partitions &= ~(1UL << (partid - 1));
                bp = xpnet_broadcast_partitions;
                spin_unlock_bh(&xpnet_broadcast_lock);
 
-               if (bp == 0) {
+               if (bp == 0)
                        netif_carrier_off(xpnet_device);
-               }
 
                dev_dbg(xpnet, "%s disconnected from partition %d; "
                        "xpnet_broadcast_partitions=0x%lx\n",
@@ -337,13 +327,11 @@ xpnet_connection_activity(enum xpc_retval reason, partid_t partid, int channel,
        }
 }
 
-
 static int
 xpnet_dev_open(struct net_device *dev)
 {
        enum xpc_retval ret;
 
-
        dev_dbg(xpnet, "calling xpc_connect(%d, 0x%p, NULL, %ld, %ld, %ld, "
                "%ld)\n", XPC_NET_CHANNEL, xpnet_connection_activity,
                XPNET_MSG_SIZE, XPNET_MSG_NENTRIES, XPNET_MAX_KTHREADS,
@@ -364,7 +352,6 @@ xpnet_dev_open(struct net_device *dev)
        return 0;
 }
 
-
 static int
 xpnet_dev_stop(struct net_device *dev)
 {
@@ -375,7 +362,6 @@ xpnet_dev_stop(struct net_device *dev)
        return 0;
 }
 
-
 static int
 xpnet_dev_change_mtu(struct net_device *dev, int new_mtu)
 {
@@ -392,7 +378,6 @@ xpnet_dev_change_mtu(struct net_device *dev, int new_mtu)
        return 0;
 }
 
-
 /*
  * Required for the net_device structure.
  */
@@ -402,7 +387,6 @@ xpnet_dev_set_config(struct net_device *dev, struct ifmap *new_map)
        return 0;
 }
 
-
 /*
  * Return statistics to the caller.
  */
@@ -411,13 +395,11 @@ xpnet_dev_get_stats(struct net_device *dev)
 {
        struct xpnet_dev_private *priv;
 
-
-       priv = (struct xpnet_dev_private *) dev->priv;
+       priv = (struct xpnet_dev_private *)dev->priv;
 
        return &priv->stats;
 }
 
-
 /*
  * Notification that the other end has received the message and
  * DMA'd the skb information.  At this point, they are done with
@@ -426,11 +408,9 @@ xpnet_dev_get_stats(struct net_device *dev)
  */
 static void
 xpnet_send_completed(enum xpc_retval reason, partid_t partid, int channel,
-                       void *__qm)
+                    void *__qm)
 {
-       struct xpnet_pending_msg *queued_msg =
-               (struct xpnet_pending_msg *) __qm;
-
+       struct xpnet_pending_msg *queued_msg = (struct xpnet_pending_msg *)__qm;
 
        DBUG_ON(queued_msg == NULL);
 
@@ -439,14 +419,13 @@ xpnet_send_completed(enum xpc_retval reason, partid_t partid, int channel,
 
        if (atomic_dec_return(&queued_msg->use_count) == 0) {
                dev_dbg(xpnet, "all acks for skb->head=-x%p\n",
-                       (void *) queued_msg->skb->head);
+                       (void *)queued_msg->skb->head);
 
                dev_kfree_skb_any(queued_msg->skb);
                kfree(queued_msg);
        }
 }
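In sketch form, the use_count protocol shared between this handler and xpnet_dev_hard_start_xmit() below:

	/*
	 * xmit:  atomic_set(&queued_msg->use_count, 1);  <- submission ref
	 *        per target: atomic_inc() before xpc_send_notify(),
	 *                    atomic_dec() if the send fails
	 *        finally:    atomic_dec_return() drops the submission ref
	 * here:  atomic_dec_return() once per delivered notification
	 * whichever side reaches zero frees the skb and the cookie
	 */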
 
-
 /*
  * Network layer has formatted a packet (skb) and is ready to place it
  * "on the wire".  Prepare and send an xpnet_message to all partitions
@@ -469,16 +448,13 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
        struct xpnet_dev_private *priv;
        u16 embedded_bytes;
 
-
-       priv = (struct xpnet_dev_private *) dev->priv;
-
+       priv = (struct xpnet_dev_private *)dev->priv;
 
        dev_dbg(xpnet, ">skb->head=0x%p skb->data=0x%p skb->tail=0x%p "
-               "skb->end=0x%p skb->len=%d\n", (void *) skb->head,
+               "skb->end=0x%p skb->len=%d\n", (void *)skb->head,
                (void *)skb->data, skb_tail_pointer(skb), skb_end_pointer(skb),
                skb->len);
 
-
        /*
         * The xpnet_pending_msg tracks how many outstanding
         * xpc_send_notifies are relying on this skb.  When none
@@ -487,16 +463,15 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
        queued_msg = kmalloc(sizeof(struct xpnet_pending_msg), GFP_ATOMIC);
        if (queued_msg == NULL) {
                dev_warn(xpnet, "failed to kmalloc %ld bytes; dropping "
-                       "packet\n", sizeof(struct xpnet_pending_msg));
+                        "packet\n", sizeof(struct xpnet_pending_msg));
 
                priv->stats.tx_errors++;
 
                return -ENOMEM;
        }
 
-
        /* get the beginning of the first cacheline and end of last */
-       start_addr = ((u64) skb->data & ~(L1_CACHE_BYTES - 1));
+       start_addr = ((u64)skb->data & ~(L1_CACHE_BYTES - 1));
        end_addr = L1_CACHE_ALIGN((u64)skb_tail_pointer(skb));
 
        /* calculate how many bytes to embed in the XPC message */
@@ -506,7 +481,6 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
                embedded_bytes = skb->len;
        }
 
-
        /*
         * Since the send occurs asynchronously, we set the count to one
         * and begin sending.  Any sends that happen to complete before
@@ -517,14 +491,13 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
        atomic_set(&queued_msg->use_count, 1);
        queued_msg->skb = skb;
 
-
        second_mac_octet = skb->data[XPNET_PARTID_OCTET];
        if (second_mac_octet == 0xff) {
                /* we are being asked to broadcast to all partitions */
                dp = xpnet_broadcast_partitions;
        } else if (second_mac_octet != 0) {
                dp = xpnet_broadcast_partitions &
-                                       (1UL << (second_mac_octet - 1));
+                   (1UL << (second_mac_octet - 1));
        } else {
                /* 0 is an invalid partid.  Ignore */
                dp = 0;
@@ -543,7 +516,6 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
        for (dest_partid = 1; dp && dest_partid < XP_MAX_PARTITIONS;
             dest_partid++) {
 
-
                if (!(dp & (1UL << (dest_partid - 1)))) {
                        /* not destined for this partition */
                        continue;
@@ -552,20 +524,18 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
                /* remove this partition from the destinations mask */
                dp &= ~(1UL << (dest_partid - 1));
 
-
                /* found a partition to send to */
 
                ret = xpc_allocate(dest_partid, XPC_NET_CHANNEL,
                                   XPC_NOWAIT, (void **)&msg);
-               if (unlikely(ret != xpcSuccess)) {
+               if (unlikely(ret != xpcSuccess))
                        continue;
-               }
 
                msg->embedded_bytes = embedded_bytes;
                if (unlikely(embedded_bytes != 0)) {
                        msg->version = XPNET_VERSION_EMBED;
                        dev_dbg(xpnet, "calling memcpy(0x%p, 0x%p, 0x%lx)\n",
-                               &msg->data, skb->data, (size_t) embedded_bytes);
+                               &msg->data, skb->data, (size_t)embedded_bytes);
                        skb_copy_from_linear_data(skb, &msg->data,
                                                  (size_t)embedded_bytes);
                } else {
@@ -573,7 +543,7 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
                }
                msg->magic = XPNET_MAGIC;
                msg->size = end_addr - start_addr;
-               msg->leadin_ignore = (u64) skb->data - start_addr;
+               msg->leadin_ignore = (u64)skb->data - start_addr;
                msg->tailout_ignore = end_addr - (u64)skb_tail_pointer(skb);
                msg->buf_pa = __pa(start_addr);
 
@@ -583,7 +553,6 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
                        dest_partid, XPC_NET_CHANNEL, msg->buf_pa, msg->size,
                        msg->leadin_ignore, msg->tailout_ignore);
 
-
                atomic_inc(&queued_msg->use_count);
 
                ret = xpc_send_notify(dest_partid, XPC_NET_CHANNEL, msg,
@@ -592,14 +561,12 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
                        atomic_dec(&queued_msg->use_count);
                        continue;
                }
-
        }
 
        if (atomic_dec_return(&queued_msg->use_count) == 0) {
                dev_dbg(xpnet, "no partitions to receive packet destined for "
                        "%d\n", dest_partid);
 
-
                dev_kfree_skb(skb);
                kfree(queued_msg);
        }
@@ -610,23 +577,20 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
        return 0;
 }
 
-
 /*
  * Deal with transmit timeouts coming from the network layer.
  */
 static void
-xpnet_dev_tx_timeout (struct net_device *dev)
+xpnet_dev_tx_timeout(struct net_device *dev)
 {
        struct xpnet_dev_private *priv;
 
-
-       priv = (struct xpnet_dev_private *) dev->priv;
+       priv = (struct xpnet_dev_private *)dev->priv;
 
        priv->stats.tx_errors++;
        return;
 }
 
-
 static int __init
 xpnet_init(void)
 {
@@ -634,10 +598,8 @@ xpnet_init(void)
        u32 license_num;
        int result = -ENOMEM;
 
-
-       if (!ia64_platform_is("sn2")) {
+       if (!ia64_platform_is("sn2"))
                return -ENODEV;
-       }
 
        dev_info(xpnet, "registering network device %s\n", XPNET_DEVICE_NAME);
 
@@ -647,9 +609,8 @@ xpnet_init(void)
         */
        xpnet_device = alloc_netdev(sizeof(struct xpnet_dev_private),
                                    XPNET_DEVICE_NAME, ether_setup);
-       if (xpnet_device == NULL) {
+       if (xpnet_device == NULL)
                return -ENOMEM;
-       }
 
        netif_carrier_off(xpnet_device);
 
@@ -672,7 +633,7 @@ xpnet_init(void)
        license_num = sn_partition_serial_number_val();
        for (i = 3; i >= 0; i--) {
                xpnet_device->dev_addr[XPNET_LICENSE_OCTET + i] =
-                                                       license_num & 0xff;
+                   license_num & 0xff;
                license_num = license_num >> 8;
        }
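	/*
	 * Example: license_num = 0x12345678 lands in the MAC big-endian:
	 *   i == 3: dev_addr[5] = 0x78    i == 2: dev_addr[4] = 0x56
	 *   i == 1: dev_addr[3] = 0x34    i == 0: dev_addr[2] = 0x12
	 */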
 
@@ -690,29 +651,27 @@ xpnet_init(void)
        xpnet_device->features = NETIF_F_NO_CSUM;
 
        result = register_netdev(xpnet_device);
-       if (result != 0) {
+       if (result != 0)
                free_netdev(xpnet_device);
-       }
 
        return result;
 }
-module_init(xpnet_init);
 
+module_init(xpnet_init);
 
 static void __exit
 xpnet_exit(void)
 {
        dev_info(xpnet, "unregistering network device %s\n",
-               xpnet_device[0].name);
+                xpnet_device[0].name);
 
        unregister_netdev(xpnet_device);
 
        free_netdev(xpnet_device);
 }
-module_exit(xpnet_exit);
 
+module_exit(xpnet_exit);
 
 MODULE_AUTHOR("Silicon Graphics, Inc.");
 MODULE_DESCRIPTION("Cross Partition Network adapter (XPNET)");
 MODULE_LICENSE("GPL");
-
index f1663aa..18a4321 100644 (file)
@@ -157,6 +157,7 @@ extern void ia64_mca_printk(const char * fmt, ...)
 struct ia64_mca_notify_die {
        struct ia64_sal_os_state *sos;
        int *monarch_cpu;
+       int *data;
 };
 
 DECLARE_PER_CPU(u64, ia64_mca_pal_base);