Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6
author Linus Torvalds <torvalds@g5.osdl.org>
Wed, 27 Sep 2006 17:53:30 +0000 (10:53 -0700)
committer Linus Torvalds <torvalds@g5.osdl.org>
Wed, 27 Sep 2006 17:53:30 +0000 (10:53 -0700)
* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6:
  [IA64] minor reformatting to vmlinux.lds.S
  [IA64] CMC/CPE: Reverse the order of fetching log and checking poll threshold
  [IA64] PAL calls need physical mode, stacked
  [IA64] ar.fpsr not set on MCA/INIT kernel entry
  [IA64] printing support for MCA/INIT
  [IA64] trim output of show_mem()
  [IA64] show_mem() printk levels
  [IA64] Make gp value point to Region 5 in mca handler
  Revert "[IA64] Unwire set/get_robust_list"
  [IA64] Implement futex primitives
  [IA64-SGI] Do not request DMA memory for BTE
  [IA64] Move perfmon tables from thread_struct to pfm_context
  [IA64] Add interface so modules can discover whether multithreading is on.
  [IA64] kprobes: fixup the pagefault exception caused by probehandlers
  [IA64] kprobe opcode 16 bytes alignment on IA64
  [IA64] esi-support
  [IA64] Add "model name" to /proc/cpuinfo

arch/ia64/Kconfig
arch/ia64/kernel/perfmon.c
arch/ia64/mm/contig.c
arch/ia64/mm/discontig.c
include/asm-ia64/smp.h

diff --combined arch/ia64/Kconfig
@@@ -66,6 -66,15 +66,6 @@@ config IA64_UNCACHED_ALLOCATOR
        bool
        select GENERIC_ALLOCATOR
  
 -config DMA_IS_DMA32
 -      bool
 -      default y
 -
 -config DMA_IS_NORMAL
 -      bool
 -      depends on IA64_SGI_SN2
 -      default y
 -
  config AUDIT_ARCH
        bool
        default y
@@@ -356,9 -365,6 +356,9 @@@ config NODES_SHIFT
          MAX_NUMNODES will be 2^(This value).
          If in doubt, use the default.
  
 +config ARCH_POPULATES_NODE_MAP
 +      def_bool y
 +
  # VIRTUAL_MEM_MAP and FLAT_NODE_MEM_MAP are functionally equivalent.
  # VIRTUAL_MEM_MAP has been retained for historical reasons.
  config VIRTUAL_MEM_MAP
@@@ -423,6 -429,14 +423,14 @@@ config IA64_PALINFO
  config SGI_SN
        def_bool y if (IA64_SGI_SN2 || IA64_GENERIC)
  
+ config IA64_ESI
+       bool "ESI (Extensible SAL Interface) support"
+       help
+         If you say Y here, support is built into the kernel to
+         make ESI calls.  ESI calls are used to support vendor-specific
+         firmware extensions, such as the ability to inject memory errors
+         for test purposes.  If you're unsure, say N.
+
  source "drivers/sn/Kconfig"
  
  source "drivers/firmware/Kconfig"
diff --combined arch/ia64/kernel/perfmon.c
@@@ -34,7 -34,6 +34,7 @@@
  #include <linux/file.h>
  #include <linux/poll.h>
  #include <linux/vfs.h>
 +#include <linux/smp.h>
  #include <linux/pagemap.h>
  #include <linux/mount.h>
  #include <linux/bitops.h>
@@@ -63,6 -62,9 +63,9 @@@
  
  #define PFM_INVALID_ACTIVATION        (~0UL)
  
+ #define PFM_NUM_PMC_REGS      64      /* PMC save area for ctxsw */
+ #define PFM_NUM_PMD_REGS      64      /* PMD save area for ctxsw */
+
  /*
   * depth of message queue
   */
@@@ -297,14 -299,17 +300,17 @@@ typedef struct pfm_context {
        unsigned long           ctx_reload_pmcs[4];     /* bitmask of force reload PMC on ctxsw in */
        unsigned long           ctx_used_monitors[4];   /* bitmask of monitor PMC being used */
  
-       unsigned long           ctx_pmcs[IA64_NUM_PMC_REGS];    /*  saved copies of PMC values */
+       unsigned long           ctx_pmcs[PFM_NUM_PMC_REGS];     /*  saved copies of PMC values */
  
        unsigned int            ctx_used_ibrs[1];               /* bitmask of used IBR (speedup ctxsw in) */
        unsigned int            ctx_used_dbrs[1];               /* bitmask of used DBR (speedup ctxsw in) */
        unsigned long           ctx_dbrs[IA64_NUM_DBG_REGS];    /* DBR values (cache) when not loaded */
        unsigned long           ctx_ibrs[IA64_NUM_DBG_REGS];    /* IBR values (cache) when not loaded */
  
-       pfm_counter_t           ctx_pmds[IA64_NUM_PMD_REGS]; /* software state for PMDS */
+       pfm_counter_t           ctx_pmds[PFM_NUM_PMD_REGS]; /* software state for PMDS */
+       unsigned long           th_pmcs[PFM_NUM_PMC_REGS];      /* PMC thread save state */
+       unsigned long           th_pmds[PFM_NUM_PMD_REGS];      /* PMD thread save state */
  
        u64                     ctx_saved_psr_up;       /* only contains psr.up value */
  
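The hunk above is the heart of "[IA64] Move perfmon tables from thread_struct to pfm_context": the fixed-size PMC/PMD save areas now live in the perfmon context, and every context-switch path below changes from task->thread.pmcs[]/pmds[] to ctx->th_pmcs[]/th_pmds[]. A condensed sketch of the resulting save path, distilled from pfm_save_regs() later in this diff (not a verbatim copy; assumes it sits inside perfmon.c):

/* Sketch: after this series, all PMU thread state is reached through
 * the perfmon context rather than task->thread. */
static void example_save_path(struct task_struct *task)
{
        pfm_context_t *ctx = PFM_GET_CTX(task);

        if (ctx == NULL)
                return;

        /* save only the PMDs this context uses */
        pfm_save_pmds(ctx->th_pmds, ctx->ctx_used_pmds[0]);

        /* PMC0 carries the freeze/overflow status needed on restore */
        ctx->th_pmcs[0] = ia64_get_pmc(0);

        /* unfreeze the PMU if overflows were pending */
        if (ctx->th_pmcs[0] & ~0x1UL)
                pfm_unfreeze_pmu();
}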
@@@ -868,7 -873,6 +874,6 @@@ static void
  pfm_mask_monitoring(struct task_struct *task)
  {
        pfm_context_t *ctx = PFM_GET_CTX(task);
-       struct thread_struct *th = &task->thread;
        unsigned long mask, val, ovfl_mask;
        int i;
  
         * So in both cases, the live register contains the owner's
         * state. We can ONLY touch the PMU registers and NOT the PSR.
         *
-        * As a consequence to this call, the thread->pmds[] array
+        * As a consequence to this call, the ctx->th_pmds[] array
         * contains stale information which must be ignored
         * when context is reloaded AND monitoring is active (see
         * pfm_restart).
        mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER;
        for(i= PMU_FIRST_COUNTER; mask; i++, mask>>=1) {
                if ((mask & 0x1) == 0UL) continue;
-               ia64_set_pmc(i, th->pmcs[i] & ~0xfUL);
-               th->pmcs[i] &= ~0xfUL;
-               DPRINT_ovfl(("pmc[%d]=0x%lx\n", i, th->pmcs[i]));
+               ia64_set_pmc(i, ctx->th_pmcs[i] & ~0xfUL);
+               ctx->th_pmcs[i] &= ~0xfUL;
+               DPRINT_ovfl(("pmc[%d]=0x%lx\n", i, ctx->th_pmcs[i]));
        }
        /*
         * make all of this visible
@@@ -943,7 -947,6 +948,6 @@@ static void
  pfm_restore_monitoring(struct task_struct *task)
  {
        pfm_context_t *ctx = PFM_GET_CTX(task);
-       struct thread_struct *th = &task->thread;
        unsigned long mask, ovfl_mask;
        unsigned long psr, val;
        int i, is_system;
        mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER;
        for(i= PMU_FIRST_COUNTER; mask; i++, mask>>=1) {
                if ((mask & 0x1) == 0UL) continue;
-               th->pmcs[i] = ctx->ctx_pmcs[i];
-               ia64_set_pmc(i, th->pmcs[i]);
-               DPRINT(("[%d] pmc[%d]=0x%lx\n", task->pid, i, th->pmcs[i]));
+               ctx->th_pmcs[i] = ctx->ctx_pmcs[i];
+               ia64_set_pmc(i, ctx->th_pmcs[i]);
+               DPRINT(("[%d] pmc[%d]=0x%lx\n", task->pid, i, ctx->th_pmcs[i]));
        }
        ia64_srlz_d();
  
@@@ -1070,7 -1073,6 +1074,6 @@@ pfm_restore_pmds(unsigned long *pmds, unsigned long mask)
  static inline void
  pfm_copy_pmds(struct task_struct *task, pfm_context_t *ctx)
  {
-       struct thread_struct *thread = &task->thread;
        unsigned long ovfl_val = pmu_conf->ovfl_val;
        unsigned long mask = ctx->ctx_all_pmds[0];
        unsigned long val;
                        ctx->ctx_pmds[i].val = val & ~ovfl_val;
                        val &= ovfl_val;
                }
-               thread->pmds[i] = val;
+               ctx->th_pmds[i] = val;
  
                DPRINT(("pmd[%d]=0x%lx soft_val=0x%lx\n",
                        i,
-                       thread->pmds[i],
+                       ctx->th_pmds[i],
                        ctx->ctx_pmds[i].val));
        }
  }
  static inline void
  pfm_copy_pmcs(struct task_struct *task, pfm_context_t *ctx)
  {
-       struct thread_struct *thread = &task->thread;
        unsigned long mask = ctx->ctx_all_pmcs[0];
        int i;
  
  
        for (i=0; mask; i++, mask>>=1) {
                /* masking 0 with ovfl_val yields 0 */
-               thread->pmcs[i] = ctx->ctx_pmcs[i];
-               DPRINT(("pmc[%d]=0x%lx\n", i, thread->pmcs[i]));
+               ctx->th_pmcs[i] = ctx->ctx_pmcs[i];
+               DPRINT(("pmc[%d]=0x%lx\n", i, ctx->th_pmcs[i]));
        }
  }
  
@@@ -2860,7 -2861,6 +2862,6 @@@ pfm_reset_regs(pfm_context_t *ctx, unsigned long *ovfl_regs, int is_long_reset)
  static int
  pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
  {
-       struct thread_struct *thread = NULL;
        struct task_struct *task;
        pfarg_reg_t *req = (pfarg_reg_t *)arg;
        unsigned long value, pmc_pm;
        if (state == PFM_CTX_ZOMBIE) return -EINVAL;
  
        if (is_loaded) {
-               thread = &task->thread;
                /*
                 * In system wide and when the context is loaded, access can only happen
                 * when the caller is running on the CPU being monitored by the session.
                 *
                 * The value in ctx_pmcs[] can only be changed in pfm_write_pmcs().
                 *
-                * The value in thread->pmcs[] may be modified on overflow, i.e.,  when
+                * The value in th_pmcs[] may be modified on overflow, i.e.,  when
                 * monitoring needs to be stopped.
                 */
                if (is_monitor) CTX_USED_MONITOR(ctx, 1UL << cnum);
                        /*
                         * write thread state
                         */
-                       if (is_system == 0) thread->pmcs[cnum] = value;
+                       if (is_system == 0) ctx->th_pmcs[cnum] = value;
  
                        /*
                         * write hardware register if we can
@@@ -3102,7 -3101,6 +3102,6 @@@ error
  static int
  pfm_write_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
  {
-       struct thread_struct *thread = NULL;
        struct task_struct *task;
        pfarg_reg_t *req = (pfarg_reg_t *)arg;
        unsigned long value, hw_value, ovfl_mask;
         * the owner of the local PMU.
         */
        if (likely(is_loaded)) {
-               thread = &task->thread;
                /*
                 * In system wide and when the context is loaded, access can only happen
                 * when the caller is running on the CPU being monitored by the session.
                        /*
                         * write thread state
                         */
-                       if (is_system == 0) thread->pmds[cnum] = hw_value;
+                       if (is_system == 0) ctx->th_pmds[cnum] = hw_value;
  
                        /*
                         * write hardware register if we can
@@@ -3300,7 -3297,6 +3298,6 @@@ abort_mission
  static int
  pfm_read_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
  {
-       struct thread_struct *thread = NULL;
        struct task_struct *task;
        unsigned long val = 0UL, lval, ovfl_mask, sval;
        pfarg_reg_t *req = (pfarg_reg_t *)arg;
        if (state == PFM_CTX_ZOMBIE) return -EINVAL;
  
        if (likely(is_loaded)) {
-               thread = &task->thread;
                /*
                 * In system wide and when the context is loaded, access can only happen
                 * when the caller is running on the CPU being monitored by the session.
                         * if context is zombie, then task does not exist anymore.
                         * In this case, we use the full value saved in the context (pfm_flush_regs()).
                         */
-                       val = is_loaded ? thread->pmds[cnum] : 0UL;
+                       val = is_loaded ? ctx->th_pmds[cnum] : 0UL;
                }
                rd_func = pmu_conf->pmd_desc[cnum].read_check;
  
@@@ -4355,8 -4350,8 +4351,8 @@@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
        pfm_copy_pmds(task, ctx);
        pfm_copy_pmcs(task, ctx);
  
-       pmcs_source = thread->pmcs;
-       pmds_source = thread->pmds;
+       pmcs_source = ctx->th_pmcs;
+       pmds_source = ctx->th_pmds;
  
        /*
         * always the case for system-wide
  pfm_save_regs(struct task_struct *task)
  {
        pfm_context_t *ctx;
-       struct thread_struct *t;
        unsigned long flags;
        u64 psr;
  
  
        ctx = PFM_GET_CTX(task);
        if (ctx == NULL) return;
-       t = &task->thread;
  
        /*
         * we always come here with interrupts ALREADY disabled by
         * guarantee we will be schedule at that same
         * CPU again.
         */
-       pfm_save_pmds(t->pmds, ctx->ctx_used_pmds[0]);
+       pfm_save_pmds(ctx->th_pmds, ctx->ctx_used_pmds[0]);
  
        /*
         * save pmc0 ia64_srlz_d() done in pfm_save_pmds()
         * we will need it on the restore path to check
         * for pending overflow.
         */
-       t->pmcs[0] = ia64_get_pmc(0);
+       ctx->th_pmcs[0] = ia64_get_pmc(0);
  
        /*
         * unfreeze PMU if had pending overflows
         */
-       if (t->pmcs[0] & ~0x1UL) pfm_unfreeze_pmu();
+       if (ctx->th_pmcs[0] & ~0x1UL) pfm_unfreeze_pmu();
  
        /*
         * finally, allow context access.
@@@ -5987,7 -5980,6 +5981,6 @@@ static void
  pfm_lazy_save_regs (struct task_struct *task)
  {
        pfm_context_t *ctx;
-       struct thread_struct *t;
        unsigned long flags;
  
        { u64 psr  = pfm_get_psr();
        }
  
        ctx = PFM_GET_CTX(task);
-       t   = &task->thread;
  
        /*
         * we need to mask PMU overflow here to
        /*
         * save all the pmds we use
         */
-       pfm_save_pmds(t->pmds, ctx->ctx_used_pmds[0]);
+       pfm_save_pmds(ctx->th_pmds, ctx->ctx_used_pmds[0]);
  
        /*
         * save pmc0 ia64_srlz_d() done in pfm_save_pmds()
         * it is needed to check for pended overflow
         * on the restore path
         */
-       t->pmcs[0] = ia64_get_pmc(0);
+       ctx->th_pmcs[0] = ia64_get_pmc(0);
  
        /*
         * unfreeze PMU if had pending overflows
         */
-       if (t->pmcs[0] & ~0x1UL) pfm_unfreeze_pmu();
+       if (ctx->th_pmcs[0] & ~0x1UL) pfm_unfreeze_pmu();
  
        /*
         * now get can unmask PMU interrupts, they will
@@@ -6051,7 -6042,6 +6043,6 @@@ void
  pfm_load_regs (struct task_struct *task)
  {
        pfm_context_t *ctx;
-       struct thread_struct *t;
        unsigned long pmc_mask = 0UL, pmd_mask = 0UL;
        unsigned long flags;
        u64 psr, psr_up;
  
        BUG_ON(GET_PMU_OWNER());
  
-       t     = &task->thread;
        /*
         * possible on unload
         */
-       if (unlikely((t->flags & IA64_THREAD_PM_VALID) == 0)) return;
+       if (unlikely((task->thread.flags & IA64_THREAD_PM_VALID) == 0)) return;
  
        /*
         * we always come here with interrupts ALREADY disabled by
         *
         * XXX: optimize here
         */
-       if (pmd_mask) pfm_restore_pmds(t->pmds, pmd_mask);
-       if (pmc_mask) pfm_restore_pmcs(t->pmcs, pmc_mask);
+       if (pmd_mask) pfm_restore_pmds(ctx->th_pmds, pmd_mask);
+       if (pmc_mask) pfm_restore_pmcs(ctx->th_pmcs, pmc_mask);
  
        /*
         * check for pending overflow at the time the state
         * was saved.
         */
-       if (unlikely(PMC0_HAS_OVFL(t->pmcs[0]))) {
+       if (unlikely(PMC0_HAS_OVFL(ctx->th_pmcs[0]))) {
                /*
                 * reload pmc0 with the overflow information
                 * On McKinley PMU, this will trigger a PMU interrupt
                 */
-               ia64_set_pmc(0, t->pmcs[0]);
+               ia64_set_pmc(0, ctx->th_pmcs[0]);
                ia64_srlz_d();
-               t->pmcs[0] = 0UL;
+               ctx->th_pmcs[0] = 0UL;
  
                /*
                 * will replay the PMU interrupt
  void
  pfm_load_regs (struct task_struct *task)
  {
-       struct thread_struct *t;
        pfm_context_t *ctx;
        struct task_struct *owner;
        unsigned long pmd_mask, pmc_mask;
  
        owner = GET_PMU_OWNER();
        ctx   = PFM_GET_CTX(task);
-       t     = &task->thread;
        psr   = pfm_get_psr();
  
        BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
         */
        pmc_mask = ctx->ctx_all_pmcs[0];
  
-       pfm_restore_pmds(t->pmds, pmd_mask);
-       pfm_restore_pmcs(t->pmcs, pmc_mask);
+       pfm_restore_pmds(ctx->th_pmds, pmd_mask);
+       pfm_restore_pmcs(ctx->th_pmcs, pmc_mask);
  
        /*
         * check for pending overflow at the time the state
         * was saved.
         */
-       if (unlikely(PMC0_HAS_OVFL(t->pmcs[0]))) {
+       if (unlikely(PMC0_HAS_OVFL(ctx->th_pmcs[0]))) {
                /*
                 * reload pmc0 with the overflow information
                 * On McKinley PMU, this will trigger a PMU interrupt
                 */
-               ia64_set_pmc(0, t->pmcs[0]);
+               ia64_set_pmc(0, ctx->th_pmcs[0]);
                ia64_srlz_d();
  
-               t->pmcs[0] = 0UL;
+               ctx->th_pmcs[0] = 0UL;
  
                /*
                 * will replay the PMU interrupt
@@@ -6377,11 -6364,11 +6365,11 @@@ pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx)
                 */
                pfm_unfreeze_pmu();
        } else {
-               pmc0 = task->thread.pmcs[0];
+               pmc0 = ctx->th_pmcs[0];
                /*
                 * clear whatever overflow status bits there were
                 */
-               task->thread.pmcs[0] = 0;
+               ctx->th_pmcs[0] = 0;
        }
        ovfl_val = pmu_conf->ovfl_val;
        /*
                /*
                 * can access PMU always true in system wide mode
                 */
-               val = pmd_val = can_access_pmu ? ia64_get_pmd(i) : task->thread.pmds[i];
+               val = pmd_val = can_access_pmu ? ia64_get_pmd(i) : ctx->th_pmds[i];
  
                if (PMD_IS_COUNTING(i)) {
                        DPRINT(("[%d] pmd[%d] ctx_pmd=0x%lx hw_pmd=0x%lx\n",
  
                DPRINT(("[%d] ctx_pmd[%d]=0x%lx  pmd_val=0x%lx\n", task->pid, i, val, pmd_val));
  
-               if (is_self) task->thread.pmds[i] = pmd_val;
+               if (is_self) ctx->th_pmds[i] = pmd_val;
  
                ctx->ctx_pmds[i].val = val;
        }
@@@ -6678,7 -6665,7 +6666,7 @@@ pfm_init(void)
               ffz(pmu_conf->ovfl_val));
  
        /* sanity check */
-       if (pmu_conf->num_pmds >= IA64_NUM_PMD_REGS || pmu_conf->num_pmcs >= IA64_NUM_PMC_REGS) {
+       if (pmu_conf->num_pmds >= PFM_NUM_PMD_REGS || pmu_conf->num_pmcs >= PFM_NUM_PMC_REGS) {
                printk(KERN_ERR "perfmon: not enough pmc/pmd, perfmon disabled\n");
                pmu_conf = NULL;
                return -1;
@@@ -6753,7 -6740,6 +6741,6 @@@ void
  dump_pmu_state(const char *from)
  {
        struct task_struct *task;
-       struct thread_struct *t;
        struct pt_regs *regs;
        pfm_context_t *ctx;
        unsigned long psr, dcr, info, flags;
        ia64_psr(regs)->up = 0;
        ia64_psr(regs)->pp = 0;
  
-       t = &current->thread;
        for (i=1; PMC_IS_LAST(i) == 0; i++) {
                if (PMC_IS_IMPL(i) == 0) continue;
-               printk("->CPU%d pmc[%d]=0x%lx thread_pmc[%d]=0x%lx\n", this_cpu, i, ia64_get_pmc(i), i, t->pmcs[i]);
+               printk("->CPU%d pmc[%d]=0x%lx thread_pmc[%d]=0x%lx\n", this_cpu, i, ia64_get_pmc(i), i, ctx->th_pmcs[i]);
        }
  
        for (i=1; PMD_IS_LAST(i) == 0; i++) {
                if (PMD_IS_IMPL(i) == 0) continue;
-               printk("->CPU%d pmd[%d]=0x%lx thread_pmd[%d]=0x%lx\n", this_cpu, i, ia64_get_pmd(i), i, t->pmds[i]);
+               printk("->CPU%d pmd[%d]=0x%lx thread_pmd[%d]=0x%lx\n", this_cpu, i, ia64_get_pmd(i), i, ctx->th_pmds[i]);
        }
  
        if (ctx) {
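A recurring idiom throughout this file is the test th_pmcs[0] & ~0x1UL: on IA-64, bit 0 of PMC0 is the freeze bit, and the remaining implemented bits record which counters overflowed, so any nonzero bit outside bit 0 means an overflow was pending when the state was saved. A one-line illustrative helper (the helper name is mine; the file's own macro for this check is PMC0_HAS_OVFL):

/* Illustrative: nonzero bits outside the freeze bit (bit 0) of PMC0
 * mean counters overflowed while state was saved; compare the
 * "unfreeze PMU if had pending overflows" tests above. */
static inline int example_pmc0_pending_ovfl(pfm_context_t *ctx)
{
        return (ctx->th_pmcs[0] & ~0x1UL) != 0UL;
}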
diff --combined arch/ia64/mm/contig.c
@@@ -26,6 -26,7 +26,6 @@@
  #include <asm/mca.h>
  
  #ifdef CONFIG_VIRTUAL_MEM_MAP
 -static unsigned long num_dma_physpages;
  static unsigned long max_gap;
  #endif
  
@@@ -40,10 -41,11 +40,11 @@@ show_mem (void)
        int i, total = 0, reserved = 0;
        int shared = 0, cached = 0;
  
-       printk("Mem-info:\n");
+       printk(KERN_INFO "Mem-info:\n");
        show_free_areas();
  
-       printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
+       printk(KERN_INFO "Free swap:       %6ldkB\n",
+              nr_swap_pages<<(PAGE_SHIFT-10));
        i = max_mapnr;
        for (i = 0; i < max_mapnr; i++) {
                if (!pfn_valid(i)) {
                else if (page_count(mem_map + i))
                        shared += page_count(mem_map + i) - 1;
        }
-       printk("%d pages of RAM\n", total);
-       printk("%d reserved pages\n", reserved);
-       printk("%d pages shared\n", shared);
-       printk("%d pages swap cached\n", cached);
-       printk("%ld pages in page table cache\n",
-               pgtable_quicklist_total_size());
+       printk(KERN_INFO "%d pages of RAM\n", total);
+       printk(KERN_INFO "%d reserved pages\n", reserved);
+       printk(KERN_INFO "%d pages shared\n", shared);
+       printk(KERN_INFO "%d pages swap cached\n", cached);
+       printk(KERN_INFO "%ld pages in page table cache\n",
+              pgtable_quicklist_total_size());
  }
  
  /* physical address where the bootmem map is located */
@@@ -217,6 -219,18 +218,6 @@@ count_pages (u64 start, u64 end, void *arg)
        return 0;
  }
  
 -#ifdef CONFIG_VIRTUAL_MEM_MAP
 -static int
 -count_dma_pages (u64 start, u64 end, void *arg)
 -{
 -      unsigned long *count = arg;
 -
 -      if (start < MAX_DMA_ADDRESS)
 -              *count += (min(end, MAX_DMA_ADDRESS) - start) >> PAGE_SHIFT;
 -      return 0;
 -}
 -#endif
 -
  /*
   * Set up the page tables.
   */
@@@ -225,22 -239,45 +226,22 @@@ void __init
  paging_init (void)
  {
        unsigned long max_dma;
 -      unsigned long zones_size[MAX_NR_ZONES];
 -#ifdef CONFIG_VIRTUAL_MEM_MAP
 -      unsigned long zholes_size[MAX_NR_ZONES];
 -#endif
 -
 -      /* initialize mem_map[] */
 -
 -      memset(zones_size, 0, sizeof(zones_size));
 +      unsigned long nid = 0;
 +      unsigned long max_zone_pfns[MAX_NR_ZONES];
  
        num_physpages = 0;
        efi_memmap_walk(count_pages, &num_physpages);
  
        max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
 +      max_zone_pfns[ZONE_DMA] = max_dma;
 +      max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
  
  #ifdef CONFIG_VIRTUAL_MEM_MAP
 -      memset(zholes_size, 0, sizeof(zholes_size));
 -
 -      num_dma_physpages = 0;
 -      efi_memmap_walk(count_dma_pages, &num_dma_physpages);
 -
 -      if (max_low_pfn < max_dma) {
 -              zones_size[ZONE_DMA] = max_low_pfn;
 -              zholes_size[ZONE_DMA] = max_low_pfn - num_dma_physpages;
 -      } else {
 -              zones_size[ZONE_DMA] = max_dma;
 -              zholes_size[ZONE_DMA] = max_dma - num_dma_physpages;
 -              if (num_physpages > num_dma_physpages) {
 -                      zones_size[ZONE_NORMAL] = max_low_pfn - max_dma;
 -                      zholes_size[ZONE_NORMAL] =
 -                              ((max_low_pfn - max_dma) -
 -                               (num_physpages - num_dma_physpages));
 -              }
 -      }
 -
 +      efi_memmap_walk(register_active_ranges, &nid);
        efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
        if (max_gap < LARGE_GAP) {
                vmem_map = (struct page *) 0;
 -              free_area_init_node(0, NODE_DATA(0), zones_size, 0,
 -                                  zholes_size);
 +              free_area_init_nodes(max_zone_pfns);
        } else {
                unsigned long map_size;
  
                vmem_map = (struct page *) vmalloc_end;
                efi_memmap_walk(create_mem_map_page_table, NULL);
  
 -              NODE_DATA(0)->node_mem_map = vmem_map;
 -              free_area_init_node(0, NODE_DATA(0), zones_size,
 -                                  0, zholes_size);
 +              /*
 +               * alloc_node_mem_map makes an adjustment for mem_map
 +               * which isn't compatible with vmem_map.
 +               */
 +              NODE_DATA(0)->node_mem_map = vmem_map +
 +                      find_min_pfn_with_active_regions();
 +              free_area_init_nodes(max_zone_pfns);
  
                printk("Virtual mem_map starts at 0x%p\n", mem_map);
        }
  #else /* !CONFIG_VIRTUAL_MEM_MAP */
 -      if (max_low_pfn < max_dma)
 -              zones_size[ZONE_DMA] = max_low_pfn;
 -      else {
 -              zones_size[ZONE_DMA] = max_dma;
 -              zones_size[ZONE_NORMAL] = max_low_pfn - max_dma;
 -      }
 -      free_area_init(zones_size);
 +      add_active_range(0, 0, max_low_pfn);
 +      free_area_init_nodes(max_zone_pfns);
  #endif /* !CONFIG_VIRTUAL_MEM_MAP */
        zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
  }
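The paging_init() rewrite above is the consumer side of ARCH_POPULATES_NODE_MAP: instead of computing zones_size[]/zholes_size[] by hand, the architecture registers its usable PFN ranges and passes only the upper boundary PFN of each zone; free_area_init_nodes() then derives per-node zone sizes and holes itself. A condensed sketch of the new sequence (names as used in this diff; the EFI memmap walk and the vmem_map special case are omitted):

/* Condensed sketch of the ARCH_POPULATES_NODE_MAP flow used above. */
static void __init example_populate_node_map(unsigned long max_dma_pfn,
                                             unsigned long max_low_pfn)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES];

        /* 1. describe the memory that actually exists, range by range */
        add_active_range(0 /* nid */, 0, max_low_pfn);

        /* 2. hand the core only the zone boundaries; it computes sizes
         *    and holes from the registered active ranges */
        max_zone_pfns[ZONE_DMA]    = max_dma_pfn;
        max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
        free_area_init_nodes(max_zone_pfns);
}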
diff --combined arch/ia64/mm/discontig.c
@@@ -547,15 -547,16 +547,16 @@@ void show_mem(void)
        unsigned long total_present = 0;
        pg_data_t *pgdat;
  
-       printk("Mem-info:\n");
+       printk(KERN_INFO "Mem-info:\n");
        show_free_areas();
-       printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
+       printk(KERN_INFO "Free swap:       %6ldkB\n",
+              nr_swap_pages<<(PAGE_SHIFT-10));
+       printk(KERN_INFO "Node memory in pages:\n");
        for_each_online_pgdat(pgdat) {
                unsigned long present;
                unsigned long flags;
                int shared = 0, cached = 0, reserved = 0;
  
-               printk("Node ID: %d\n", pgdat->node_id);
                pgdat_resize_lock(pgdat, &flags);
                present = pgdat->node_present_pages;
                for(i = 0; i < pgdat->node_spanned_pages; i++) {
                total_reserved += reserved;
                total_cached += cached;
                total_shared += shared;
-               printk("\t%ld pages of RAM\n", present);
-               printk("\t%d reserved pages\n", reserved);
-               printk("\t%d pages shared\n", shared);
-               printk("\t%d pages swap cached\n", cached);
+               printk(KERN_INFO "Node %4d:  RAM: %11ld, rsvd: %8d, "
+                      "shrd: %10d, swpd: %10d\n", pgdat->node_id,
+                      present, reserved, shared, cached);
        }
-       printk("%ld pages of RAM\n", total_present);
-       printk("%d reserved pages\n", total_reserved);
-       printk("%d pages shared\n", total_shared);
-       printk("%d pages swap cached\n", total_cached);
-       printk("Total of %ld pages in page table cache\n",
-               pgtable_quicklist_total_size());
-       printk("%d free buffer pages\n", nr_free_buffer_pages());
+       printk(KERN_INFO "%ld pages of RAM\n", total_present);
+       printk(KERN_INFO "%d reserved pages\n", total_reserved);
+       printk(KERN_INFO "%d pages shared\n", total_shared);
+       printk(KERN_INFO "%d pages swap cached\n", total_cached);
+       printk(KERN_INFO "Total of %ld pages in page table cache\n",
+              pgtable_quicklist_total_size());
+       printk(KERN_INFO "%d free buffer pages\n", nr_free_buffer_pages());
  }
  
  /**
@@@ -654,7 -654,6 +654,7 @@@ static __init int count_node_pages(unsigned long start, unsigned long len, int node)
  {
        unsigned long end = start + len;
  
 +      add_active_range(node, start >> PAGE_SHIFT, end >> PAGE_SHIFT);
        mem_data[node].num_physpages += len >> PAGE_SHIFT;
        if (start <= __pa(MAX_DMA_ADDRESS))
                mem_data[node].num_dma_physpages +=
  void __init paging_init(void)
  {
        unsigned long max_dma;
 -      unsigned long zones_size[MAX_NR_ZONES];
 -      unsigned long zholes_size[MAX_NR_ZONES];
        unsigned long pfn_offset = 0;
 +      unsigned long max_pfn = 0;
        int node;
 +      unsigned long max_zone_pfns[MAX_NR_ZONES];
  
        max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
  
  #endif
  
        for_each_online_node(node) {
 -              memset(zones_size, 0, sizeof(zones_size));
 -              memset(zholes_size, 0, sizeof(zholes_size));
 -
                num_physpages += mem_data[node].num_physpages;
 -
 -              if (mem_data[node].min_pfn >= max_dma) {
 -                      /* All of this node's memory is above ZONE_DMA */
 -                      zones_size[ZONE_NORMAL] = mem_data[node].max_pfn -
 -                              mem_data[node].min_pfn;
 -                      zholes_size[ZONE_NORMAL] = mem_data[node].max_pfn -
 -                              mem_data[node].min_pfn -
 -                              mem_data[node].num_physpages;
 -              } else if (mem_data[node].max_pfn < max_dma) {
 -                      /* All of this node's memory is in ZONE_DMA */
 -                      zones_size[ZONE_DMA] = mem_data[node].max_pfn -
 -                              mem_data[node].min_pfn;
 -                      zholes_size[ZONE_DMA] = mem_data[node].max_pfn -
 -                              mem_data[node].min_pfn -
 -                              mem_data[node].num_dma_physpages;
 -              } else {
 -                      /* This node has memory in both zones */
 -                      zones_size[ZONE_DMA] = max_dma -
 -                              mem_data[node].min_pfn;
 -                      zholes_size[ZONE_DMA] = zones_size[ZONE_DMA] -
 -                              mem_data[node].num_dma_physpages;
 -                      zones_size[ZONE_NORMAL] = mem_data[node].max_pfn -
 -                              max_dma;
 -                      zholes_size[ZONE_NORMAL] = zones_size[ZONE_NORMAL] -
 -                              (mem_data[node].num_physpages -
 -                               mem_data[node].num_dma_physpages);
 -              }
 -
                pfn_offset = mem_data[node].min_pfn;
  
  #ifdef CONFIG_VIRTUAL_MEM_MAP
                NODE_DATA(node)->node_mem_map = vmem_map + pfn_offset;
  #endif
 -              free_area_init_node(node, NODE_DATA(node), zones_size,
 -                                  pfn_offset, zholes_size);
 +              if (mem_data[node].max_pfn > max_pfn)
 +                      max_pfn = mem_data[node].max_pfn;
        }
  
 +      max_zone_pfns[ZONE_DMA] = max_dma;
 +      max_zone_pfns[ZONE_NORMAL] = max_pfn;
 +      free_area_init_nodes(max_zone_pfns);
 +
        zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
  }
  
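discontig.c applies the same pattern per node: count_node_pages() registers each EFI range under its owning node via add_active_range(node, ...), and a single free_area_init_nodes() call replaces the old per-node free_area_init_node() loop with its hand-built hole arithmetic. A sketch of the per-node shape (mem_data[] and the helpers are the ones visible in this diff):

/* Sketch: NUMA variant -- ranges are registered per node, then one
 * core call covers all nodes. */
static void __init example_numa_populate(unsigned long max_dma_pfn)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES];
        unsigned long max_pfn = 0;
        int node;

        for_each_online_node(node) {
                /* ranges were already registered node-by-node in
                 * count_node_pages() above */
                if (mem_data[node].max_pfn > max_pfn)
                        max_pfn = mem_data[node].max_pfn;
        }

        max_zone_pfns[ZONE_DMA]    = max_dma_pfn;
        max_zone_pfns[ZONE_NORMAL] = max_pfn;
        free_area_init_nodes(max_zone_pfns);
}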
diff --combined include/asm-ia64/smp.h
@@@ -122,10 -122,13 +122,11 @@@ extern void __init smp_build_cpu_map(void)
  extern void __init init_smp_config (void);
  extern void smp_do_timer (struct pt_regs *regs);
  
 -extern int smp_call_function_single (int cpuid, void (*func) (void *info), void *info,
 -                                   int retry, int wait);
  extern void smp_send_reschedule (int cpu);
  extern void lock_ipi_calllock(void);
  extern void unlock_ipi_calllock(void);
  extern void identify_siblings (struct cpuinfo_ia64 *);
+ extern int is_multithreading_enabled(void);
  
  #else
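The smp.h hunk drops the duplicate smp_call_function_single() prototype and exports is_multithreading_enabled(), the interface from "[IA64] Add interface so modules can discover whether multithreading is on." A minimal sketch of a module consumer (all of the module boilerplate is illustrative; only is_multithreading_enabled() comes from this series):

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/smp.h>

static int __init mt_probe_init(void)
{
        printk(KERN_INFO "multithreading is %s\n",
               is_multithreading_enabled() ? "enabled" : "disabled");
        return 0;
}

static void __exit mt_probe_exit(void)
{
}

module_init(mt_probe_init);
module_exit(mt_probe_exit);
MODULE_LICENSE("GPL");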