x86 boot: show pfn addresses in hex not decimal in some kernel info printks
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6383557..c09c2c8 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -237,16 +237,7 @@ static void bad_page(struct page *page)
        printk(KERN_EMERG "Trying to fix it up, but a reboot is needed\n"
                KERN_EMERG "Backtrace:\n");
        dump_stack();
-       page->flags &= ~(1 << PG_lru    |
-                       1 << PG_private |
-                       1 << PG_locked  |
-                       1 << PG_active  |
-                       1 << PG_dirty   |
-                       1 << PG_reclaim |
-                       1 << PG_slab    |
-                       1 << PG_swapcache |
-                       1 << PG_writeback |
-                       1 << PG_buddy );
+       page->flags &= ~PAGE_FLAGS_CLEAR_WHEN_BAD;
        set_page_count(page, 0);
        reset_page_mapcount(page);
        page->mapping = NULL;
@@ -463,16 +454,7 @@ static inline int free_pages_check(struct page *page)
                (page->mapping != NULL)  |
                (page_get_page_cgroup(page) != NULL) |
                (page_count(page) != 0)  |
-               (page->flags & (
-                       1 << PG_lru     |
-                       1 << PG_private |
-                       1 << PG_locked  |
-                       1 << PG_active  |
-                       1 << PG_slab    |
-                       1 << PG_swapcache |
-                       1 << PG_writeback |
-                       1 << PG_reserved |
-                       1 << PG_buddy ))))
+               (page->flags & PAGE_FLAGS_CHECK_AT_FREE)))
                bad_page(page);
        if (PageDirty(page))
                __ClearPageDirty(page);
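
The ten-flag masks open-coded here (and in the prep-path hunk below) move behind
named macros, presumably in include/linux/page-flags.h. Reconstructed purely
from the bits this diff deletes, they would expand along these lines; the
shared PAGE_FLAGS subset is an assumption of this sketch, not quoted header
text:

    /* the eight bits common to all three deleted masks */
    #define PAGE_FLAGS  (1 << PG_lru   | 1 << PG_private   | \
                         1 << PG_locked | 1 << PG_active    | \
                         1 << PG_slab  | 1 << PG_swapcache  | \
                         1 << PG_writeback | 1 << PG_buddy)
    #define PAGE_FLAGS_CLEAR_WHEN_BAD (PAGE_FLAGS | 1 << PG_reclaim | 1 << PG_dirty)
    #define PAGE_FLAGS_CHECK_AT_FREE  (PAGE_FLAGS | 1 << PG_reserved)
    #define PAGE_FLAGS_CHECK_AT_PREP  (PAGE_FLAGS | 1 << PG_reserved | 1 << PG_dirty)
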
@@ -616,17 +598,7 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
                (page->mapping != NULL)  |
                (page_get_page_cgroup(page) != NULL) |
                (page_count(page) != 0)  |
-               (page->flags & (
-                       1 << PG_lru     |
-                       1 << PG_private |
-                       1 << PG_locked  |
-                       1 << PG_active  |
-                       1 << PG_dirty   |
-                       1 << PG_slab    |
-                       1 << PG_swapcache |
-                       1 << PG_writeback |
-                       1 << PG_reserved |
-                       1 << PG_buddy ))))
+               (page->flags & PAGE_FLAGS_CHECK_AT_PREP)))
                bad_page(page);
 
        /*
@@ -1396,6 +1368,9 @@ get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
 
        (void)first_zones_zonelist(zonelist, high_zoneidx, nodemask,
                                                        &preferred_zone);
+       if (!preferred_zone)
+               return NULL;
+
        classzone_idx = zone_idx(preferred_zone);
 
 zonelist_scan:
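
first_zones_zonelist() reports the first usable zone through &preferred_zone;
with a sufficiently restrictive nodemask it can find none and leave the pointer
NULL. The new guard matters because zone_idx() is bare pointer arithmetic, so
the very next line would otherwise dereference NULL:

    /* zone_idx(): the zone's offset within its node's zone array */
    #define zone_idx(zone)  ((zone) - (zone)->zone_pgdat->node_zones)
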
@@ -2353,7 +2328,6 @@ static void build_zonelists(pg_data_t *pgdat)
 static void build_zonelist_cache(pg_data_t *pgdat)
 {
        pgdat->node_zonelists[0].zlcache_ptr = NULL;
-       pgdat->node_zonelists[1].zlcache_ptr = NULL;
 }
 
 #endif /* CONFIG_NUMA */
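
This is the UMA (#else of CONFIG_NUMA) flavour of build_zonelist_cache(). The
dropped line matches the two-zonelist rework of this era, under which only NUMA
builds keep a second, GFP_THISNODE zonelist; roughly:

    /* shape of pg_data_t's zonelist array after the rework (a sketch) */
    #ifdef CONFIG_NUMA
    #define MAX_ZONELISTS 2     /* [0] fallback, [1] GFP_THISNODE */
    #else
    #define MAX_ZONELISTS 1     /* UMA: node_zonelists[1] does not exist */
    #endif

With that layout, the old store to node_zonelists[1].zlcache_ptr indexed past
the end of the array on UMA builds.
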
@@ -2804,7 +2778,7 @@ int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
        alloc_size = zone->wait_table_hash_nr_entries
                                        * sizeof(wait_queue_head_t);
 
-       if (system_state == SYSTEM_BOOTING) {
+       if (!slab_is_available()) {
                zone->wait_table = (wait_queue_head_t *)
                        alloc_bootmem_node(pgdat, alloc_size);
        } else {
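
system_state stays SYSTEM_BOOTING until just before init is exec'd, well after
the slab allocator comes up, so the old test could steer a late-boot caller
into bootmem after bootmem had been retired. slab_is_available() keys the
choice to the condition that actually matters; the SLUB flavour of the
predicate is roughly:

    int slab_is_available(void)
    {
        return slab_state >= UP;    /* true once kmem_cache_init() has run */
    }
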
@@ -2955,6 +2929,18 @@ void __init free_bootmem_with_active_regions(int nid,
        }
 }
 
+void __init work_with_active_regions(int nid, work_fn_t work_fn, void *data)
+{
+       int i;
+       int ret;
+
+       for_each_active_range_index_in_nid(i, nid) {
+               ret = work_fn(early_node_map[i].start_pfn,
+                             early_node_map[i].end_pfn, data);
+               if (ret)
+                       break;
+       }
+}
 /**
  * sparse_memory_present_with_active_regions - Call memory_present for each active range
  * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
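
work_with_active_regions() walks every registered (start_pfn, end_pfn) pair on
a node until the callback returns non-zero. The callback type and caller below
are illustrative only; work_fn_t is assumed to be the three-argument typedef
that the call site in the loop implies:

    /* assumed callback type, matching the call in the loop above */
    typedef int (*work_fn_t)(unsigned long start_pfn,
                             unsigned long end_pfn, void *data);

    /* hypothetical callback: total the pages in a node's active ranges */
    static int __init count_range(unsigned long start_pfn,
                                  unsigned long end_pfn, void *datap)
    {
        *(unsigned long *)datap += end_pfn - start_pfn;
        return 0;               /* non-zero would stop the walk early */
    }

    static unsigned long __init nr_active_pages(int nid)
    {
        unsigned long nr = 0;

        work_with_active_regions(nid, count_range, &nr);
        return nr;
    }
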
@@ -3378,7 +3364,8 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
                 * is used by this zone for memmap. This affects the watermark
                 * and per-cpu initialisations
                 */
-               memmap_pages = (size * sizeof(struct page)) >> PAGE_SHIFT;
+               memmap_pages =
+                       PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT;
                if (realsize >= memmap_pages) {
                        realsize -= memmap_pages;
                        printk(KERN_DEBUG
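
The old expression rounds down, so any zone whose memmap was not an exact
multiple of the page size had its last, partial page of memmap left out of the
accounting. A standalone arithmetic check (the 4 KiB page and 56-byte struct
page are hypothetical numbers):

    #include <stdio.h>

    #define PAGE_SHIFT   12
    #define PAGE_SIZE    (1UL << PAGE_SHIFT)
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    int main(void)
    {
        unsigned long bytes = 100000UL * 56;    /* 5600000 bytes of memmap */

        printf("%lu\n", bytes >> PAGE_SHIFT);              /* 1367: short */
        printf("%lu\n", PAGE_ALIGN(bytes) >> PAGE_SHIFT);  /* 1368: right */
        return 0;
    }
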
@@ -3486,6 +3473,11 @@ void __paginginit free_area_init_node(int nid, struct pglist_data *pgdat,
        calculate_node_totalpages(pgdat, zones_size, zholes_size);
 
        alloc_node_mem_map(pgdat);
+#ifdef CONFIG_FLAT_NODE_MEM_MAP
+       printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
+               nid, (unsigned long)pgdat,
+               (unsigned long)pgdat->node_mem_map);
+#endif
 
        free_area_init_core(pgdat, zones_size, zholes_size);
 }
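
The casts to unsigned long make the pointer values fit the pfn-style %08lx
format; on 32-bit x86, the target of this series, that prints the full
zero-padded pointer. An equivalent, cast-free spelling would have used printk's
pointer conversion:

    printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %p, node_mem_map %p\n",
            nid, pgdat, pgdat->node_mem_map);
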
@@ -3528,7 +3520,7 @@ void __init add_active_range(unsigned int nid, unsigned long start_pfn,
 {
        int i;
 
-       printk(KERN_DEBUG "Entering add_active_range(%d, %lu, %lu) "
+       printk(KERN_DEBUG "Entering add_active_range(%d, 0x%lx, 0x%lx) "
                          "%d entries of %d used\n",
                          nid, start_pfn, end_pfn,
                          nr_nodemap_entries, MAX_ACTIVE_REGIONS);
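
The switch to 0x%lx is the point of the commit title: a pfn is a physical
address shifted down by PAGE_SHIFT, so hex pfns line up with memory-map
boundaries in a way decimal never does. A small standalone illustration (the
pfn value is hypothetical):

    #include <stdio.h>

    int main(void)
    {
        unsigned long pfn = 0x100000;

        /* pfn << PAGE_SHIFT: visibly the 4 GiB boundary in hex */
        printf("phys 0x%llx\n", (unsigned long long)pfn << 12);
        printf("decimal pfn %lu\n", pfn);   /* 1048576: opaque */
        return 0;
    }
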
@@ -3572,27 +3564,68 @@ void __init add_active_range(unsigned int nid, unsigned long start_pfn,
 }
 
 /**
- * shrink_active_range - Shrink an existing registered range of PFNs
+ * remove_active_range - Shrink an existing registered range of PFNs
  * @nid: The node id the range is on that should be shrunk
- * @old_end_pfn: The old end PFN of the range
- * @new_end_pfn: The new PFN of the range
+ * @start_pfn: The start PFN of the range to remove
+ * @end_pfn: The end PFN of the range to remove
  *
  * i386 with NUMA use alloc_remap() to store a node_mem_map on a local node.
- * The map is kept at the end physical page range that has already been
- * registered with add_active_range(). This function allows an arch to shrink
- * an existing registered range.
+ * The map is kept near the end of the physical page range that has
+ * already been registered. This function allows an arch to shrink an
+ * existing registered range.
  */
-void __init shrink_active_range(unsigned int nid, unsigned long old_end_pfn,
-                                               unsigned long new_end_pfn)
+void __init remove_active_range(unsigned int nid, unsigned long start_pfn,
+                               unsigned long end_pfn)
 {
-       int i;
+       int i, j;
+       int removed = 0;
+
+       printk(KERN_DEBUG "remove_active_range (%d, %lu, %lu)\n",
+                         nid, start_pfn, end_pfn);
 
        /* Find the old active region end and shrink */
-       for_each_active_range_index_in_nid(i, nid)
-               if (early_node_map[i].end_pfn == old_end_pfn) {
-                       early_node_map[i].end_pfn = new_end_pfn;
-                       break;
+       for_each_active_range_index_in_nid(i, nid) {
+               if (early_node_map[i].start_pfn >= start_pfn &&
+                   early_node_map[i].end_pfn <= end_pfn) {
+                       /* clear it */
+                       early_node_map[i].start_pfn = 0;
+                       early_node_map[i].end_pfn = 0;
+                       removed = 1;
+                       continue;
+               }
+               if (early_node_map[i].start_pfn < start_pfn &&
+                   early_node_map[i].end_pfn > start_pfn) {
+                       unsigned long temp_end_pfn = early_node_map[i].end_pfn;
+                       early_node_map[i].end_pfn = start_pfn;
+                       if (temp_end_pfn > end_pfn)
+                               add_active_range(nid, end_pfn, temp_end_pfn);
+                       continue;
+               }
+               if (early_node_map[i].start_pfn >= start_pfn &&
+                   early_node_map[i].end_pfn > end_pfn &&
+                   early_node_map[i].start_pfn < end_pfn) {
+                       early_node_map[i].start_pfn = end_pfn;
+                       continue;
                }
+       }
+
+       if (!removed)
+               return;
+
+       /* remove the blank ones */
+       for (i = nr_nodemap_entries - 1; i > 0; i--) {
+               if (early_node_map[i].nid != nid)
+                       continue;
+               if (early_node_map[i].end_pfn)
+                       continue;
+               /* we found it, get rid of it */
+               for (j = i; j < nr_nodemap_entries - 1; j++)
+                       memcpy(&early_node_map[j], &early_node_map[j+1],
+                               sizeof(early_node_map[j]));
+               j = nr_nodemap_entries - 1;
+               memset(&early_node_map[j], 0, sizeof(early_node_map[j]));
+               nr_nodemap_entries--;
+       }
 }
 
 /**
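
The three branches in the new loop cover the ways a removal window can overlap
an existing entry: swallow it whole, cut into its tail (splitting it if the
window ends inside the entry), or clip its head. Tracing a hypothetical split
through the code above:

    /* node 0 covers pfns [0x100, 0x400); punch out [0x200, 0x300) */
    add_active_range(0, 0x100, 0x400);
    remove_active_range(0, 0x200, 0x300);
    /*
     * Middle-overlap branch: the entry's end_pfn is pulled back to
     * 0x200 and the surviving tail is re-registered via
     * add_active_range(), leaving [0x100, 0x200) and [0x300, 0x400).
     */
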
@@ -3903,7 +3936,7 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
        for (i = 0; i < MAX_NR_ZONES; i++) {
                if (i == ZONE_MOVABLE)
                        continue;
-               printk("  %-8s %8lu -> %8lu\n",
+               printk("  %-8s 0x%8lx -> 0x%8lx\n",
                                zone_names[i],
                                arch_zone_lowest_possible_pfn[i],
                                arch_zone_highest_possible_pfn[i]);
@@ -3919,7 +3952,7 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
        /* Print out the early_node_map[] */
        printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries);
        for (i = 0; i < nr_nodemap_entries; i++)
-               printk("  %3d: %8lu -> %8lu\n", early_node_map[i].nid,
+               printk("  %3d: 0x%8lx -> 0x%8lx\n", early_node_map[i].nid,
                                                early_node_map[i].start_pfn,
                                                early_node_map[i].end_pfn);
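
One quirk of spelling the format as 0x%8lx: the width padding applies to the
digits alone, so short values print with spaces between the 0x prefix and the
number. Feeding the new format hypothetical pfns shows it:

    #include <stdio.h>

    int main(void)
    {
        /* prints "    0: 0x     100 -> 0x   7fa00" */
        printf("  %3d: 0x%8lx -> 0x%8lx\n", 0, 0x100UL, 0x7fa00UL);
        /* "%#10lx" or "0x%08lx" would keep the prefix attached */
        return 0;
    }
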