pandora-kernel.git: mm/page_alloc.c
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index bdd5c43..f32fae3 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -237,16 +237,7 @@ static void bad_page(struct page *page)
        printk(KERN_EMERG "Trying to fix it up, but a reboot is needed\n"
                KERN_EMERG "Backtrace:\n");
        dump_stack();
-       page->flags &= ~(1 << PG_lru    |
-                       1 << PG_private |
-                       1 << PG_locked  |
-                       1 << PG_active  |
-                       1 << PG_dirty   |
-                       1 << PG_reclaim |
-                       1 << PG_slab    |
-                       1 << PG_swapcache |
-                       1 << PG_writeback |
-                       1 << PG_buddy );
+       page->flags &= ~PAGE_FLAGS_CLEAR_WHEN_BAD;
        set_page_count(page, 0);
        reset_page_mapcount(page);
        page->mapping = NULL;
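
Note: the open-coded mask deleted above is exactly what the new symbol stands in for. Reconstructed purely from the removed lines, PAGE_FLAGS_CLEAR_WHEN_BAD (defined in include/linux/page-flags.h, not shown in this diff) presumably expands to something equivalent to the sketch below, so bad_page() keeps clearing the same set of bits:

/* Sketch reconstructed from the lines removed above; the authoritative
 * definition lives in include/linux/page-flags.h. */
#define PAGE_FLAGS_CLEAR_WHEN_BAD \
	(1 << PG_lru  | 1 << PG_private   | 1 << PG_locked    | 1 << PG_active | \
	 1 << PG_dirty | 1 << PG_reclaim  | 1 << PG_slab      | 1 << PG_swapcache | \
	 1 << PG_writeback | 1 << PG_buddy)
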
@@ -463,16 +454,7 @@ static inline int free_pages_check(struct page *page)
                (page->mapping != NULL)  |
                (page_get_page_cgroup(page) != NULL) |
                (page_count(page) != 0)  |
-               (page->flags & (
-                       1 << PG_lru     |
-                       1 << PG_private |
-                       1 << PG_locked  |
-                       1 << PG_active  |
-                       1 << PG_slab    |
-                       1 << PG_swapcache |
-                       1 << PG_writeback |
-                       1 << PG_reserved |
-                       1 << PG_buddy ))))
+               (page->flags & PAGE_FLAGS_CHECK_AT_FREE)))
                bad_page(page);
        if (PageDirty(page))
                __ClearPageDirty(page);
@@ -616,17 +598,7 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
                (page->mapping != NULL)  |
                (page_get_page_cgroup(page) != NULL) |
                (page_count(page) != 0)  |
-               (page->flags & (
-                       1 << PG_lru     |
-                       1 << PG_private |
-                       1 << PG_locked  |
-                       1 << PG_active  |
-                       1 << PG_dirty   |
-                       1 << PG_slab    |
-                       1 << PG_swapcache |
-                       1 << PG_writeback |
-                       1 << PG_reserved |
-                       1 << PG_buddy ))))
+               (page->flags & PAGE_FLAGS_CHECK_AT_PREP)))
                bad_page(page);
 
        /*
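
Note: the same consolidation is applied in free_pages_check() and prep_new_page() above. Reconstructed only from the two removed masks, the check macros presumably look like the sketch below; the masks differ in that a page being prepared for allocation must not carry PG_dirty either, while PG_reserved is checked by both but is absent from bad_page()'s clear mask:

/* Sketches reconstructed from the removed lines; see include/linux/page-flags.h
 * for the real definitions. */
#define PAGE_FLAGS_CHECK_AT_FREE \
	(1 << PG_lru  | 1 << PG_private   | 1 << PG_locked    | 1 << PG_active | \
	 1 << PG_slab | 1 << PG_swapcache | 1 << PG_writeback | 1 << PG_reserved | \
	 1 << PG_buddy)
#define PAGE_FLAGS_CHECK_AT_PREP	(PAGE_FLAGS_CHECK_AT_FREE | 1 << PG_dirty)
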
@@ -1396,6 +1368,9 @@ get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
 
        (void)first_zones_zonelist(zonelist, high_zoneidx, nodemask,
                                                        &preferred_zone);
+       if (!preferred_zone)
+               return NULL;
+
        classzone_idx = zone_idx(preferred_zone);
 
 zonelist_scan:
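
Note: the new preferred_zone check above guards a case the hunk itself does not show: first_zones_zonelist() reports the first matching zone through its last argument, and when the supplied nodemask admits no zone at or below high_zoneidx it can leave that pointer NULL (this reading of its semantics is an assumption, not visible in this diff). Restating the guarded sequence with that reasoning spelled out in comments:

	(void)first_zones_zonelist(zonelist, high_zoneidx, nodemask,
							&preferred_zone);
	if (!preferred_zone)		/* nothing usable in this zonelist */
		return NULL;		/* fail the allocation instead of */
	classzone_idx = zone_idx(preferred_zone);	/* ...dereferencing NULL here */
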
@@ -2353,7 +2328,6 @@ static void build_zonelists(pg_data_t *pgdat)
 static void build_zonelist_cache(pg_data_t *pgdat)
 {
        pgdat->node_zonelists[0].zlcache_ptr = NULL;
-       pgdat->node_zonelists[1].zlcache_ptr = NULL;
 }
 
 #endif /* CONFIG_NUMA */
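
Note: dropping node_zonelists[1] in the !CONFIG_NUMA stub above is presumably because a non-NUMA node carries only a single fallback zonelist after the zonelist rework, so there is no second entry to clear. A rough sketch of the assumed layout (the real definition is in include/linux/mmzone.h and is not part of this diff):

/* Assumed shape, for illustration only: */
enum {
	ZONELIST_FALLBACK,	/* zonelist with fallback, always present */
#ifdef CONFIG_NUMA
	ZONELIST_NOFALLBACK,	/* __GFP_THISNODE zonelist, NUMA builds only */
#endif
	MAX_ZONELISTS		/* 1 on !NUMA, 2 on NUMA */
};
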
@@ -2804,7 +2778,7 @@ int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
        alloc_size = zone->wait_table_hash_nr_entries
                                        * sizeof(wait_queue_head_t);
 
-       if (system_state == SYSTEM_BOOTING) {
+       if (!slab_is_available()) {
                zone->wait_table = (wait_queue_head_t *)
                        alloc_bootmem_node(pgdat, alloc_size);
        } else {
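
Note: switching from system_state == SYSTEM_BOOTING to !slab_is_available() above asks the question that actually matters here: whether the bootmem allocator is still the only way to get memory. A sketch of the idiom; the else branch shown below is an assumption, since the hunk above only shows its opening brace:

	if (!slab_is_available())
		zone->wait_table = (wait_queue_head_t *)
			alloc_bootmem_node(pgdat, alloc_size);	/* pre-slab boot */
	else
		zone->wait_table = vmalloc(alloc_size);		/* normal allocators up */
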
@@ -2862,8 +2836,6 @@ __meminit int init_currently_empty_zone(struct zone *zone,
 
        zone->zone_start_pfn = zone_start_pfn;
 
-       memmap_init(size, pgdat->node_id, zone_idx(zone), zone_start_pfn);
-
        zone_init_free_lists(zone);
 
        return 0;
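
Note: the memmap_init() call removed from init_currently_empty_zone() above is not lost; the final hunk of this diff re-adds it in the caller, free_area_init_core(), right after the zone has been set up. Restated from the hunks in this diff, the resulting order in the caller becomes:

		ret = init_currently_empty_zone(zone, zone_start_pfn,
						size, MEMMAP_EARLY);
		BUG_ON(ret);
		memmap_init(size, nid, j, zone_start_pfn);	/* moved from the callee */
		zone_start_pfn += size;
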
@@ -3380,7 +3352,8 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
                 * is used by this zone for memmap. This affects the watermark
                 * and per-cpu initialisations
                 */
-               memmap_pages = (size * sizeof(struct page)) >> PAGE_SHIFT;
+               memmap_pages =
+                       PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT;
                if (realsize >= memmap_pages) {
                        realsize -= memmap_pages;
                        printk(KERN_DEBUG
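
Note: the PAGE_ALIGN() added above rounds the memmap footprint up to whole pages before converting it to a page count, so a partially used final page is no longer dropped from the watermark and per-cpu accounting. A worked example (the 56-byte struct page size is an assumption; it varies by configuration):

	/* Zone of 100,000 pages, 4 KiB PAGE_SIZE, sizeof(struct page) == 56:  */
	/*   100000 * 56 = 5,600,000 bytes of memmap                           */
	/*   old:            5600000  >> PAGE_SHIFT == 1367 pages (truncated)  */
	/*   new: PAGE_ALIGN(5600000) >> PAGE_SHIFT == 1368 pages (really used)*/
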
@@ -3433,6 +3406,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
                ret = init_currently_empty_zone(zone, zone_start_pfn,
                                                size, MEMMAP_EARLY);
                BUG_ON(ret);
+               memmap_init(size, nid, j, zone_start_pfn);
                zone_start_pfn += size;
        }
 }