/* internal.h: mm/ internal definitions
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef __MM_INTERNAL_H
#define __MM_INTERNAL_H

#include <linux/mm.h>

void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
		unsigned long floor, unsigned long ceiling);

extern void prep_compound_page(struct page *page, unsigned long order);

static inline void set_page_count(struct page *page, int v)
{
	atomic_set(&page->_count, v);
}

/*
 * Turn a non-refcounted page (->_count == 0) into refcounted with
 * a count of one.
 */
static inline void set_page_refcounted(struct page *page)
{
	VM_BUG_ON(PageTail(page));
	VM_BUG_ON(atomic_read(&page->_count));
	set_page_count(page, 1);
}

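/*
 * Usage sketch (illustrative; not a caller defined here): pages leave
 * the buddy allocator with _count == 0, so the allocation path gives a
 * freshly prepared page its first and only reference before handing it
 * to the caller:
 *
 *	set_page_refcounted(page);	<- _count goes from 0 to 1
 */
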
static inline void __put_page(struct page *page)
{
	atomic_dec(&page->_count);
}

/* in mm/vmscan.c: */
extern int isolate_lru_page(struct page *page);
extern void putback_lru_page(struct page *page);

/* in mm/page_alloc.c */
extern void __free_pages_bootmem(struct page *page, unsigned int order);

/*
 * Function for dealing with a page's order in the buddy system.
 * zone->lock is already acquired when we use these, so we don't
 * need atomic page->flags operations here.
 */
static inline unsigned long page_order(struct page *page)
{
	VM_BUG_ON(!PageBuddy(page));
	return page_private(page);
}

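/*
 * Usage sketch (hypothetical caller): page_order() is only meaningful
 * for a page still in the buddy allocator, and only under zone->lock:
 *
 *	spin_lock_irqsave(&zone->lock, flags);
 *	if (PageBuddy(page))
 *		order = page_order(page);
 *	spin_unlock_irqrestore(&zone->lock, flags);
 */
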
#ifdef CONFIG_UNEVICTABLE_LRU
/*
 * unevictable_migrate_page() is called only from migrate_page_copy()
 * to migrate the unevictable flag to the new page.
 * Note that the old page has been isolated from the LRU lists at this
 * point, so we don't need to worry about LRU statistics.
 */
static inline void unevictable_migrate_page(struct page *new, struct page *old)
{
	if (TestClearPageUnevictable(old))
		SetPageUnevictable(new);
}
#else
static inline void unevictable_migrate_page(struct page *new, struct page *old)
{
}
#endif

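/*
 * Illustration (abridged, not the full function): migrate_page_copy()
 * carries the flag over while copying page state from the isolated old
 * page to its replacement:
 *
 *	copy_highpage(newpage, page);
 *	...
 *	unevictable_migrate_page(newpage, page);
 */
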
/*
 * FLATMEM and DISCONTIGMEM configurations use alloc_bootmem_node,
 * so all functions starting at paging_init should be marked __init
 * in those cases. SPARSEMEM, however, allows for memory hotplug,
 * and alloc_bootmem_node is not used.
 */
#ifdef CONFIG_SPARSEMEM
#define __paginginit __meminit
#else
#define __paginginit __init
#endif

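/*
 * Usage sketch (example_zone_init is a made-up name): annotate any
 * function on the paging_init() call chain with __paginginit; it is
 * then discarded after boot on FLATMEM/DISCONTIGMEM but kept around
 * for memory hotplug on SPARSEMEM:
 *
 *	void __paginginit example_zone_init(struct zone *zone);
 */
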
/* Memory initialisation debug and verification */
enum mminit_level {
	MMINIT_WARNING,
	MMINIT_VERIFY,
	MMINIT_TRACE
};

#ifdef CONFIG_DEBUG_MEMORY_INIT

extern int mminit_loglevel;

#define mminit_dprintk(level, prefix, fmt, arg...) \
do { \
	if (level < mminit_loglevel) { \
		printk(level <= MMINIT_WARNING ? KERN_WARNING : KERN_DEBUG); \
		printk(KERN_CONT "mminit::" prefix " " fmt, ##arg); \
	} \
} while (0)

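/*
 * Example call (prefix and message are illustrative): the message is
 * printed only when its level is below the running mminit_loglevel;
 * levels at or below MMINIT_WARNING are promoted to KERN_WARNING:
 *
 *	mminit_dprintk(MMINIT_TRACE, "memmap_init",
 *			"initialising map node %d\n", nid);
 */
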
extern void mminit_verify_pageflags_layout(void);
extern void mminit_verify_page_links(struct page *page,
		enum zone_type zone, unsigned long nid, unsigned long pfn);
extern void mminit_verify_zonelist(void);

#else

static inline void mminit_dprintk(enum mminit_level level,
				const char *prefix, const char *fmt, ...)
{
}

static inline void mminit_verify_pageflags_layout(void)
{
}

static inline void mminit_verify_page_links(struct page *page,
		enum zone_type zone, unsigned long nid, unsigned long pfn)
{
}

static inline void mminit_verify_zonelist(void)
{
}
#endif /* CONFIG_DEBUG_MEMORY_INIT */

/* mminit_validate_memmodel_limits is independent of CONFIG_DEBUG_MEMORY_INIT */

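/*
 * Usage sketch (hypothetical caller): the pfn range is clamped in
 * place, so a caller registers only what the memory model can
 * represent:
 *
 *	mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
 *	if (start_pfn < end_pfn)
 *		memory_present(nid, start_pfn, end_pfn);
 */
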
#if defined(CONFIG_SPARSEMEM)
extern void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn);
#else
static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn)
{
}
#endif /* CONFIG_SPARSEMEM */

#endif /* __MM_INTERNAL_H */