X-Git-Url: https://git.openpandora.org/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=include%2Flinux%2Fmm.h;h=4edf1934e5cae2911ccf0b0917d7072fd6f672c4;hb=cf9a2ae8d49948f861b56e5333530e491a9da190;hp=856f0ee7e84ab53370f6df38d791547bfbfdcaa8;hpb=c226951b93f7cd7c3a10b17384535b617bd43fd0;p=pandora-kernel.git

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 856f0ee7e84a..4edf1934e5ca 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -16,6 +16,7 @@
 #include
 #include
 #include
+#include
 
 struct mempolicy;
 struct anon_vma;
@@ -198,6 +199,7 @@ struct vm_operations_struct {
 	void (*open)(struct vm_area_struct * area);
 	void (*close)(struct vm_area_struct * area);
 	struct page * (*nopage)(struct vm_area_struct * area, unsigned long address, int *type);
+	unsigned long (*nopfn)(struct vm_area_struct * area, unsigned long address);
 	int (*populate)(struct vm_area_struct * area, unsigned long address, unsigned long len, pgprot_t prot, unsigned long pgoff, int nonblock);
 
 	/* notification that a previously read-only page is about to become
@@ -215,62 +217,6 @@ struct vm_operations_struct {
 struct mmu_gather;
 struct inode;
 
-/*
- * Each physical page in the system has a struct page associated with
- * it to keep track of whatever it is we are using the page for at the
- * moment. Note that we have no way to track which tasks are using
- * a page, though if it is a pagecache page, rmap structures can tell us
- * who is mapping it.
- */
-struct page {
-	unsigned long flags;		/* Atomic flags, some possibly
-					 * updated asynchronously */
-	atomic_t _count;		/* Usage count, see below. */
-	atomic_t _mapcount;		/* Count of ptes mapped in mms,
-					 * to show when page is mapped
-					 * & limit reverse map searches.
-					 */
-	union {
-	    struct {
-		unsigned long private;		/* Mapping-private opaque data:
-						 * usually used for buffer_heads
-						 * if PagePrivate set; used for
-						 * swp_entry_t if PageSwapCache;
-						 * indicates order in the buddy
-						 * system if PG_buddy is set.
-						 */
-		struct address_space *mapping;	/* If low bit clear, points to
-						 * inode address_space, or NULL.
-						 * If page mapped as anonymous
-						 * memory, low bit is set, and
-						 * it points to anon_vma object:
-						 * see PAGE_MAPPING_ANON below.
-						 */
-	    };
-#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
-	    spinlock_t ptl;
-#endif
-	};
-	pgoff_t index;			/* Our offset within mapping. */
-	struct list_head lru;		/* Pageout list, eg. active_list
-					 * protected by zone->lru_lock !
-					 */
-	/*
-	 * On machines where all RAM is mapped into kernel address space,
-	 * we can simply calculate the virtual address. On machines with
-	 * highmem some memory is mapped into kernel virtual memory
-	 * dynamically, so we need a place to store that address.
-	 * Note that this field could be 16 bits on x86 ... ;)
-	 *
-	 * Architectures with slow multiplication can define
-	 * WANT_PAGE_VIRTUAL in asm/page.h
-	 */
-#if defined(WANT_PAGE_VIRTUAL)
-	void *virtual;			/* Kernel virtual address (NULL if
-					   not kmapped, ie. highmem) */
-#endif /* WANT_PAGE_VIRTUAL */
-};
-
 #define page_private(page)		((page)->private)
 #define set_page_private(page, v)	((page)->private = (v))
 
@@ -501,7 +447,11 @@ static inline struct zone *page_zone(struct page *page)
 
 static inline unsigned long zone_to_nid(struct zone *zone)
 {
-	return zone->zone_pgdat->node_id;
+#ifdef CONFIG_NUMA
+	return zone->node;
+#else
+	return 0;
+#endif
 }
 
 static inline unsigned long page_to_nid(struct page *page)
@@ -546,11 +496,6 @@ static inline void set_page_links(struct page *page, enum zone_type zone,
  */
 #include
 
-#ifndef CONFIG_DISCONTIGMEM
-/* The array of struct pages - for discontigmem use pgdat->lmem_map */
-extern struct page *mem_map;
-#endif
-
 static __always_inline void *lowmem_page_address(struct page *page)
 {
 	return __va(page_to_pfn(page) << PAGE_SHIFT);
@@ -649,6 +594,12 @@ static inline int page_mapped(struct page *page)
 #define NOPAGE_SIGBUS	(NULL)
 #define NOPAGE_OOM	((struct page *) (-1))
 
+/*
+ * Error return values for the *_nopfn functions
+ */
+#define NOPFN_SIGBUS	((unsigned long) -1)
+#define NOPFN_OOM	((unsigned long) -2)
+
 /*
  * Different kinds of faults, as returned by handle_mm_fault().
  * Used to decide whether a process gets delivered SIGBUS or
@@ -792,7 +743,9 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long
 		int len, int write, int force, struct page **pages, struct vm_area_struct **vmas);
 void print_bad_pte(struct vm_area_struct *, pte_t, unsigned long);
 
-int __set_page_dirty_buffers(struct page *page);
+extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
+extern void do_invalidatepage(struct page *page, unsigned long offset);
+
 int __set_page_dirty_nobuffers(struct page *page);
 int redirty_page_for_writepage(struct writeback_control *wbc,
 				struct page *page);
@@ -937,6 +890,56 @@ extern void free_area_init(unsigned long * zones_size);
 extern void free_area_init_node(int nid, pg_data_t *pgdat,
 	unsigned long * zones_size, unsigned long zone_start_pfn,
 	unsigned long *zholes_size);
+#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
+/*
+ * With CONFIG_ARCH_POPULATES_NODE_MAP set, an architecture may initialise its
+ * zones, allocate the backing mem_map and account for memory holes in a more
+ * architecture independent manner. This is a substitute for creating the
+ * zone_sizes[] and zholes_size[] arrays and passing them to
+ * free_area_init_node()
+ *
+ * An architecture is expected to register range of page frames backed by
+ * physical memory with add_active_range() before calling
+ * free_area_init_nodes() passing in the PFN each zone ends at. At a basic
+ * usage, an architecture is expected to do something like
+ *
+ * unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn,
+ *							 max_highmem_pfn};
+ * for_each_valid_physical_page_range()
+ *	add_active_range(node_id, start_pfn, end_pfn)
+ * free_area_init_nodes(max_zone_pfns);
+ *
+ * If the architecture guarantees that there are no holes in the ranges
+ * registered with add_active_range(), free_bootmem_active_regions()
+ * will call free_bootmem_node() for each registered physical page range.
+ * Similarly sparse_memory_present_with_active_regions() calls
+ * memory_present() for each range when SPARSEMEM is enabled.
+ *
+ * See mm/page_alloc.c for more information on each function exposed by
+ * CONFIG_ARCH_POPULATES_NODE_MAP
+ */
+extern void free_area_init_nodes(unsigned long *max_zone_pfn);
+extern void add_active_range(unsigned int nid, unsigned long start_pfn,
+					unsigned long end_pfn);
+extern void shrink_active_range(unsigned int nid, unsigned long old_end_pfn,
+					unsigned long new_end_pfn);
+extern void push_node_boundaries(unsigned int nid, unsigned long start_pfn,
+					unsigned long end_pfn);
+extern void remove_all_active_ranges(void);
+extern unsigned long absent_pages_in_range(unsigned long start_pfn,
+					unsigned long end_pfn);
+extern void get_pfn_range_for_nid(unsigned int nid,
+			unsigned long *start_pfn, unsigned long *end_pfn);
+extern unsigned long find_min_pfn_with_active_regions(void);
+extern unsigned long find_max_pfn_with_active_regions(void);
+extern void free_bootmem_with_active_regions(int nid,
+					unsigned long max_low_pfn);
+extern void sparse_memory_present_with_active_regions(int nid);
+#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
+extern int early_pfn_to_nid(unsigned long pfn);
+#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
+#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
+extern void set_dma_reserve(unsigned long new_dma_reserve);
 extern void memmap_init_zone(unsigned long, int, unsigned long, unsigned long);
 extern void setup_per_zone_pages_min(void);
 extern void mem_init(void);
@@ -1130,7 +1133,7 @@ void drop_slab(void);
 extern int randomize_va_space;
 #endif
 
-const char *arch_vma_name(struct vm_area_struct *vma);
+__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma);
 
 #endif /* __KERNEL__ */
 #endif /* _LINUX_MM_H */
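Two short sketches follow; they are not taken from the diff above and only illustrate how the new interfaces might be used.

First, a minimal sketch of how a driver might supply the new ->nopfn handler together with the NOPFN_* error values introduced here: the handler translates the faulting address into a page frame number inside a device aperture and returns it, or NOPFN_SIGBUS when the address falls outside it. All mydrv_* names and the region structure are hypothetical; the generic fault path is what installs the page table entry from the returned PFN.

#include <linux/mm.h>

/* Hypothetical per-VMA state, set up by the driver's mmap() method. */
struct mydrv_region {
	unsigned long phys_base;	/* physical start of the aperture */
	unsigned long size;		/* aperture length in bytes */
};

static unsigned long mydrv_nopfn(struct vm_area_struct *vma,
				 unsigned long address)
{
	struct mydrv_region *r = vma->vm_private_data;
	unsigned long offset;

	offset = (address - vma->vm_start) + (vma->vm_pgoff << PAGE_SHIFT);
	if (offset >= r->size)
		return NOPFN_SIGBUS;	/* fault outside the device region */

	/* Return the PFN; the caller inserts the page table entry. */
	return (r->phys_base + offset) >> PAGE_SHIFT;
}

static struct vm_operations_struct mydrv_vm_ops = {
	.nopfn	= mydrv_nopfn,
};

Second, a rough sketch of the boot-time sequence described in the CONFIG_ARCH_POPULATES_NODE_MAP comment: the architecture registers each usable range of page frames with add_active_range() and then passes the per-zone limit PFNs to free_area_init_nodes(). The max_*_pfn variables and the ram_range[] table are placeholders for the architecture's own memory description.

void __init example_zone_sizes_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	int i;

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA]    = max_dma_pfn;	/* placeholder */
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;	/* placeholder */
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = max_pfn;		/* placeholder */
#endif

	/* Register every block of usable RAM before initialising the zones. */
	for (i = 0; i < nr_ram_ranges; i++)		/* placeholder table */
		add_active_range(0, ram_range[i].start_pfn,
				 ram_range[i].end_pfn);

	free_area_init_nodes(max_zone_pfns);
}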