#ifndef __ASM_MEMORY_MODEL_H
#define __ASM_MEMORY_MODEL_H

#ifndef __ASSEMBLY__
#if defined(CONFIG_FLATMEM)
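/*
 * ARCH_PFN_OFFSET is the pfn of the first page covered by mem_map;
 * architectures whose RAM does not start at physical address 0 are
 * expected to override this default.
 */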
#ifndef ARCH_PFN_OFFSET
#define ARCH_PFN_OFFSET		(0UL)
#endif
#elif defined(CONFIG_DISCONTIGMEM)
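/*
 * Per-node translation helpers: an architecture may supply its own
 * pfn -> node and in-node offset calculations; the generic fallbacks
 * below use pfn_to_nid() and the node's node_start_pfn.
 */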
#ifndef arch_pfn_to_nid
#define arch_pfn_to_nid(pfn)	pfn_to_nid(pfn)
#endif

#ifndef arch_local_page_offset
#define arch_local_page_offset(pfn, nid)	\
	((pfn) - NODE_DATA(nid)->node_start_pfn)
#endif
#endif /* CONFIG_DISCONTIGMEM */
/*
 * supports 3 memory models: FLATMEM, DISCONTIGMEM and SPARSEMEM
 * (the latter with or without CONFIG_SPARSEMEM_VMEMMAP).
 */
#if defined(CONFIG_FLATMEM)
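/*
 * FLATMEM: a single mem_map[] array describes all of memory, indexed
 * by pfn relative to ARCH_PFN_OFFSET.  Illustrative example: with
 * ARCH_PFN_OFFSET == 0x100, pfn 0x105 maps to &mem_map[5], and
 * &mem_map[5] maps back to pfn 0x105.
 */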
#define __pfn_to_page(pfn)	(mem_map + ((pfn) - ARCH_PFN_OFFSET))
#define __page_to_pfn(page)	((unsigned long)((page) - mem_map) + \
				 ARCH_PFN_OFFSET)
#elif defined(CONFIG_DISCONTIGMEM)
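/*
 * DISCONTIGMEM: each node carries its own node_mem_map in its
 * pg_data_t, so a pfn is first resolved to a node and then offset
 * into that node's map; the reverse walk adds node_start_pfn back.
 */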
#define __pfn_to_page(pfn)			\
({	unsigned long __pfn = (pfn);		\
	unsigned long __nid = arch_pfn_to_nid(__pfn);  \
	NODE_DATA(__nid)->node_mem_map + arch_local_page_offset(__pfn, __nid);\
})

#define __page_to_pfn(pg)						\
({	struct page *__pg = (pg);					\
	struct pglist_data *__pgdat = NODE_DATA(page_to_nid(__pg));	\
	(unsigned long)(__pg - __pgdat->node_mem_map) +			\
	 __pgdat->node_start_pfn;					\
})
#elif defined(CONFIG_SPARSEMEM_VMEMMAP)
/* memmap is virtually contiguous. */
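/*
 * vmemmap is a virtually contiguous array of struct page spanning the
 * whole pfn space; only sections that are actually present have pages
 * mapped behind it, so the translation stays plain pointer arithmetic
 * with no section lookup.
 */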
#define __pfn_to_page(pfn)	(vmemmap + (pfn))
#define __page_to_pfn(page)	(unsigned long)((page) - vmemmap)
#elif defined(CONFIG_SPARSEMEM)
/*
 * Note: section's mem_map is encoded to reflect its start_pfn.
 * section[i].section_mem_map == mem_map's address - start_pfn;
 */
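/*
 * Illustrative example: if a section starts at pfn 0x10000 and its
 * struct pages live at some address __map, section_mem_map effectively
 * stores __map - 0x10000, so __pfn_to_page(0x10003) evaluates to
 * (__map - 0x10000) + 0x10003 == &__map[3], and __page_to_pfn()
 * inverts the same arithmetic.
 */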
#define __page_to_pfn(pg)					\
({	struct page *__pg = (pg);				\
	int __sec = page_to_section(__pg);			\
	(unsigned long)(__pg - __section_mem_map_addr(__nr_to_section(__sec))); \
})

#define __pfn_to_page(pfn)				\
({	unsigned long __pfn = (pfn);			\
	struct mem_section *__sec = __pfn_to_section(__pfn);	\
	__section_mem_map_addr(__sec) + __pfn;		\
})
#endif /* CONFIG_FLATMEM/DISCONTIGMEM/SPARSEMEM */
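/*
 * The model-specific __pfn_to_page()/__page_to_pfn() helpers above are
 * exposed to the rest of the kernel under the generic names.
 */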
#define page_to_pfn __page_to_pfn
#define pfn_to_page __pfn_to_page
#endif /* __ASSEMBLY__ */

#endif /* __ASM_MEMORY_MODEL_H */