/*
 * Copyright IBM Corp. 2006
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
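
/* Serializes changes to the 1:1 mapping and the memory segment list below. */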
static DEFINE_MUTEX(vmem_mutex);

struct memory_segment {
	struct list_head list;
	unsigned long start;
	unsigned long size;
};

static LIST_HEAD(mem_segs);
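
/*
 * Allocate pages for page tables: from the page allocator once slab is
 * available, from bootmem during early boot.
 */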
static void __ref *vmem_alloc_pages(unsigned int order)
{
	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
}
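
/*
 * Region-third (pud) and segment (pmd) tables span four pages
 * (2048 eight-byte entries), hence the order-2 allocations and the
 * PAGE_SIZE * 4 clears below.
 */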
static inline pud_t *vmem_pud_alloc(void)
{
	pud_t *pud = NULL;

#ifdef CONFIG_64BIT
	pud = vmem_alloc_pages(2);
	if (!pud)
		return NULL;
	clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
#endif
	return pud;
}

static inline pmd_t *vmem_pmd_alloc(void)
{
	pmd_t *pmd = NULL;

#ifdef CONFIG_64BIT
	pmd = vmem_alloc_pages(2);
	if (!pmd)
		return NULL;
	clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
#endif
	return pmd;
}
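
/*
 * Allocate a page table and initialize all entries to empty (invalid).
 * With large-page support the change-recording override bit is set in the
 * empty entries as well.
 */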
static pte_t __ref *vmem_pte_alloc(void)
{
	pte_t *pte;

	if (slab_is_available())
		pte = (pte_t *) page_table_alloc(&init_mm);
	else
		pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t));
	if (!pte)
		return NULL;
	if (MACHINE_HAS_HPAGE)
		clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY | _PAGE_CO,
			    PTRS_PER_PTE * sizeof(pte_t));
	else
		clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY,
			    PTRS_PER_PTE * sizeof(pte_t));
	return pte;
}

/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
{
	unsigned long address;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t  pte;
	int ret = -ENOMEM;

	for (address = start; address < start + size; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate_kernel(&init_mm, pg_dir, pu_dir);
		}

		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate_kernel(&init_mm, pu_dir, pm_dir);
		}

		pte = mk_pte_phys(address, __pgprot(ro ? _PAGE_RO : 0));
		pm_dir = pmd_offset(pu_dir, address);
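
		/*
		 * Map a large (segment-sized) page when the address is
		 * segment aligned and at least HPAGE_SIZE of the range
		 * remains.
		 */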
#ifdef __s390x__
		if (MACHINE_HAS_HPAGE && !(address & ~HPAGE_MASK) &&
		    (address + HPAGE_SIZE <= start + size) &&
		    (address >= HPAGE_SIZE)) {
			pte_val(pte) |= _SEGMENT_ENTRY_LARGE |
					_SEGMENT_ENTRY_CO;
			pmd_val(*pm_dir) = pte_val(pte);
			address += HPAGE_SIZE - PAGE_SIZE;
			continue;
		}
#endif
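		/* Otherwise map with 4K pages; allocate a page table first if needed. */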
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		*pt_dir = pte;
	}
	ret = 0;
out:
	flush_tlb_kernel_range(start, start + size);
	return ret;
}

/*
 * Remove a physical memory range from the 1:1 mapping.
 * Currently only invalidates page table entries.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	unsigned long address;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t  pte;

	pte_val(pte) = _PAGE_TYPE_EMPTY;
	for (address = start; address < start + size; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir))
			continue;
		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir))
			continue;
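
		/* A large page is unmapped by clearing its segment entry. */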
		if (pmd_huge(*pm_dir)) {
			pmd_clear_kernel(pm_dir);
			address += HPAGE_SIZE - PAGE_SIZE;
			continue;
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		*pt_dir = pte;
	}
	flush_tlb_kernel_range(start, start + size);
}

/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
{
	unsigned long address, start_addr, end_addr;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t  pte;
	int ret = -ENOMEM;

	start_addr = (unsigned long) start;
	end_addr = (unsigned long) (start + nr);

	for (address = start_addr; address < end_addr; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate_kernel(&init_mm, pg_dir, pu_dir);
		}

		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate_kernel(&init_mm, pu_dir, pm_dir);
		}

		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
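		/* Back this piece of the virtual mem_map with a fresh page if needed. */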
		if (pte_none(*pt_dir)) {
			unsigned long new_page;

			new_page = __pa(vmem_alloc_pages(0));
			if (!new_page)
				goto out;
			pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);
			*pt_dir = pte;
		}
	}
	memset(start, 0, nr * sizeof(struct page));
	ret = 0;
out:
	flush_tlb_kernel_range(start_addr, end_addr);
	return ret;
}

/*
 * Add memory segment to the segment list if it doesn't overlap with
 * an already present segment.
 */
static int insert_memory_segment(struct memory_segment *seg)
{
	struct memory_segment *tmp;

	if (seg->start + seg->size > VMEM_MAX_PHYS ||
	    seg->start + seg->size < seg->start)
		return -ERANGE;
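
	/* Reject the segment if it overlaps any segment already on the list. */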
	list_for_each_entry(tmp, &mem_segs, list) {
		if (seg->start >= tmp->start + tmp->size)
			continue;
		if (seg->start + seg->size <= tmp->start)
			continue;
		return -ENOSPC;
	}
	list_add(&seg->list, &mem_segs);
	return 0;
}

/*
 * Remove memory segment from the segment list.
 */
static void remove_memory_segment(struct memory_segment *seg)
{
	list_del(&seg->list);
}

static void __remove_shared_memory(struct memory_segment *seg)
{
	remove_memory_segment(seg);
	vmem_remove_range(seg->start, seg->size);
}

int vmem_remove_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);
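
	/* Look up the segment that matches the given range exactly. */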
	ret = -ENOENT;
	list_for_each_entry(seg, &mem_segs, list) {
		if (seg->start == start && seg->size == size)
			break;
	}

	if (seg->start != start || seg->size != size)
		goto out;

	ret = 0;
	__remove_shared_memory(seg);
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

int vmem_add_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);
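	/* Track the new range with a memory segment before mapping it. */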
	ret = -ENOMEM;
	seg = kzalloc(sizeof(*seg), GFP_KERNEL);
	if (!seg)
		goto out;
	seg->start = start;
	seg->size = size;

	ret = insert_memory_segment(seg);
	if (ret)
		goto out_free;

	ret = vmem_add_mem(start, size, 0);
	if (ret)
		goto out_remove;
	goto out;

out_remove:
	__remove_shared_memory(seg);
out_free:
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

/*
 * map whole physical memory to virtual memory (identity mapping)
 * we reserve enough space in the vmalloc area for vmemmap to hotplug
 * additional memory segments.
 */
void __init vmem_map_init(void)
{
	unsigned long ro_start, ro_end;
	unsigned long start, end;
	int i;

	spin_lock_init(&init_mm.context.list_lock);
	INIT_LIST_HEAD(&init_mm.context.crst_list);
	INIT_LIST_HEAD(&init_mm.context.pgtable_list);
	init_mm.context.noexec = 0;
	ro_start = ((unsigned long)&_stext) & PAGE_MASK;
	ro_end = PFN_ALIGN((unsigned long)&_eshared);
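	/*
	 * Map each memory chunk, splitting it where it overlaps the
	 * read-only range from _stext to _eshared so that kernel text
	 * and read-only data end up write protected.
	 */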
	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
		start = memory_chunk[i].addr;
		end = memory_chunk[i].addr + memory_chunk[i].size;
		if (start >= ro_end || end <= ro_start)
			vmem_add_mem(start, end - start, 0);
		else if (start >= ro_start && end <= ro_end)
			vmem_add_mem(start, end - start, 1);
		else if (start >= ro_start) {
			vmem_add_mem(start, ro_end - start, 1);
			vmem_add_mem(ro_end, end - ro_end, 0);
		} else if (end < ro_end) {
			vmem_add_mem(start, ro_start - start, 0);
			vmem_add_mem(ro_start, end - ro_start, 1);
		} else {
			vmem_add_mem(start, ro_start - start, 0);
			vmem_add_mem(ro_start, ro_end - ro_start, 1);
			vmem_add_mem(ro_end, end - ro_end, 0);
		}
	}
}

/*
 * Convert memory chunk array to a memory segment list so there is a single
 * list that contains both r/w memory and shared memory segments.
 */
static int __init vmem_convert_memory_chunk(void)
{
	struct memory_segment *seg;
	int i;

	mutex_lock(&vmem_mutex);
	for (i = 0; i < MEMORY_CHUNKS; i++) {
		if (!memory_chunk[i].size)
			continue;
		seg = kzalloc(sizeof(*seg), GFP_KERNEL);
		if (!seg)
			panic("Out of memory...\n");
		seg->start = memory_chunk[i].addr;
		seg->size = memory_chunk[i].size;
		insert_memory_segment(seg);
	}
	mutex_unlock(&vmem_mutex);
	return 0;
}

core_initcall(vmem_convert_memory_chunk);