/*
 *  linux/arch/x86_64/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/poison.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/memory_hotplug.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>

#ifndef Dprintk
#define Dprintk(x...)
#endif

const struct dma_mapping_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

static unsigned long dma_reserve __initdata;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * NOTE: pagetable_init allocates all the fixmap pagetables contiguously in
 * physical space, so we can cache the location of the first one and move
 * around without checking the pgd every time.
 */

void show_mem(void)
{
	long i, total = 0, reserved = 0;
	long shared = 0, cached = 0;
	pg_data_t *pgdat;
	struct page *page;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas();
	printk(KERN_INFO "Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));

	for_each_online_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
			page = pfn_to_page(pgdat->node_start_pfn + i);
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
	}
	printk(KERN_INFO "%lu pages of RAM\n", total);
	printk(KERN_INFO "%lu reserved pages\n", reserved);
	printk(KERN_INFO "%lu pages shared\n", shared);
	printk(KERN_INFO "%lu pages swap cached\n", cached);
}

int after_bootmem;

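/*
 * Allocate a zeroed page for an early kernel page table. Before bootmem is
 * retired (after_bootmem == 0) the page comes from the bootmem allocator,
 * afterwards from the normal page allocator.
 */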
static __init void *spp_getpage(void)
{
	void *ptr;

	if (after_bootmem)
		ptr = (void *) get_zeroed_page(GFP_ATOMIC);
	else
		ptr = alloc_bootmem_pages(PAGE_SIZE);
	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK))
		panic("set_pte_phys: cannot allocate page data %s\n", after_bootmem ? "after bootmem" : "");

	Dprintk("spp_getpage %p\n", ptr);
	return ptr;
}

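/*
 * Install a single kernel pte mapping 'vaddr' to 'phys' with protection
 * 'prot', allocating any missing pud/pmd/pte levels on the way down.
 * Used by __set_fixmap() below for the fixmap range.
 */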
static __init void set_pte_phys(unsigned long vaddr,
				unsigned long phys, pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte, new_pte;

	Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);

	pgd = pgd_offset_k(vaddr);
	if (pgd_none(*pgd)) {
		printk("PGD FIXMAP MISSING, it should be setup in head.S!\n");
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		pmd = (pmd_t *) spp_getpage();
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
		if (pmd != pmd_offset(pud, 0)) {
			printk("PAGETABLE BUG #01! %p <-> %p\n", pmd, pmd_offset(pud, 0));
			return;
		}
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		pte = (pte_t *) spp_getpage();
		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
		if (pte != pte_offset_kernel(pmd, 0)) {
			printk("PAGETABLE BUG #02!\n");
			return;
		}
	}
	new_pte = pfn_pte(phys >> PAGE_SHIFT, prot);

	pte = pte_offset_kernel(pmd, vaddr);
	if (!pte_none(*pte) &&
	    pte_val(*pte) != (pte_val(new_pte) & __supported_pte_mask))
		pte_ERROR(*pte);
	set_pte(pte, new_pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

/* NOTE: this is meant to be run only at boot */
void __init
__set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		printk("Invalid __set_fixmap\n");
		return;
	}
	set_pte_phys(address, phys, prot);
}

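/*
 * table_start/table_end delimit (in pfns) the physical block reserved by
 * find_early_table_space() for the direct-mapping page tables. Before
 * bootmem is up, alloc_low_page() hands out the next pfn from this block
 * and maps it temporarily via early_ioremap().
 */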
unsigned long __initdata table_start, table_end;

static __meminit void *alloc_low_page(unsigned long *phys)
{
	unsigned long pfn = table_end++;
	void *adr;

	if (after_bootmem) {
		adr = (void *)get_zeroed_page(GFP_ATOMIC);
		*phys = __pa(adr);
		return adr;
	}

	if (pfn >= end_pfn)
		panic("alloc_low_page: ran out of memory");

	adr = early_ioremap(pfn * PAGE_SIZE, PAGE_SIZE);
	memset(adr, 0, PAGE_SIZE);
	*phys = pfn * PAGE_SIZE;
	return adr;
}

static __meminit void unmap_low_page(void *adr)
{
	if (after_bootmem)
		return;

	early_iounmap(adr, PAGE_SIZE);
}

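/*
 * Temporarily map a physical range by borrowing unused 2MB slots in
 * level2_kernel_pgt, the pmd that covers __START_KERNEL_map. Only usable
 * at early boot; mappings must be torn down again with early_iounmap().
 */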
/* Must run before zap_low_mappings */
__init void *early_ioremap(unsigned long addr, unsigned long size)
{
	unsigned long vaddr;
	pmd_t *pmd, *last_pmd;
	int i, pmds;

	pmds = ((addr & ~PMD_MASK) + size + ~PMD_MASK) / PMD_SIZE;
	vaddr = __START_KERNEL_map;
	pmd = level2_kernel_pgt;
	last_pmd = level2_kernel_pgt + PTRS_PER_PMD - 1;
	for (; pmd <= last_pmd; pmd++, vaddr += PMD_SIZE) {
		for (i = 0; i < pmds; i++) {
			if (pmd_present(pmd[i]))
				goto next;
		}
		vaddr += addr & ~PMD_MASK;
		addr &= PMD_MASK;
		for (i = 0; i < pmds; i++, addr += PMD_SIZE)
			set_pmd(pmd + i, __pmd(addr | _KERNPG_TABLE | _PAGE_PSE));
		__flush_tlb();
		return (void *)vaddr;
	next:
		;
	}
	printk("early_ioremap(0x%lx, %lu) failed\n", addr, size);
	return NULL;
}

/* To avoid virtual aliases later */
__init void early_iounmap(void *addr, unsigned long size)
{
	unsigned long vaddr;
	pmd_t *pmd;
	int i, pmds;

	vaddr = (unsigned long)addr;
	pmds = ((vaddr & ~PMD_MASK) + size + ~PMD_MASK) / PMD_SIZE;
	pmd = level2_kernel_pgt + pmd_index(vaddr);
	for (i = 0; i < pmds; i++)
		pmd_clear(pmd + i);
	__flush_tlb();
}

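/*
 * Fill one pmd page with 2MB (PSE) mappings for the physical range
 * [address, end). When running before bootmem, entries past 'end' are
 * cleared so no stale mappings are left behind.
 */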
static void __meminit
phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end)
{
	int i = pmd_index(address);

	for (; i < PTRS_PER_PMD; i++, address += PMD_SIZE) {
		unsigned long entry;
		pmd_t *pmd = pmd_page + pmd_index(address);

		if (address >= end) {
			if (!after_bootmem)
				for (; i < PTRS_PER_PMD; i++, pmd++)
					set_pmd(pmd, __pmd(0));
			break;
		}

		if (pmd_val(*pmd))
			continue;

		entry = _PAGE_NX|_PAGE_PSE|_KERNPG_TABLE|_PAGE_GLOBAL|address;
		entry &= __supported_pte_mask;
		set_pmd(pmd, __pmd(entry));
	}
}

static void __meminit
phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end)
{
	pmd_t *pmd = pmd_offset(pud, 0);

	spin_lock(&init_mm.page_table_lock);
	phys_pmd_init(pmd, address, end);
	spin_unlock(&init_mm.page_table_lock);
	__flush_tlb_all();
}

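/*
 * Build the pud level of the direct mapping for [addr, end): allocate a
 * pmd page per 1GB slot (or update an already present one) and let
 * phys_pmd_init() populate it with 2MB entries. At boot, ranges not
 * covered by the e820 map are left unmapped.
 */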
static void __meminit phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
{
	int i = pud_index(addr);

	for (; i < PTRS_PER_PUD; i++, addr = (addr & PUD_MASK) + PUD_SIZE) {
		unsigned long pmd_phys;
		pud_t *pud = pud_page + pud_index(addr);
		pmd_t *pmd;

		if (addr >= end)
			break;

		if (!after_bootmem && !e820_any_mapped(addr, addr+PUD_SIZE, 0)) {
			set_pud(pud, __pud(0));
			continue;
		}

		if (pud_val(*pud)) {
			phys_pmd_update(pud, addr, end);
			continue;
		}

		pmd = alloc_low_page(&pmd_phys);
		spin_lock(&init_mm.page_table_lock);
		set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
		phys_pmd_init(pmd, addr, end);
		spin_unlock(&init_mm.page_table_lock);
		unmap_low_page(pmd);
	}
	__flush_tlb();
}

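/*
 * Estimate how much memory the direct-mapping page tables will need for a
 * mapping up to 'end' and grab a physically contiguous block for them from
 * the e820 map. The result is published in table_start/table_end.
 */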
static void __init find_early_table_space(unsigned long end)
{
	unsigned long puds, pmds, tables, start;

	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
	pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
	tables = round_up(puds * sizeof(pud_t), PAGE_SIZE) +
		 round_up(pmds * sizeof(pmd_t), PAGE_SIZE);

	/* RED-PEN putting page tables only on node 0 could
	   cause a hotspot and fill up ZONE_DMA. The page tables
	   need roughly 0.5KB per GB. */
	start = 0x8000;
	table_start = find_e820_area(start, end, tables);
	if (table_start == -1UL)
		panic("Cannot find space for the kernel page tables");

	table_start >>= PAGE_SHIFT;
	table_end = table_start;

	early_printk("kernel direct mapping tables up to %lx @ %lx-%lx\n",
		end, table_start << PAGE_SHIFT,
		(table_start << PAGE_SHIFT) + tables);
}

/* Set up the direct mapping of the physical memory at PAGE_OFFSET.
   This runs before bootmem is initialized and gets pages directly from
   physical memory. To access them they are temporarily mapped. */
void __meminit init_memory_mapping(unsigned long start, unsigned long end)
{
	unsigned long next;

	Dprintk("init_memory_mapping\n");

	/*
	 * Find space for the kernel direct mapping tables.
	 * Later we should allocate these tables in the local node of the
	 * memory mapped. Unfortunately this is done currently before the
	 * nodes are discovered.
	 */
	if (!after_bootmem)
		find_early_table_space(end);

	start = (unsigned long)__va(start);
	end = (unsigned long)__va(end);

	for (; start < end; start = next) {
		unsigned long pud_phys;
		pgd_t *pgd = pgd_offset_k(start);
		pud_t *pud;

		if (after_bootmem)
			pud = pud_offset(pgd, start & PGDIR_MASK);
		else
			pud = alloc_low_page(&pud_phys);

		next = start + PGDIR_SIZE;
		if (next > end)
			next = end;
		phys_pud_init(pud, __pa(start), __pa(next));
		if (!after_bootmem)
			set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
		unmap_low_page(pud);
	}

	if (!after_bootmem)
		asm volatile("movq %%cr4,%0" : "=r" (mmu_cr4_features));
	__flush_tlb_all();
}

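/*
 * Non-NUMA zone setup: memory below 16MB goes to ZONE_DMA, below 4GB to
 * ZONE_DMA32, and everything else to ZONE_NORMAL.
 */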
#ifndef CONFIG_NUMA
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
	max_zone_pfns[ZONE_NORMAL] = end_pfn;

	memory_present(0, 0, end_pfn);
	sparse_init();
	free_area_init_nodes(max_zone_pfns);
}
#endif

/* Unmap a kernel mapping if it exists. This is useful to avoid prefetches
   from the CPU leading to inconsistent cache lines. address and size
   must be aligned to 2MB boundaries.
   Does nothing when the mapping doesn't exist. */
void __init clear_kernel_mapping(unsigned long address, unsigned long size)
{
	unsigned long end = address + size;

	BUG_ON(address & ~LARGE_PAGE_MASK);
	BUG_ON(size & ~LARGE_PAGE_MASK);

	for (; address < end; address += LARGE_PAGE_SIZE) {
		pgd_t *pgd = pgd_offset_k(address);
		pud_t *pud;
		pmd_t *pmd;

		if (pgd_none(*pgd))
			continue;
		pud = pud_offset(pgd, address);
		if (pud_none(*pud))
			continue;
		pmd = pmd_offset(pud, address);
		if (!pmd || pmd_none(*pmd))
			continue;
		if (0 == (pmd_val(*pmd) & _PAGE_PSE)) {
			/* Could handle this, but it should not happen currently. */
			printk(KERN_ERR
		"clear_kernel_mapping: mapping has been split. will leak memory\n");
			pmd_ERROR(*pmd);
		}
		set_pmd(pmd, __pmd(0));
	}
	__flush_tlb_all();
}

/*
 * Memory hotplug specific functions
 */
void online_page(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
	totalram_pages++;
	num_physpages++;
}

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Memory is always added to the NORMAL zone. This means you will never get
 * additional DMA/DMA32 memory.
 */
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdat = NODE_DATA(nid);
	struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	init_memory_mapping(start, (start + size -1));

	ret = __add_pages(zone, start_pfn, nr_pages);
	if (ret)
		goto error;

	return ret;
error:
	printk("%s: Problem encountered in __add_pages!\n", __func__);
	return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

int remove_memory(u64 start, u64 size)
{
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(remove_memory);

#if !defined(CONFIG_ACPI_NUMA) && defined(CONFIG_NUMA)
int memory_add_physaddr_to_nid(u64 start)
{
	return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif

#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
/*
 * Memory hotadd without sparsemem: the mem_maps have been allocated in
 * advance, so just online the pages.
 */
int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages)
{
	int err = -EIO;
	unsigned long pfn;
	unsigned long total = 0, mem = 0;

	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
		if (pfn_valid(pfn)) {
			online_page(pfn_to_page(pfn));
			err = 0;
			mem++;
		}
		total++;
	}
	if (!err) {
		z->spanned_pages += total;
		z->present_pages += mem;
		z->zone_pgdat->node_spanned_pages += total;
		z->zone_pgdat->node_present_pages += mem;
	}
	return err;
}
#endif

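/*
 * The kcore entries below export the direct mapping, vmalloc space, kernel
 * text, module area and the vsyscall page through /proc/kcore.
 */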
static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
			 kcore_vsyscall;

void __init mem_init(void)
{
	long codesize, reservedpages, datasize, initsize;

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);

	/* this will put all low memory onto the freelists */
#ifdef CONFIG_NUMA
	totalram_pages = numa_free_all_bootmem();
#else
	totalram_pages = free_all_bootmem();
#endif
	reservedpages = end_pfn - totalram_pages -
					absent_pages_in_range(0, end_pfn);

	after_bootmem = 1;

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	/* Register memory areas for /proc/kcore */
	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END-VMALLOC_START);
	kclist_add(&kcore_kernel, &_stext, _end - _stext);
	kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN);
	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
		   VSYSCALL_END - VSYSCALL_START);

	printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		end_pfn << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10);
}

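/*
 * Give a range of pages that held __init code or data back to the page
 * allocator, poisoning them first so that stale references are easier to
 * spot.
 */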
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long addr;

	if (begin >= end)
		return;

	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		struct page *page = pfn_to_page(addr >> PAGE_SHIFT);

		ClearPageReserved(page);
		init_page_count(page);
		memset(page_address(page), POISON_FREE_INITMEM, PAGE_SIZE);
		__free_page(page);
		totalram_pages++;
	}
}

void free_initmem(void)
{
	memset(__initdata_begin, POISON_FREE_INITDATA,
	       __initdata_end - __initdata_begin);
	free_init_pages("unused kernel memory",
			__pa_symbol(&__init_begin),
			__pa_symbol(&__init_end));
}

#ifdef CONFIG_DEBUG_RODATA

void mark_rodata_ro(void)
{
	unsigned long addr = (unsigned long)__va(__pa_symbol(&__start_rodata));
	unsigned long end  = (unsigned long)__va(__pa_symbol(&__end_rodata));

	for (; addr < end; addr += PAGE_SIZE)
		change_page_attr_addr(addr, 1, PAGE_KERNEL_RO);

	printk("Write protecting the kernel read-only data: %luk\n",
	       (__end_rodata - __start_rodata) >> 10);

	/*
	 * change_page_attr_addr() requires a global_flush_tlb() call after it.
	 * We do this after the printk so that if something went wrong in the
	 * change, the printk gets out at least to give a better debug hint
	 * of who is the culprit.
	 */
	global_flush_tlb();
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_init_pages("initrd memory", __pa(start), __pa(end));
}
#endif

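/*
 * Reserve a physical range in the bootmem allocator, routing the request
 * to the owning node on NUMA builds and tracking how much of the DMA zone
 * gets eaten so zone sizing can compensate via set_dma_reserve().
 */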
void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
{
#ifdef CONFIG_NUMA
	int nid = phys_to_nid(phys);
#endif
	unsigned long pfn = phys >> PAGE_SHIFT;

	if (pfn >= end_pfn) {
		/* This can happen with kdump kernels when accessing firmware
		   tables. */
		if (pfn < end_pfn_map)
			return;
		printk(KERN_ERR "reserve_bootmem: illegal reserve %lx %u\n",
				phys, len);
		return;
	}

	/* Should check here against the e820 map to avoid double free */
#ifdef CONFIG_NUMA
	reserve_bootmem_node(NODE_DATA(nid), phys, len);
#else
	reserve_bootmem(phys, len);
#endif
	if (phys+len <= MAX_DMA_PFN*PAGE_SIZE) {
		dma_reserve += len / PAGE_SIZE;
		set_dma_reserve(dma_reserve);
	}
}

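/*
 * Walk the kernel page tables and report whether 'addr' is a canonical
 * kernel virtual address currently backed by a valid page (2MB or 4kB).
 */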
int kern_addr_valid(unsigned long addr)
{
	unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (above != 0 && above != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;
	if (pmd_large(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;
	return pfn_valid(pte_pfn(*pte));
}

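/*
 * Expose the exception_trace flag (defined elsewhere in the x86-64 fault
 * handling code) as /proc/sys/debug/exception-trace.
 */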
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>

extern int exception_trace, page_fault_trace;

static ctl_table debug_table2[] = {
	{
		.ctl_name	= 99,
		.procname	= "exception-trace",
		.data		= &exception_trace,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{}
};

static ctl_table debug_root_table2[] = {
	{
		.ctl_name	= CTL_DEBUG,
		.procname	= "debug",
		.mode		= 0555,
		.child		= debug_table2
	},
	{}
};

static __init int x8664_sysctl_init(void)
{
	register_sysctl_table(debug_root_table2);
	return 0;
}
__initcall(x8664_sysctl_init);
#endif

/* A pseudo VMA to allow ptrace access for the vsyscall page. This only
   covers the 64-bit vsyscall page now. 32-bit has a real VMA now and does
   not need special handling anymore. */

static struct vm_area_struct gate_vma = {
	.vm_start = VSYSCALL_START,
	.vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES << PAGE_SHIFT),
	.vm_page_prot = PAGE_READONLY_EXEC,
	.vm_flags = VM_READ | VM_EXEC
};

struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
#ifdef CONFIG_IA32_EMULATION
	if (test_tsk_thread_flag(tsk, TIF_IA32))
		return NULL;
#endif
	return &gate_vma;
}

int in_gate_area(struct task_struct *task, unsigned long addr)
{
	struct vm_area_struct *vma = get_gate_vma(task);

	if (!vma)
		return 0;
	return (addr >= vma->vm_start) && (addr < vma->vm_end);
}

/* Use this when you have no reliable task/vma, typically from interrupt
 * context. It is less reliable than using the task's vma and may give
 * false positives.
 */
int in_gate_area_no_task(unsigned long addr)
{
	return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
}