/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 *
 * From i386 code copyright (C) 1995  Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/vt_kern.h>		/* For unblank_screen() */
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/hugetlb.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>

#include <asm/system.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/traps.h>
#include <asm/syscalls.h>

#include <arch/interrupts.h>
static noinline void force_sig_info_fault(int si_signo, int si_code,
	unsigned long address, int fault_num, struct task_struct *tsk)
{
	siginfo_t info;

	if (unlikely(tsk->pid < 2)) {
		panic("Signal %d (code %d) at %#lx sent to %s!",
		      si_signo, si_code & 0xffff, address,
		      tsk->pid ? "init" : "the idle task");
	}

	info.si_signo = si_signo;
	info.si_errno = 0;
	info.si_code = si_code;
	info.si_addr = (void __user *)address;
	info.si_trapno = fault_num;
	force_sig_info(si_signo, &info, tsk);
}
#ifndef __tilegx__
/*
 * Synthesize the fault a PL0 process would get by doing a word-load of
 * an unaligned address or a high kernel address.  Called indirectly
 * from sys_cmpxchg() in kernel/intvec.S.
 */
int _sys_cmpxchg_badaddr(unsigned long address, struct pt_regs *regs)
{
	if (address >= PAGE_OFFSET)
		force_sig_info_fault(SIGSEGV, SEGV_MAPERR, address,
				     INT_DTLB_MISS, current);
	else
		force_sig_info_fault(SIGBUS, BUS_ADRALN, address,
				     INT_UNALIGN_DATA, current);

	/*
	 * Adjust pc to point at the actual instruction, which is unusual
	 * for syscalls normally, but is appropriate when we are claiming
	 * that a syscall swint1 caused a page fault or bus error.
	 */
	regs->pc -= 8;

	/*
	 * Mark this as a caller-save interrupt, like a normal page fault,
	 * so that when we go through the signal handler path we will
	 * properly restore r0, r1, and r2 for the signal handler arguments.
	 */
	regs->flags |= PT_FLAGS_CALLER_SAVES;

	return 0;
}
#endif
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;
	if (!pmd_present(*pmd)) {
		set_pmd(pmd, *pmd_k);
		arch_flush_lazy_mmu_mode();
	} else
		BUG_ON(pmd_ptfn(*pmd) != pmd_ptfn(*pmd_k));
	return pmd_k;
}
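
/*
 * Note: kernel mappings in the vmalloc region are created only in the
 * init_mm "reference" page table; vmalloc_sync_one() above copies the
 * missing pmd into a process pgd lazily, at fault time.
 */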
/*
 * Handle a fault on the vmalloc or module mapping area
 */
static inline int vmalloc_fault(pgd_t *pgd, unsigned long address)
{
	pmd_t *pmd_k;
	pte_t *pte_k;

	/* Make sure we are in vmalloc area */
	if (!(address >= VMALLOC_START && address < VMALLOC_END))
		return -1;

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 */
	pmd_k = vmalloc_sync_one(pgd, address);
	if (!pmd_k)
		return -1;
	if (pmd_huge(*pmd_k))
		return 0;	/* support TILE huge_vmap() API */
	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;
	return 0;
}
/* Wait until this PTE has completed migration. */
static void wait_for_migration(pte_t *pte)
{
	if (pte_migrating(*pte)) {
		/*
		 * Wait until the migrater fixes up this pte.
		 * We scale the loop count by the clock rate so we'll wait for
		 * a few seconds here.
		 */
		int retries = 0;
		int bound = get_clock_rate();
		while (pte_migrating(*pte)) {
			barrier();
			if (++retries > bound)
				panic("Hit migrating PTE (%#llx) and"
				      " page PFN %#lx still migrating",
				      pte->val, pte_pfn(*pte));
		}
	}
}
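
/*
 * Note: the busy-wait above is deliberate.  We may be handling a fault
 * from an interrupt or atomic context where sleeping or taking locks
 * is not allowed (see handle_migrating_pte() below), so spinning until
 * the migrater clears the bit is the only safe option.
 */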
/*
 * It's not generally safe to use "current" to get the page table pointer,
 * since we might be running an oprofile interrupt in the middle of a
 * task switch.
 */
static pgd_t *get_current_pgd(void)
{
	HV_Context ctx = hv_inquire_context();
	unsigned long pgd_pfn = ctx.page_table >> PAGE_SHIFT;
	struct page *pgd_page = pfn_to_page(pgd_pfn);
	BUG_ON(PageHighMem(pgd_page));	/* oops, HIGHPTE? */
	return (pgd_t *) __va(ctx.page_table);
}
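
/*
 * Note: asking the hypervisor for the installed page table (rather than
 * dereferencing current->mm) gives a consistent answer even if we have
 * interrupted the middle of a task switch; see the comment above.
 */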
/*
 * We can receive a page fault from a migrating PTE at any time.
 * Handle it by just waiting until the fault resolves.
 *
 * It's also possible to get a migrating kernel PTE that resolves
 * itself during the downcall from hypervisor to Linux.  We just check
 * here to see if the PTE seems valid, and if so we retry it.
 *
 * NOTE! We MUST NOT take any locks for this case.  We may be in an
 * interrupt or a critical region, and must do as little as possible.
 * Similarly, we can't use atomic ops here, since we may be handling a
 * fault caused by an atomic op access.
 */
static int handle_migrating_pte(pgd_t *pgd, int fault_num,
				unsigned long address,
				int is_kernel_mode, int write)
{
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t pteval;

	if (pgd_addr_invalid(address))
		return 0;

	pgd += pgd_index(address);
	pud = pud_offset(pgd, address);
	if (!pud || !pud_present(*pud))
		return 0;
	pmd = pmd_offset(pud, address);
	if (!pmd || !pmd_present(*pmd))
		return 0;
	pte = pmd_huge_page(*pmd) ? ((pte_t *)pmd) :
		pte_offset_kernel(pmd, address);
	pteval = *pte;
	if (pte_migrating(pteval)) {
		wait_for_migration(pte);
		return 1;
	}

	if (!is_kernel_mode || !pte_present(pteval))
		return 0;
	if (fault_num == INT_ITLB_MISS) {
		if (pte_exec(pteval))
			return 1;
	} else if (write) {
		if (pte_write(pteval))
			return 1;
	} else {
		if (pte_read(pteval))
			return 1;
	}

	return 0;
}
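
/*
 * Note: a nonzero return from handle_migrating_pte() means the fault has
 * effectively been handled: either we waited out a migration, or the
 * kernel PTE already grants the access (exec, write, or read) that
 * faulted, so the caller can simply retry the access.
 */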
/*
 * This routine is responsible for faulting in user pages.
 * It passes the work off to one of the appropriate routines.
 * It returns true if the fault was successfully handled.
 */
static int handle_page_fault(struct pt_regs *regs,
			     int fault_num,
			     int is_page_fault,
			     unsigned long address,
			     int write)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned long stack_offset;
	int fault;
	int si_code;
	int is_kernel_mode;
	pgd_t *pgd;

	/* on TILE, protection faults are always writes */
	if (!is_page_fault)
		write = 1;

	is_kernel_mode = (EX1_PL(regs->ex1) != USER_PL);

	tsk = validate_current();
	/*
	 * Check to see if we might be overwriting the stack, and bail
	 * out if so.  The page fault code is a relatively likely
	 * place to get trapped in an infinite regress, and once we
	 * overwrite the whole stack, it becomes very hard to recover.
	 */
	stack_offset = stack_pointer & (THREAD_SIZE-1);
	if (stack_offset < THREAD_SIZE / 8) {
		pr_alert("Potential stack overrun: sp %#lx\n",
			 stack_pointer);
		show_regs(regs);
		pr_alert("Killing current process %d/%s\n",
			 tsk->pid, tsk->comm);
		do_group_exit(SIGKILL);
	}
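
	/*
	 * Note: THREAD_SIZE / 8 above is a heuristic: if the stack
	 * pointer is already within the lowest eighth of the stack,
	 * we assume further fault handling would overrun it entirely.
	 */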
	/*
	 * Early on, we need to check for migrating PTE entries;
	 * see homecache.c.  If we find a migrating PTE, we wait until
	 * the backing page claims to be done migrating, then we proceed.
	 * For kernel PTEs, we rewrite the PTE and return and retry.
	 * Otherwise, we treat the fault like a normal "no PTE" fault,
	 * rather than trying to patch up the existing PTE.
	 */
	pgd = get_current_pgd();
	if (handle_migrating_pte(pgd, fault_num, address,
				 is_kernel_mode, write))
		return 1;

	si_code = SEGV_MAPERR;
	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * This verifies that the fault happens in kernel space
	 * and that the fault was not a protection fault.
	 */
	if (unlikely(address >= TASK_SIZE &&
		     !is_arch_mappable_range(address, 0))) {
		if (is_kernel_mode && is_page_fault &&
		    vmalloc_fault(pgd, address) >= 0)
			return 1;
		/*
		 * Don't take the mm semaphore here. If we fixup a prefetch
		 * fault we could otherwise deadlock.
		 */
		mm = NULL;	/* happy compiler */
		vma = NULL;	/* happy compiler */
		goto bad_area_nosemaphore;
	}
	/*
	 * If we're trying to touch user-space addresses, we must
	 * be either at PL0, or else with interrupts enabled in the
	 * kernel, so either way we can re-enable interrupts here.
	 */
	local_irq_enable();

	mm = tsk->mm;

	/*
	 * If we're in an interrupt, have no user context or are running in an
	 * atomic region then we must not take the fault.
	 */
	if (in_atomic() || !mm) {
		vma = NULL;	/* happy compiler */
		goto bad_area_nosemaphore;
	}
	/*
	 * When running in the kernel we expect faults to occur only to
	 * addresses in user space.  All other faults represent errors in the
	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
	 * erroneous fault occurring in a code path which already holds mmap_sem
	 * we will deadlock attempting to validate the fault against the
	 * address space.  Luckily the kernel only validly references user
	 * space from well defined areas of code, which are listed in the
	 * exceptions table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a deadlock.
	 * Attempt to lock the address space, if we cannot we then validate the
	 * source.  If this is invalid we can skip the address space check,
	 * thus avoiding the deadlock.
	 */
	if (!down_read_trylock(&mm->mmap_sem)) {
		if (is_kernel_mode &&
		    !search_exception_tables(regs->pc)) {
			vma = NULL;	/* happy compiler */
			goto bad_area_nosemaphore;
		}
		down_read(&mm->mmap_sem);
	}
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (regs->sp < PAGE_OFFSET) {
		/*
		 * accessing the stack below sp is always a bug.
		 */
		if (address < regs->sp)
			goto bad_area;
	}
	if (expand_stack(vma, address))
		goto bad_area;

/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	si_code = SEGV_ACCERR;
	if (fault_num == INT_ITLB_MISS) {
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
	} else if (write) {
#ifdef TEST_VERIFY_AREA
		if (!is_page_fault && regs->cs == KERNEL_CS)
			pr_err("WP fault at "REGFMT"\n", regs->eip);
#endif
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (!is_page_fault || !(vma->vm_flags & VM_READ))
			goto bad_area;
	}

 survive:
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, write);
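
	/*
	 * Note: handle_mm_fault() returns a VM_FAULT_xxx bitmask; the
	 * error bits (OOM, SIGBUS) are dispatched below, while
	 * VM_FAULT_MAJOR merely means the page required I/O to fault in.
	 */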
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	if (fault & VM_FAULT_MAJOR)
		tsk->maj_flt++;
	else
		tsk->min_flt++;
#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
	/*
	 * If this was an asynchronous fault,
	 * restart the appropriate engine.
	 */
	switch (fault_num) {
#if CHIP_HAS_TILE_DMA()
	case INT_DMATLB_MISS:
	case INT_DMATLB_MISS_DWNCL:
	case INT_DMATLB_ACCESS:
	case INT_DMATLB_ACCESS_DWNCL:
		__insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__REQUEST_MASK);
		break;
#endif
#if CHIP_HAS_SN_PROC()
	case INT_SNITLB_MISS:
	case INT_SNITLB_MISS_DWNCL:
		__insn_mtspr(SPR_SNCTL,
			     __insn_mfspr(SPR_SNCTL) &
			     ~SPR_SNCTL__FRZPROC_MASK);
		break;
#endif
	}
#endif

	up_read(&mm->mmap_sem);
	return 1;
/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (!is_kernel_mode) {
		/*
		 * It's possible to have interrupts off here.
		 */
		local_irq_enable();

		force_sig_info_fault(SIGSEGV, si_code, address,
				     fault_num, tsk);
		return 0;
	}
no_context:
	/* Are we prepared to handle this kernel fault?  */
	if (fixup_exception(regs))
		return 0;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */

	bust_spinlocks(1);

	/* FIXME: no lookup_address() yet */
#ifdef SUPPORT_LOOKUP_ADDRESS
	if (fault_num == INT_ITLB_MISS) {
		pte_t *pte = lookup_address(address);

		if (pte && pte_present(*pte) && !pte_exec_kernel(*pte))
			pr_crit("kernel tried to execute"
				" non-executable page - exploit attempt?"
				" (uid: %d)\n", current->uid);
	}
#endif
	if (address < PAGE_SIZE)
		pr_alert("Unable to handle kernel NULL pointer dereference\n");
	else
		pr_alert("Unable to handle kernel paging request\n");
	pr_alert(" at virtual address "REGFMT", pc "REGFMT"\n",
		 address, regs->pc);

	show_regs(regs);

	if (unlikely(tsk->pid < 2)) {
		panic("Kernel page fault running %s!",
		      tsk->pid ? "init" : "the idle task");
	}

	/*
	 * More FIXME: we should probably copy the i386 here and
	 * implement a generic die() routine.  Not today.
	 */

	bust_spinlocks(0);

	do_group_exit(SIGKILL);
/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (is_global_init(tsk)) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	pr_alert("VM: killing process %s\n", tsk->comm);
	if (!is_kernel_mode)
		do_group_exit(SIGKILL);
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die */
	if (is_kernel_mode)
		goto no_context;

	force_sig_info_fault(SIGBUS, BUS_ADRERR, address, fault_num, tsk);
	return 0;
}
#ifndef __tilegx__

/* We must release ICS before panicking or we won't get anywhere. */
#define ics_panic(fmt, ...) do { \
	__insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0); \
	panic(fmt, __VA_ARGS__); \
} while (0)
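
/*
 * Note: panic() will print and take further faults along the way;
 * with INTERRUPT_CRITICAL_SECTION still set, those would recurse
 * rather than be handled, hence clearing the SPR first.
 */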
/*
 * When we take an ITLB or DTLB fault or access violation in the
 * supervisor while the critical section bit is set, the hypervisor is
 * reluctant to write new values into the EX_CONTEXT_1_x registers,
 * since that might indicate we have not yet squirreled the SPR
 * contents away and thus cannot safely take a recursive interrupt.
 * Accordingly, the hypervisor passes us the PC via SYSTEM_SAVE_1_2.
 */
struct intvec_state do_page_fault_ics(struct pt_regs *regs, int fault_num,
				      unsigned long address,
				      unsigned long info)
{
	unsigned long pc = info & ~1;
	int write = info & 1;
	pgd_t *pgd = get_current_pgd();

	/* Retval is 1 at first since we will handle the fault fully. */
	struct intvec_state state = {
		do_page_fault, fault_num, address, write, 1
	};

	/* Validate that we are plausibly in the right routine. */
	if ((pc & 0x7) != 0 || pc < PAGE_OFFSET ||
	    (fault_num != INT_DTLB_MISS &&
	     fault_num != INT_DTLB_ACCESS)) {
		unsigned long old_pc = regs->pc;
		regs->pc = pc;
		ics_panic("Bad ICS page fault args:"
			  " old PC %#lx, fault %d/%d at %#lx\n",
			  old_pc, fault_num, write, address);
	}
	/* We might be faulting on a vmalloc page, so check that first. */
	if (fault_num != INT_DTLB_ACCESS && vmalloc_fault(pgd, address) >= 0)
		return state;
	/*
	 * If we faulted with ICS set in sys_cmpxchg, we are providing
	 * a user syscall service that should generate a signal on
	 * fault.  We didn't set up a kernel stack on initial entry to
	 * sys_cmpxchg, but instead had one set up by the fault, which
	 * (because sys_cmpxchg never releases ICS) came to us via the
	 * SYSTEM_SAVE_1_2 mechanism, and thus EX_CONTEXT_1_[01] are
	 * still referencing the original user code.  We release the
	 * atomic lock and rewrite pt_regs so that it appears that we
	 * came from user-space directly, and after we finish the
	 * fault we'll go back to user space and re-issue the swint.
	 * This way the backtrace information is correct if we need to
	 * emit a stack dump at any point while handling this.
	 *
	 * Must match register use in sys_cmpxchg().
	 */
	if (pc >= (unsigned long) sys_cmpxchg &&
	    pc < (unsigned long) __sys_cmpxchg_end) {
#ifdef CONFIG_SMP
		/* Don't unlock before we could have locked. */
		if (pc >= (unsigned long)__sys_cmpxchg_grab_lock) {
			int *lock_ptr = (int *)(regs->regs[ATOMIC_LOCK_REG]);
			__atomic_fault_unlock(lock_ptr);
		}
#endif
		regs->sp = regs->regs[27];
	}
	/*
	 * We can also fault in the atomic assembly, in which
	 * case we use the exception table to do the first-level fixup.
	 * We may re-fixup again in the real fault handler if it
	 * turns out the faulting address is just bad, and not,
	 * for example, migrating.
	 */
	else if (pc >= (unsigned long) __start_atomic_asm_code &&
		 pc < (unsigned long) __end_atomic_asm_code) {
		const struct exception_table_entry *fixup;
#ifdef CONFIG_SMP
		/* Unlock the atomic lock. */
		int *lock_ptr = (int *)(regs->regs[ATOMIC_LOCK_REG]);
		__atomic_fault_unlock(lock_ptr);
#endif
		fixup = search_exception_tables(pc);
		if (!fixup)
			ics_panic("ICS atomic fault not in table:"
				  " PC %#lx, fault %d", pc, fault_num);
		regs->pc = fixup->fixup;
		regs->ex1 = PL_ICS_EX1(KERNEL_PL, 0);
	}
	/*
	 * NOTE: the only other type of access that might bring us here
	 * is the memory ops in __tns_atomic_acquire/__tns_atomic_release,
	 * but we don't have to check specially for them since we can
	 * always safely return to the address of the fault and retry,
	 * since no separate atomic locks are involved.
	 */

	/*
	 * Now that we have released the atomic lock (if necessary),
	 * it's safe to spin if the PTE that caused the fault was migrating.
	 */
	if (fault_num == INT_DTLB_ACCESS)
		write = 1;
	if (handle_migrating_pte(pgd, fault_num, address, 1, write))
		return state;

	/* Return zero so that we continue on with normal fault handling. */
	state.retval = 0;
	return state;
}

#endif /* !__tilegx__ */
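
/*
 * Note: the !__tilegx__ section above exists to back the 32-bit atomic
 * fast-path syscalls; tilegx has native atomic instructions and does
 * not route atomic operations through these routines.
 */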
/*
 * This routine handles page faults.  It determines the address and the
 * problem, then passes it off to handle_page_fault() for normal DTLB and
 * ITLB issues, and for DMA or SN processor faults when we are in user
 * space.  For the latter, if we're in kernel mode, we just save the
 * interrupt away appropriately and return immediately.  We can't do
 * page faults for user code while in kernel mode.
 */
void do_page_fault(struct pt_regs *regs, int fault_num,
		   unsigned long address, unsigned long write)
{
	int is_page_fault;

	/* This case should have been handled by do_page_fault_ics(). */
	BUG_ON(EX1_ICS(regs->ex1));
#if CHIP_HAS_TILE_DMA()
	/*
	 * If it's a DMA fault, suspend the transfer while we're
	 * handling the miss; we'll restart after it's handled.  If we
	 * don't suspend, it's possible that this process could swap
	 * out and back in, and restart the engine since the DMA is
	 * still in play.
	 */
	if (fault_num == INT_DMATLB_MISS ||
	    fault_num == INT_DMATLB_ACCESS ||
	    fault_num == INT_DMATLB_MISS_DWNCL ||
	    fault_num == INT_DMATLB_ACCESS_DWNCL) {
		__insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__SUSPEND_MASK);
		while (__insn_mfspr(SPR_DMA_USER_STATUS) &
		       SPR_DMA_STATUS__BUSY_MASK)
			;
	}
#endif
	/* Validate fault num and decide if this is a first-time page fault. */
	switch (fault_num) {
	case INT_ITLB_MISS:
	case INT_DTLB_MISS:
#if CHIP_HAS_TILE_DMA()
	case INT_DMATLB_MISS:
	case INT_DMATLB_MISS_DWNCL:
#endif
#if CHIP_HAS_SN_PROC()
	case INT_SNITLB_MISS:
	case INT_SNITLB_MISS_DWNCL:
#endif
		is_page_fault = 1;
		break;

	case INT_DTLB_ACCESS:
#if CHIP_HAS_TILE_DMA()
	case INT_DMATLB_ACCESS:
	case INT_DMATLB_ACCESS_DWNCL:
#endif
		is_page_fault = 0;
		break;

	default:
		panic("Bad fault number %d in do_page_fault", fault_num);
	}
	if (EX1_PL(regs->ex1) != USER_PL) {
		struct async_tlb *async;
		switch (fault_num) {
#if CHIP_HAS_TILE_DMA()
		case INT_DMATLB_MISS:
		case INT_DMATLB_ACCESS:
		case INT_DMATLB_MISS_DWNCL:
		case INT_DMATLB_ACCESS_DWNCL:
			async = &current->thread.dma_async_tlb;
			break;
#endif
#if CHIP_HAS_SN_PROC()
		case INT_SNITLB_MISS:
		case INT_SNITLB_MISS_DWNCL:
			async = &current->thread.sn_async_tlb;
			break;
#endif
		default:
			async = NULL;
		}
		if (async) {

			/*
			 * No vmalloc check required, so we can allow
			 * interrupts immediately at this point.
			 */
			local_irq_enable();

			set_thread_flag(TIF_ASYNC_TLB);
			if (async->fault_num != 0) {
				panic("Second async fault %d;"
				      " old fault was %d (%#lx/%ld)",
				      fault_num, async->fault_num,
				      address, write);
			}
			BUG_ON(fault_num == 0);
			async->fault_num = fault_num;
			async->is_fault = is_page_fault;
			async->is_write = write;
			async->address = address;
			return;
		}
	}

	handle_page_fault(regs, fault_num, is_page_fault, address, write);
}
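
/*
 * Note: for kernel-mode DMA/SN faults, do_page_fault() above only
 * records the fault in the per-thread async_tlb structure, since we
 * can't take a page fault for user addresses while in kernel mode;
 * the fault is replayed on the return-to-user path by
 * do_async_page_fault() below.
 */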
#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
/*
 * Check an async_tlb structure to see if a deferred fault is waiting,
 * and if so pass it to the page-fault code.
 */
static void handle_async_page_fault(struct pt_regs *regs,
				    struct async_tlb *async)
{
	if (async->fault_num) {
		/*
		 * Clear async->fault_num before calling the page-fault
		 * handler so that if we re-interrupt before returning
		 * from the function we have somewhere to put the
		 * information from the new interrupt.
		 */
		int fault_num = async->fault_num;
		async->fault_num = 0;
		handle_page_fault(regs, fault_num, async->is_fault,
				  async->address, async->is_write);
	}
}
#endif /* CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC() */
/*
 * This routine effectively re-issues asynchronous page faults
 * when we are returning to user space.
 */
void do_async_page_fault(struct pt_regs *regs)
{
	/*
	 * Clear thread flag early.  If we re-interrupt while processing
	 * code here, we will reset it and recall this routine before
	 * returning to user space.
	 */
	clear_thread_flag(TIF_ASYNC_TLB);

#if CHIP_HAS_TILE_DMA()
	handle_async_page_fault(regs, &current->thread.dma_async_tlb);
#endif
#if CHIP_HAS_SN_PROC()
	handle_async_page_fault(regs, &current->thread.sn_async_tlb);
#endif
}
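
/*
 * Note: do_async_page_fault() runs because do_page_fault() set
 * TIF_ASYNC_TLB when it deferred a kernel-mode fault; clearing the
 * flag before the handlers run means a re-interrupt simply schedules
 * another pass through this routine.
 */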
void vmalloc_sync_all(void)
{
#ifdef __tilegx__
	/* Currently all L1 kernel pmd's are static and shared. */
	BUILD_BUG_ON(pgd_index(VMALLOC_END) != pgd_index(VMALLOC_START));
#else
	/*
	 * Note that races in the updates of insync and start aren't
	 * problematic: insync can only get set bits added, and updates to
	 * start are only improving performance (without affecting correctness
	 * if undone).
	 */
	static DECLARE_BITMAP(insync, PTRS_PER_PGD);
	static unsigned long start = PAGE_OFFSET;
	unsigned long address;

	BUILD_BUG_ON(PAGE_OFFSET & ~PGDIR_MASK);
	for (address = start; address >= PAGE_OFFSET; address += PGDIR_SIZE) {
		if (!test_bit(pgd_index(address), insync)) {
			unsigned long flags;
			struct list_head *pos;

			spin_lock_irqsave(&pgd_lock, flags);
			list_for_each(pos, &pgd_list)
				if (!vmalloc_sync_one(list_to_pgd(pos),
						      address)) {
					/* Must be at first entry in list. */
					BUG_ON(pos != pgd_list.next);
					break;
				}
			spin_unlock_irqrestore(&pgd_lock, flags);
			if (pos != pgd_list.next)
				set_bit(pgd_index(address), insync);
		}
		if (address == start && test_bit(pgd_index(address), insync))
			start = address + PGDIR_SIZE;
	}
#endif
}