X-Git-Url: https://git.openpandora.org/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=arch%2Farm%2Finclude%2Fasm%2Fpgtable.h;h=8ade1840c6f232f29421517687774eba6b075900;hb=18974369cfe23acf16d0fb79e0d1fba7a9a95ec0;hp=5750704e02718b9247704021a0832f161bcbaf3b;hpb=a18f22a968de17b29f2310cdb7ba69163e65ec15;p=pandora-kernel.git

diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 5750704e0271..8ade1840c6f2 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -24,6 +24,8 @@
 #include <mach/vmalloc.h>
 #include <asm/pgtable-hwdef.h>
 
+#include <asm/pgtable-2level.h>
+
 /*
  * Just any arbitrary offset to the start of the vmalloc VM area: the
  * current 8MB value just means that there will be a 8MB "hole" after the
@@ -41,79 +43,6 @@
 #define VMALLOC_START		(((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
 #endif
 
-/*
- * Hardware-wise, we have a two level page table structure, where the first
- * level has 4096 entries, and the second level has 256 entries.  Each entry
- * is one 32-bit word.  Most of the bits in the second level entry are used
- * by hardware, and there aren't any "accessed" and "dirty" bits.
- *
- * Linux on the other hand has a three level page table structure, which can
- * be wrapped to fit a two level page table structure easily - using the PGD
- * and PTE only.  However, Linux also expects one "PTE" table per page, and
- * at least a "dirty" bit.
- *
- * Therefore, we tweak the implementation slightly - we tell Linux that we
- * have 2048 entries in the first level, each of which is 8 bytes (iow, two
- * hardware pointers to the second level.)  The second level contains two
- * hardware PTE tables arranged contiguously, preceded by Linux versions
- * which contain the state information Linux needs.  We, therefore, end up
- * with 512 entries in the "PTE" level.
- *
- * This leads to the page tables having the following layout:
- *
- *    pgd             pte
- * |        |
- * +--------+
- * |        |       +------------+ +0
- * +- - - - +       | Linux pt 0 |
- * |        |       +------------+ +1024
- * +--------+ +0    | Linux pt 1 |
- * |        |-----> +------------+ +2048
- * +- - - - + +4    |  h/w pt 0  |
- * |        |-----> +------------+ +3072
- * +--------+ +8    |  h/w pt 1  |
- * |        |       +------------+ +4096
- *
- * See L_PTE_xxx below for definitions of bits in the "Linux pt", and
- * PTE_xxx for definitions of bits appearing in the "h/w pt".
- *
- * PMD_xxx definitions refer to bits in the first level page table.
- *
- * The "dirty" bit is emulated by only granting hardware write permission
- * iff the page is marked "writable" and "dirty" in the Linux PTE.  This
- * means that a write to a clean page will cause a permission fault, and
- * the Linux MM layer will mark the page dirty via handle_pte_fault().
- * For the hardware to notice the permission change, the TLB entry must
- * be flushed, and ptep_set_access_flags() does that for us.
- *
- * The "accessed" or "young" bit is emulated by a similar method; we only
- * allow accesses to the page if the "young" bit is set.  Accesses to the
- * page will cause a fault, and handle_pte_fault() will set the young bit
- * for us as long as the page is marked present in the corresponding Linux
- * PTE entry.  Again, ptep_set_access_flags() will ensure that the TLB is
- * up to date.
- *
- * However, when the "young" bit is cleared, we deny access to the page
- * by clearing the hardware PTE.  Currently Linux does not flush the TLB
- * for us in this case, which means the TLB will retain the translation
- * until either the TLB entry is evicted under pressure, or a context
- * switch which changes the user space mapping occurs.
- */
-#define PTRS_PER_PTE		512
-#define PTRS_PER_PMD		1
-#define PTRS_PER_PGD		2048
-
-#define PTE_HWTABLE_PTRS	(PTRS_PER_PTE)
-#define PTE_HWTABLE_OFF		(PTE_HWTABLE_PTRS * sizeof(pte_t))
-#define PTE_HWTABLE_SIZE	(PTRS_PER_PTE * sizeof(u32))
-
-/*
- * PMD_SHIFT determines the size of the area a second-level page table can map
- * PGDIR_SHIFT determines what a third-level page table entry can map
- */
-#define PMD_SHIFT		21
-#define PGDIR_SHIFT		21
-
 #define LIBRARY_TEXT_START	0x0c000000
 
 #ifndef __ASSEMBLY__
@@ -124,12 +53,6 @@ extern void __pgd_error(const char *file, int line, pgd_t);
 #define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte)
 #define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd)
 #define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd)
-#endif /* !__ASSEMBLY__ */
-
-#define PMD_SIZE		(1UL << PMD_SHIFT)
-#define PMD_MASK		(~(PMD_SIZE-1))
-#define PGDIR_SIZE		(1UL << PGDIR_SHIFT)
-#define PGDIR_MASK		(~(PGDIR_SIZE-1))
 
 /*
  * This is the lowest virtual address we can permit any user space
@@ -138,60 +61,6 @@ extern void __pgd_error(const char *file, int line, pgd_t);
  */
 #define FIRST_USER_ADDRESS	PAGE_SIZE
 
-#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
-
-/*
- * section address mask and size definitions.
- */
-#define SECTION_SHIFT		20
-#define SECTION_SIZE		(1UL << SECTION_SHIFT)
-#define SECTION_MASK		(~(SECTION_SIZE-1))
-
-/*
- * ARMv6 supersection address mask and size definitions.
- */
-#define SUPERSECTION_SHIFT	24
-#define SUPERSECTION_SIZE	(1UL << SUPERSECTION_SHIFT)
-#define SUPERSECTION_MASK	(~(SUPERSECTION_SIZE-1))
-
-/*
- * "Linux" PTE definitions.
- *
- * We keep two sets of PTEs - the hardware and the linux version.
- * This allows greater flexibility in the way we map the Linux bits
- * onto the hardware tables, and allows us to have YOUNG and DIRTY
- * bits.
- *
- * The PTE table pointer refers to the hardware entries; the "Linux"
- * entries are stored 1024 bytes below.
- */
-#define L_PTE_PRESENT		(_AT(pteval_t, 1) << 0)
-#define L_PTE_YOUNG		(_AT(pteval_t, 1) << 1)
-#define L_PTE_FILE		(_AT(pteval_t, 1) << 2)	/* only when !PRESENT */
-#define L_PTE_DIRTY		(_AT(pteval_t, 1) << 6)
-#define L_PTE_RDONLY		(_AT(pteval_t, 1) << 7)
-#define L_PTE_USER		(_AT(pteval_t, 1) << 8)
-#define L_PTE_XN		(_AT(pteval_t, 1) << 9)
-#define L_PTE_SHARED		(_AT(pteval_t, 1) << 10)	/* shared(v6), coherent(xsc3) */
-
-/*
- * These are the memory types, defined to be compatible with
- * pre-ARMv6 CPUs cacheable and bufferable bits:  XXCB
- */
-#define L_PTE_MT_UNCACHED	(_AT(pteval_t, 0x00) << 2)	/* 0000 */
-#define L_PTE_MT_BUFFERABLE	(_AT(pteval_t, 0x01) << 2)	/* 0001 */
-#define L_PTE_MT_WRITETHROUGH	(_AT(pteval_t, 0x02) << 2)	/* 0010 */
-#define L_PTE_MT_WRITEBACK	(_AT(pteval_t, 0x03) << 2)	/* 0011 */
-#define L_PTE_MT_MINICACHE	(_AT(pteval_t, 0x06) << 2)	/* 0110 (sa1100, xscale) */
-#define L_PTE_MT_WRITEALLOC	(_AT(pteval_t, 0x07) << 2)	/* 0111 */
-#define L_PTE_MT_DEV_SHARED	(_AT(pteval_t, 0x04) << 2)	/* 0100 */
-#define L_PTE_MT_DEV_NONSHARED	(_AT(pteval_t, 0x0c) << 2)	/* 1100 */
-#define L_PTE_MT_DEV_WC		(_AT(pteval_t, 0x09) << 2)	/* 1001 */
-#define L_PTE_MT_DEV_CACHED	(_AT(pteval_t, 0x0b) << 2)	/* 1011 */
-#define L_PTE_MT_MASK		(_AT(pteval_t, 0x0f) << 2)
-
-#ifndef __ASSEMBLY__
-
 /*
  * The pgprot_* and protection_map entries will be fixed up in runtime
  * to include the cachable and bufferable bits based on memory policy,
@@ -327,10 +196,10 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 
 static inline pte_t *pmd_page_vaddr(pmd_t pmd)
 {
-	return __va(pmd_val(pmd) & PAGE_MASK);
+	return __va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK);
 }
 
-#define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(pmd_val(pmd)))
+#define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
 
 /* we don't need complex calculations here as the pmd is folded into the pgd */
 #define pmd_addr_end(addr,end)	(end)
@@ -351,7 +220,7 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
 #define pte_offset_map(pmd,addr)	(__pte_map(pmd) + pte_index(addr))
 #define pte_unmap(pte)			__pte_unmap(pte)
 
-#define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)
+#define pte_pfn(pte)		((pte_val(pte) & PHYS_MASK) >> PAGE_SHIFT)
 #define pfn_pte(pfn,prot)	__pte(__pfn_to_phys(pfn) | pgprot_val(prot))
 
 #define pte_page(pte)		pfn_to_page(pte_pfn(pte))
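
The block removed by the first two hunks is the two-level page table description and its constants, which this change appears to move into the newly included <asm/pgtable-2level.h>. As a quick illustration of the layout that comment describes, the following stand-alone C sketch (host code, not kernel code; modelling pte_t as a plain 32-bit word is an assumption of the example) shows how one 4 KiB "PTE" page holds two 256-entry Linux tables followed by their hardware twins, so the hardware entry for a given Linux pte lives PTE_HWTABLE_OFF bytes further on:

/* Sketch only: mirrors the removed PTRS_PER_PTE / PTE_HWTABLE_* constants. */
#include <stdio.h>
#include <stdint.h>

typedef uint32_t pte_t;			/* assumption: 32-bit entries, as on classic ARM */

#define PTRS_PER_PTE		512	/* two 256-entry Linux tables per page */
#define PTE_HWTABLE_PTRS	(PTRS_PER_PTE)
#define PTE_HWTABLE_OFF		(PTE_HWTABLE_PTRS * sizeof(pte_t))	/* 2048 bytes */
#define PTE_HWTABLE_SIZE	(PTRS_PER_PTE * sizeof(uint32_t))	/* 2048 bytes */

int main(void)
{
	/* One page worth of entries: Linux pt 0, Linux pt 1, h/w pt 0, h/w pt 1. */
	static pte_t page[PTRS_PER_PTE * 2];

	pte_t *linux_pte = &page[10];	/* an arbitrary Linux-side entry */
	pte_t *hw_pte = (pte_t *)((char *)linux_pte + PTE_HWTABLE_OFF);

	printf("Linux entry at byte offset %ld, hardware twin at %ld\n",
	       (long)((char *)linux_pte - (char *)page),
	       (long)((char *)hw_pte - (char *)page));
	printf("PTE_HWTABLE_OFF = %lu, PTE_HWTABLE_SIZE = %lu\n",
	       (unsigned long)PTE_HWTABLE_OFF, (unsigned long)PTE_HWTABLE_SIZE);
	return 0;
}

Running this prints offsets 40 and 2088: the hardware tables start 2048 bytes into the page, matching the "+2048" column in the removed diagram.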
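
The '+' lines in the last two hunks all add masking with PHYS_MASK (and, in pmd_page_vaddr(), a sign-extended (s32)PAGE_MASK), presumably so that pfn and virtual-address extraction keep working once page table entries grow wider than 32 bits and carry attribute bits above the physical address, LPAE-style. On a plain 32-bit configuration the mask covers the whole word, so behaviour there should be unchanged. The sketch below is again host-side C rather than kernel code, with a 40-bit PHYS_MASK and the bit positions chosen purely for illustration:

/* Sketch only: contrasts the old and new style of pte_pfn() extraction. */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT	12
#define PHYS_MASK	((1ULL << 40) - 1)	/* assumed 40-bit physical address space */

typedef uint64_t pteval_t;			/* assumed wide (LPAE-style) entry */

static uint64_t pte_pfn_old(pteval_t pte)	/* old: shift only */
{
	return pte >> PAGE_SHIFT;
}

static uint64_t pte_pfn_new(pteval_t pte)	/* new: mask to physical bits, then shift */
{
	return (pte & PHYS_MASK) >> PAGE_SHIFT;
}

int main(void)
{
	/* Hypothetical entry: frame 0x12345 plus attribute bits above bit 39. */
	pteval_t pte = ((pteval_t)0x12345 << PAGE_SHIFT) | (1ULL << 54) | (1ULL << 63);

	printf("shift only:     pfn = %#llx\n", (unsigned long long)pte_pfn_old(pte));
	printf("with PHYS_MASK: pfn = %#llx\n", (unsigned long long)pte_pfn_new(pte));
	return 0;
}

The (s32) cast in pmd_page_vaddr() works toward the same end: sign-extending PAGE_MASK keeps it from truncating a wide pmd_val() to 32 bits while still clearing the low-order bits, and PHYS_MASK then bounds the result to the physical address range.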