Merge branch 'stable-3.2' into pandora-3.2
[pandora-kernel.git] / arch / arm / include / asm / pgtable-2level.h
index 1cb80c4..18d7bfb 100644 (file)
  * The PTE table pointer refers to the hardware entries; the "Linux"
  * entries are stored 1024 bytes below.
  */
+/*
+ * Software "Linux" PTE bits.  NOTE(review): L_PTE_VALID and
+ * L_PTE_PRESENT share bit 0 -- with the 2-level format a valid entry
+ * is indistinguishable from a present one.  L_PTE_FILE reuses bit 2,
+ * which overlaps the memory-type field; per its comment it is only
+ * meaningful when the entry is !PRESENT.
+ */
+#define L_PTE_VALID            (_AT(pteval_t, 1) << 0)         /* Valid */
 #define L_PTE_PRESENT          (_AT(pteval_t, 1) << 0)
 #define L_PTE_YOUNG            (_AT(pteval_t, 1) << 1)
 #define L_PTE_FILE             (_AT(pteval_t, 1) << 2) /* only when !PRESENT */
 #define L_PTE_MT_VECTORS       (_AT(pteval_t, 0x0f) << 2)      /* 1111 */
 #define L_PTE_MT_MASK          (_AT(pteval_t, 0x0f) << 2)
 
+#ifndef __ASSEMBLY__
+
+/*
+ * The "pud_xxx()" functions here are trivial when the pmd is folded into
+ * the pud: the pud entry is never bad, always exists, and can't be set or
+ * cleared.
+ */
+/* All five are compile-time constants / no-ops; the argument is never
+ * evaluated, so side-effecting arguments are silently discarded. */
+#define pud_none(pud)          (0)
+#define pud_bad(pud)           (0)
+#define pud_present(pud)       (1)
+#define pud_clear(pudp)                do { } while (0)
+#define set_pud(pud,pudp)      do { } while (0)
+
+/*
+ * With the pmd folded into the pud there is no separate pmd table:
+ * the pud entry itself is reinterpreted as the pmd, so @addr is
+ * unused and the cast is the whole translation.
+ */
+static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
+{
+       return (pmd_t *)pud;
+}
+
+/*
+ * Bit 1 of a 2-level L1 descriptor distinguishes a section mapping
+ * from a page-table pointer.  NOTE(review): a huge-page (section)
+ * pmd therefore tests as "bad" here -- confirm that hugepage-aware
+ * callers test pmd_thp_or_huge()/pmd_trans_huge() first.
+ */
+#define pmd_bad(pmd)           (pmd_val(pmd) & 2)
+
+/*
+ * A Linux pmd covers two consecutive hardware L1 entries; copy and
+ * clear operate on both halves and then flush/clean the entry so the
+ * hardware walker sees a consistent pair.
+ */
+#define copy_pmd(pmdpd,pmdps)          \
+       do {                            \
+               pmdpd[0] = pmdps[0];    \
+               pmdpd[1] = pmdps[1];    \
+               flush_pmd_entry(pmdpd); \
+       } while (0)
+
+#define pmd_clear(pmdp)                        \
+       do {                            \
+               pmdp[0] = __pmd(0);     \
+               pmdp[1] = __pmd(0);     \
+               clean_pmd_entry(pmdp);  \
+       } while (0)
+
+/* we don't need complex calculations here as the pmd is folded into the pgd */
+#define pmd_addr_end(addr,end) (end)
+
+/* Present when the descriptor type field is anything but a fault entry. */
+#define pmd_present(pmd)        ((pmd_val(pmd) & PMD_TYPE_MASK) != PMD_TYPE_FAULT)
+
+/* PTE stores go through the per-CPU processor vector (handles the
+ * hardware/Linux split-table layout and any required cache ops). */
+#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)
+
+
+#ifdef CONFIG_SYS_SUPPORTS_HUGETLBFS
+
+/*
+ * now follows some of the definitions to allow huge page support, we can't put
+ * these in the hugetlb source files as they are also required for transparent
+ * hugepage support.
+ */
+
+/* A huge page is one pmd (i.e. one Linux-level L1 span). */
+#define HPAGE_SHIFT             PMD_SHIFT
+#define HPAGE_SIZE              (_AC(1, UL) << HPAGE_SHIFT)
+#define HPAGE_MASK              (~(HPAGE_SIZE - 1))
+#define HUGETLB_PAGE_ORDER      (HPAGE_SHIFT - PAGE_SHIFT)
+
+/*
+ * Size/index helpers for the huge "Linux PTE" shadow table covering
+ * the user address range below PAGE_OFFSET.
+ * NOTE(review): @addr is not parenthesized in the expansion below --
+ * safe for the usual plain-variable callers, but a macro-hygiene
+ * hazard if ever passed a compound expression.
+ */
+#define HUGE_LINUX_PTE_COUNT       (PAGE_OFFSET >> HPAGE_SHIFT)
+#define HUGE_LINUX_PTE_SIZE        (HUGE_LINUX_PTE_COUNT * sizeof(pte_t *))
+#define HUGE_LINUX_PTE_INDEX(addr) (addr >> HPAGE_SHIFT)
+
+/*
+ *  We re-purpose the following domain bits in the section descriptor
+ */
+#define PMD_DOMAIN_MASK                (_AT(pmdval_t, 0xF) << 5)
+#define PMD_DSECT_DIRTY                (_AT(pmdval_t, 1) << 5)
+#define PMD_DSECT_AF           (_AT(pmdval_t, 1) << 6)
+#define PMD_DSECT_SPLITTING    (_AT(pmdval_t, 1) << 7)
+
+/* Generator for pmd_<fn>() bit manipulators: applies @op to a copy of
+ * the pmd value and returns the modified pmd (input is not changed). */
+#define PMD_BIT_FUNC(fn,op) \
+static inline pmd_t pmd_##fn(pmd_t pmd) { pmd_val(pmd) op; return pmd; }
+
+/*
+ * Install @pmd at @pmdp, filling in both hardware L1 entries that
+ * back one Linux pmd.  @mm and @addr are accepted for interface
+ * compatibility but unused here.
+ */
+static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+                               pmd_t *pmdp, pmd_t pmd)
+{
+       /*
+        * we can sometimes be passed a pmd pointing to a level 2 descriptor
+        * from collapse_huge_page.
+        */
+       if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE) {
+               /* second entry points at the latter half of the pte table
+                * (256 hardware ptes per L1 entry) */
+               pmdp[0] = __pmd(pmd_val(pmd));
+               pmdp[1] = __pmd(pmd_val(pmd) + 256 * sizeof(pte_t));
+       } else {
+               pmdp[0] = __pmd(pmd_val(pmd));                  /* first 1M section  */
+               pmdp[1] = __pmd(pmd_val(pmd) + SECTION_SIZE);   /* second 1M section */
+       }
+
+       flush_pmd_entry(pmdp);
+}
+
+/* Baseline protection values for huge pmds/ptes, defined elsewhere
+ * (architecture setup code). */
+extern pmdval_t arm_hugepmdprotval;
+extern pteval_t arm_hugepteprotval;
+
+/* Turn the descriptor into a section mapping (type bits -> SECT). */
+#define pmd_mkhuge(pmd)                (__pmd((pmd_val(pmd) & ~PMD_TYPE_MASK) | PMD_TYPE_SECT))
+
+/* Accessed/dirty state lives in the re-purposed domain bits above;
+ * write/exec use the regular section AP/XN hardware bits. */
+PMD_BIT_FUNC(mkold, &= ~PMD_DSECT_AF);
+PMD_BIT_FUNC(mkdirty, |= PMD_DSECT_DIRTY);
+PMD_BIT_FUNC(mkclean, &= ~PMD_DSECT_DIRTY);
+PMD_BIT_FUNC(mkyoung, |= PMD_DSECT_AF);
+PMD_BIT_FUNC(mkwrite, |= PMD_SECT_AP_WRITE);
+PMD_BIT_FUNC(wrprotect,        &= ~PMD_SECT_AP_WRITE);
+PMD_BIT_FUNC(mknotpresent, &= ~PMD_TYPE_MASK);
+PMD_BIT_FUNC(mkexec,   &= ~PMD_SECT_XN);
+PMD_BIT_FUNC(mknexec,  |= PMD_SECT_XN);
+
+#define pmd_young(pmd)                 (pmd_val(pmd) & PMD_DSECT_AF)
+#define pmd_write(pmd)                 (pmd_val(pmd) & PMD_SECT_AP_WRITE)
+#define pmd_exec(pmd)                  (!(pmd_val(pmd) & PMD_SECT_XN))
+#define pmd_dirty(pmd)                 (pmd_val(pmd) & PMD_DSECT_DIRTY)
+
+#define __HAVE_ARCH_PMD_WRITE
+
+/*
+ * Rebuild a huge pmd for the address in @pmd with protections @prot.
+ * Keeps only the physical address (PMD_MASK) and the re-purposed
+ * domain bits, overlays the baseline arm_hugepmdprotval, then
+ * transfers dirty/exec/write/young from the pte-format @prot via the
+ * pte_*() testers (the pgprot is deliberately viewed as a pte here).
+ * Implemented as a GCC statement expression so it yields a value.
+ */
+#define pmd_modify(pmd, prot)                                                  \
+({                                                                             \
+       pmd_t pmdret = __pmd((pmd_val(pmd) & (PMD_MASK | PMD_DOMAIN_MASK))      \
+               | arm_hugepmdprotval);                                          \
+       pgprot_t inprot = prot;                                                 \
+       pte_t newprot = __pte(pgprot_val(inprot));                              \
+                                                                               \
+       if (pte_dirty(newprot))                                                 \
+               pmdret = pmd_mkdirty(pmdret);                                   \
+       else                                                                    \
+               pmdret = pmd_mkclean(pmdret);                                   \
+                                                                               \
+       if (pte_exec(newprot))                                                  \
+               pmdret = pmd_mkexec(pmdret);                                    \
+       else                                                                    \
+               pmdret = pmd_mknexec(pmdret);                                   \
+                                                                               \
+       if (pte_write(newprot))                                                 \
+               pmdret = pmd_mkwrite(pmdret);                                   \
+       else                                                                    \
+               pmdret = pmd_wrprotect(pmdret);                                 \
+                                                                               \
+       if (pte_young(newprot))                                                 \
+               pmdret = pmd_mkyoung(pmdret);                                   \
+       else                                                                    \
+               pmdret = pmd_mkold(pmdret);                                     \
+                                                                               \
+       pmdret;                                                                 \
+})
+
+/* A huge mapping will fault (and need fixup) unless it is already
+ * young, writable and dirty -- software manages those bits. */
+#define pmd_hugewillfault(pmd) (       !pmd_young(pmd) ||      \
+                                       !pmd_write(pmd) ||      \
+                                       !pmd_dirty(pmd) )
+#define pmd_thp_or_huge(pmd)           ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_SECT)
+#else
+/* No huge page support: stub out the predicates. */
+#define HPAGE_SIZE 0
+#define pmd_hugewillfault(pmd) (0)
+#define pmd_thp_or_huge(pmd)   (0)
+#endif /* CONFIG_SYS_SUPPORTS_HUGETLBFS */
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+/*
+ * NOTE(review): this block uses PMD_BIT_FUNC, PMD_DSECT_SPLITTING and
+ * (via pfn_pmd) pmd_modify, all of which are only defined under
+ * CONFIG_SYS_SUPPORTS_HUGETLBFS above -- THP=y with that option off
+ * will not compile; presumably Kconfig enforces the dependency, verify.
+ * pmd_mkhuge is redefined here, token-identical to the earlier one,
+ * which is legal but redundant when both options are enabled.
+ */
+#define pmd_mkhuge(pmd)                (__pmd((pmd_val(pmd) & ~PMD_TYPE_MASK) | PMD_TYPE_SECT))
+
+PMD_BIT_FUNC(mksplitting, |= PMD_DSECT_SPLITTING);
+#define pmd_trans_splitting(pmd)       (pmd_val(pmd) & PMD_DSECT_SPLITTING)
+#define pmd_trans_huge(pmd)            ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_SECT)
+
+/*
+ * Return the page frame number mapped by @pmd.  Section descriptors
+ * carry permission/domain bits in the low word, so a wider mask
+ * (HPAGE_MASK) is needed to isolate the physical base address.
+ */
+static inline unsigned long pmd_pfn(pmd_t pmd)
+{
+       /*
+        * for a section, we need to mask off more of the pmd
+        * before looking up the pfn
+        */
+       if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_SECT)
+               return __phys_to_pfn(pmd_val(pmd) & HPAGE_MASK);
+       else
+               return __phys_to_pfn(pmd_val(pmd) & PHYS_MASK);
+}
+
+/*
+ * Build a huge pmd for @pfn / @page with protection @prot.
+ * Fixed: the macro replacement lists previously ended in a semicolon,
+ * which breaks any use in expression context (e.g. as a function
+ * argument) and doubles the statement terminator otherwise.
+ */
+#define pfn_pmd(pfn,prot) pmd_modify(__pmd(__pfn_to_phys(pfn)),prot)
+#define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot)
+
+/* THP is unconditionally available on this configuration. */
+static inline int has_transparent_hugepage(void)
+{
+       return 1;
+}
+
+/* Internal helpers for pmd_page(): detect a section descriptor and
+ * translate it to its struct page.  Stubbed to constant 0 when THP is
+ * off, making the huge branch in pmd_page() dead code. */
+#define _PMD_HUGE(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_SECT)
+#define _PMD_HPAGE(pmd) (phys_to_page(pmd_val(pmd) & HPAGE_MASK))
+#else
+#define _PMD_HUGE(pmd) (0)
+#define _PMD_HPAGE(pmd) (0)
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+/*
+ * Return the struct page backing @pmd.  Section (huge) descriptors
+ * need the wider HPAGE_MASK to strip the low attribute bits; table
+ * pointers only need PHYS_MASK.
+ */
+static inline struct page *pmd_page(pmd_t pmd)
+{
+       /*
+        * for a section, we need to mask off more of the pmd
+        * before looking up the page as it is a section descriptor.
+        */
+       if (_PMD_HUGE(pmd))
+               return _PMD_HPAGE(pmd);
+
+       return phys_to_page(pmd_val(pmd) & PHYS_MASK);
+}
+
+#endif /* __ASSEMBLY__ */
+
 #endif /* _ASM_PGTABLE_2LEVEL_H */