Merge branch 'stable-3.2' into pandora-3.2
author Grazvydas Ignotas <notasas@gmail.com>
Fri, 2 May 2014 23:14:57 +0000 (02:14 +0300)
committer Grazvydas Ignotas <notasas@gmail.com>
Fri, 2 May 2014 23:14:57 +0000 (02:14 +0300)
Conflicts:
arch/arm/mm/proc-v7.S

1  2 
arch/arm/include/asm/pgtable-2level.h
arch/arm/include/asm/pgtable.h
arch/arm/mm/Kconfig
arch/arm/mm/mmu.c
arch/arm/mm/proc-macros.S
arch/arm/mm/proc-v7-2level.S
kernel/exit.c
kernel/trace/blktrace.c
mm/hugetlb.c

  #define L_PTE_MT_DEV_NONSHARED        (_AT(pteval_t, 0x0c) << 2)      /* 1100 */
  #define L_PTE_MT_DEV_WC               (_AT(pteval_t, 0x09) << 2)      /* 1001 */
  #define L_PTE_MT_DEV_CACHED   (_AT(pteval_t, 0x0b) << 2)      /* 1011 */
+ #define L_PTE_MT_VECTORS      (_AT(pteval_t, 0x0f) << 2)      /* 1111 */
  #define L_PTE_MT_MASK         (_AT(pteval_t, 0x0f) << 2)
  
 +#ifndef __ASSEMBLY__
 +
 +/*
 + * The "pud_xxx()" functions here are trivial when the pmd is folded into
 + * the pud: the pud entry is never bad, always exists, and can't be set or
 + * cleared.
 + */
 +#define pud_none(pud)         (0)
 +#define pud_bad(pud)          (0)
 +#define pud_present(pud)      (1)
 +#define pud_clear(pudp)               do { } while (0)
 +#define set_pud(pud,pudp)     do { } while (0)
 +
 +static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
 +{
 +      return (pmd_t *)pud;
 +}
 +
 +#define pmd_bad(pmd)          (pmd_val(pmd) & 2)
 +
 +#define copy_pmd(pmdpd,pmdps)         \
 +      do {                            \
 +              pmdpd[0] = pmdps[0];    \
 +              pmdpd[1] = pmdps[1];    \
 +              flush_pmd_entry(pmdpd); \
 +      } while (0)
 +
 +#define pmd_clear(pmdp)                       \
 +      do {                            \
 +              pmdp[0] = __pmd(0);     \
 +              pmdp[1] = __pmd(0);     \
 +              clean_pmd_entry(pmdp);  \
 +      } while (0)
 +
 +/* we don't need complex calculations here as the pmd is folded into the pgd */
 +#define pmd_addr_end(addr,end) (end)
 +
 +#define pmd_present(pmd)        ((pmd_val(pmd) & PMD_TYPE_MASK) != PMD_TYPE_FAULT)
 +
 +#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)
 +
 +
 +#ifdef CONFIG_SYS_SUPPORTS_HUGETLBFS
 +
 +/*
 + * now follows some of the definitions to allow huge page support, we can't put
 + * these in the hugetlb source files as they are also required for transparent
 + * hugepage support.
 + */
 +
 +#define HPAGE_SHIFT             PMD_SHIFT
 +#define HPAGE_SIZE              (_AC(1, UL) << HPAGE_SHIFT)
 +#define HPAGE_MASK              (~(HPAGE_SIZE - 1))
 +#define HUGETLB_PAGE_ORDER      (HPAGE_SHIFT - PAGE_SHIFT)
 +
 +#define HUGE_LINUX_PTE_COUNT       (PAGE_OFFSET >> HPAGE_SHIFT)
 +#define HUGE_LINUX_PTE_SIZE        (HUGE_LINUX_PTE_COUNT * sizeof(pte_t *))
 +#define HUGE_LINUX_PTE_INDEX(addr) (addr >> HPAGE_SHIFT)
 +
 +/*
 + *  We re-purpose the following domain bits in the section descriptor
 + */
 +#define PMD_DOMAIN_MASK               (_AT(pmdval_t, 0xF) << 5)
 +#define PMD_DSECT_DIRTY               (_AT(pmdval_t, 1) << 5)
 +#define PMD_DSECT_AF          (_AT(pmdval_t, 1) << 6)
 +#define PMD_DSECT_SPLITTING   (_AT(pmdval_t, 1) << 7)
 +
 +#define PMD_BIT_FUNC(fn,op) \
 +static inline pmd_t pmd_##fn(pmd_t pmd) { pmd_val(pmd) op; return pmd; }
 +
 +static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 +                              pmd_t *pmdp, pmd_t pmd)
 +{
 +      /*
 +       * we can sometimes be passed a pmd pointing to a level 2 descriptor
 +       * from collapse_huge_page.
 +       */
 +      if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE) {
 +              pmdp[0] = __pmd(pmd_val(pmd));
 +              pmdp[1] = __pmd(pmd_val(pmd) + 256 * sizeof(pte_t));
 +      } else {
 +              pmdp[0] = __pmd(pmd_val(pmd));                  /* first 1M section  */
 +              pmdp[1] = __pmd(pmd_val(pmd) + SECTION_SIZE);   /* second 1M section */
 +      }
 +
 +      flush_pmd_entry(pmdp);
 +}
 +
 +extern pmdval_t arm_hugepmdprotval;
 +extern pteval_t arm_hugepteprotval;
 +
 +#define pmd_mkhuge(pmd)               (__pmd((pmd_val(pmd) & ~PMD_TYPE_MASK) | PMD_TYPE_SECT))
 +
 +PMD_BIT_FUNC(mkold, &= ~PMD_DSECT_AF);
 +PMD_BIT_FUNC(mkdirty, |= PMD_DSECT_DIRTY);
 +PMD_BIT_FUNC(mkclean, &= ~PMD_DSECT_DIRTY);
 +PMD_BIT_FUNC(mkyoung, |= PMD_DSECT_AF);
 +PMD_BIT_FUNC(mkwrite, |= PMD_SECT_AP_WRITE);
 +PMD_BIT_FUNC(wrprotect,       &= ~PMD_SECT_AP_WRITE);
 +PMD_BIT_FUNC(mknotpresent, &= ~PMD_TYPE_MASK);
 +PMD_BIT_FUNC(mkexec,  &= ~PMD_SECT_XN);
 +PMD_BIT_FUNC(mknexec, |= PMD_SECT_XN);
 +
 +#define pmd_young(pmd)                        (pmd_val(pmd) & PMD_DSECT_AF)
 +#define pmd_write(pmd)                        (pmd_val(pmd) & PMD_SECT_AP_WRITE)
 +#define pmd_exec(pmd)                 (!(pmd_val(pmd) & PMD_SECT_XN))
 +#define pmd_dirty(pmd)                        (pmd_val(pmd) & PMD_DSECT_DIRTY)
 +
 +#define __HAVE_ARCH_PMD_WRITE
 +
 +#define pmd_modify(pmd, prot)                                                 \
 +({                                                                            \
 +      pmd_t pmdret = __pmd((pmd_val(pmd) & (PMD_MASK | PMD_DOMAIN_MASK))      \
 +              | arm_hugepmdprotval);                                          \
 +      pgprot_t inprot = prot;                                                 \
 +      pte_t newprot = __pte(pgprot_val(inprot));                              \
 +                                                                              \
 +      if (pte_dirty(newprot))                                                 \
 +              pmdret = pmd_mkdirty(pmdret);                                   \
 +      else                                                                    \
 +              pmdret = pmd_mkclean(pmdret);                                   \
 +                                                                              \
 +      if (pte_exec(newprot))                                                  \
 +              pmdret = pmd_mkexec(pmdret);                                    \
 +      else                                                                    \
 +              pmdret = pmd_mknexec(pmdret);                                   \
 +                                                                              \
 +      if (pte_write(newprot))                                                 \
 +              pmdret = pmd_mkwrite(pmdret);                                   \
 +      else                                                                    \
 +              pmdret = pmd_wrprotect(pmdret);                                 \
 +                                                                              \
 +      if (pte_young(newprot))                                                 \
 +              pmdret = pmd_mkyoung(pmdret);                                   \
 +      else                                                                    \
 +              pmdret = pmd_mkold(pmdret);                                     \
 +                                                                              \
 +      pmdret;                                                                 \
 +})
 +
 +#define pmd_hugewillfault(pmd) (      !pmd_young(pmd) ||      \
 +                                      !pmd_write(pmd) ||      \
 +                                      !pmd_dirty(pmd) )
 +#define pmd_thp_or_huge(pmd)          ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_SECT)
 +#else
 +#define HPAGE_SIZE 0
 +#define pmd_hugewillfault(pmd)        (0)
 +#define pmd_thp_or_huge(pmd)  (0)
 +#endif /* CONFIG_SYS_SUPPORTS_HUGETLBFS */
 +
 +#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 +#define pmd_mkhuge(pmd)               (__pmd((pmd_val(pmd) & ~PMD_TYPE_MASK) | PMD_TYPE_SECT))
 +
 +PMD_BIT_FUNC(mksplitting, |= PMD_DSECT_SPLITTING);
 +#define pmd_trans_splitting(pmd)      (pmd_val(pmd) & PMD_DSECT_SPLITTING)
 +#define pmd_trans_huge(pmd)           ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_SECT)
 +
 +static inline unsigned long pmd_pfn(pmd_t pmd)
 +{
 +      /*
 +       * for a section, we need to mask off more of the pmd
 +       * before looking up the pfn
 +       */
 +      if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_SECT)
 +              return __phys_to_pfn(pmd_val(pmd) & HPAGE_MASK);
 +      else
 +              return __phys_to_pfn(pmd_val(pmd) & PHYS_MASK);
 +}
 +
 +#define pfn_pmd(pfn,prot) pmd_modify(__pmd(__pfn_to_phys(pfn)),prot);
 +#define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot);
 +
 +static inline int has_transparent_hugepage(void)
 +{
 +      return 1;
 +}
 +
 +#define _PMD_HUGE(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_SECT)
 +#define _PMD_HPAGE(pmd) (phys_to_page(pmd_val(pmd) & HPAGE_MASK))
 +#else
 +#define _PMD_HUGE(pmd) (0)
 +#define _PMD_HPAGE(pmd) (0)
 +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 +
 +static inline struct page *pmd_page(pmd_t pmd)
 +{
 +      /*
 +       * for a section, we need to mask off more of the pmd
 +       * before looking up the page as it is a section descriptor.
 +       */
 +      if (_PMD_HUGE(pmd))
 +              return _PMD_HPAGE(pmd);
 +
 +      return phys_to_page(pmd_val(pmd) & PHYS_MASK);
 +}
 +
 +#endif /* __ASSEMBLY__ */
 +
  #endif /* _ASM_PGTABLE_2LEVEL_H */
Simple merge
Simple merge
Simple merge
Simple merge
index 74357cb,0000000..37d2189
mode 100644,000000..100644
--- /dev/null
@@@ -1,171 -1,0 +1,168 @@@
- #ifdef CONFIG_CPU_USE_DOMAINS
-       @ allow kernel read/write access to read-only user pages
-       tstne   r3, #PTE_EXT_APX
-       bicne   r3, r3, #PTE_EXT_APX | PTE_EXT_AP0
- #endif
 +/*
 + * arch/arm/mm/proc-v7-2level.S
 + *
 + * Copyright (C) 2001 Deep Blue Solutions Ltd.
 + *
 + * This program is free software; you can redistribute it and/or modify
 + * it under the terms of the GNU General Public License version 2 as
 + * published by the Free Software Foundation.
 + */
 +
 +#define TTB_S         (1 << 1)
 +#define TTB_RGN_NC    (0 << 3)
 +#define TTB_RGN_OC_WBWA       (1 << 3)
 +#define TTB_RGN_OC_WT (2 << 3)
 +#define TTB_RGN_OC_WB (3 << 3)
 +#define TTB_NOS               (1 << 5)
 +#define TTB_IRGN_NC   ((0 << 0) | (0 << 6))
 +#define TTB_IRGN_WBWA ((0 << 0) | (1 << 6))
 +#define TTB_IRGN_WT   ((1 << 0) | (0 << 6))
 +#define TTB_IRGN_WB   ((1 << 0) | (1 << 6))
 +
 +/* PTWs cacheable, inner WB not shareable, outer WB not shareable */
 +#define TTB_FLAGS_UP  TTB_IRGN_WB|TTB_RGN_OC_WB
 +#define PMD_FLAGS_UP  PMD_SECT_WB
 +
 +/* PTWs cacheable, inner WBWA shareable, outer WBWA not shareable */
 +#define TTB_FLAGS_SMP TTB_IRGN_WBWA|TTB_S|TTB_NOS|TTB_RGN_OC_WBWA
 +#define PMD_FLAGS_SMP PMD_SECT_WBWA|PMD_SECT_S
 +
 +/*
 + *    cpu_v7_switch_mm(pgd_phys, tsk)
 + *
 + *    Set the translation table base pointer to be pgd_phys
 + *
 + *    - pgd_phys - physical address of new TTB
 + *
 + *    It is assumed that:
 + *    - we are not using split page tables
 + */
 +ENTRY(cpu_v7_switch_mm)
 +#ifdef CONFIG_MMU
 +      mov     r2, #0
 +      ldr     r1, [r1, #MM_CONTEXT_ID]        @ get mm->context.id
 +      ALT_SMP(orr     r0, r0, #TTB_FLAGS_SMP)
 +      ALT_UP(orr      r0, r0, #TTB_FLAGS_UP)
 +#ifdef CONFIG_ARM_ERRATA_430973
 +      mcr     p15, 0, r2, c7, c5, 6           @ flush BTAC/BTB
 +#endif
 +#ifdef CONFIG_ARM_ERRATA_754322
 +      dsb
 +#endif
 +      mcr     p15, 0, r2, c13, c0, 1          @ set reserved context ID
 +      isb
 +1:    mcr     p15, 0, r0, c2, c0, 0           @ set TTB 0
 +      isb
 +#ifdef CONFIG_ARM_ERRATA_754322
 +      dsb
 +#endif
 +      mcr     p15, 0, r1, c13, c0, 1          @ set context ID
 +      isb
 +#endif
 +      mov     pc, lr
 +ENDPROC(cpu_v7_switch_mm)
 +
 +/*
 + *    cpu_v7_set_pte_ext(ptep, pte)
 + *
 + *    Set a level 2 translation table entry.
 + *
 + *    - ptep  - pointer to level 2 translation table entry
 + *              (hardware version is stored at +2048 bytes)
 + *    - pte   - PTE value to store
 + *    - ext   - value for extended PTE bits
 + */
 +ENTRY(cpu_v7_set_pte_ext)
 +#ifdef CONFIG_MMU
 +      str     r1, [r0]                        @ linux version
 +
 +      bic     r3, r1, #0x000003f0
 +      bic     r3, r3, #PTE_TYPE_MASK
 +      orr     r3, r3, r2
 +      orr     r3, r3, #PTE_EXT_AP0 | 2
 +
 +      tst     r1, #1 << 4
 +      orrne   r3, r3, #PTE_EXT_TEX(1)
 +
 +      eor     r1, r1, #L_PTE_DIRTY
 +      tst     r1, #L_PTE_RDONLY | L_PTE_DIRTY
 +      orrne   r3, r3, #PTE_EXT_APX
 +
 +      tst     r1, #L_PTE_USER
 +      orrne   r3, r3, #PTE_EXT_AP1
 +
 +      tst     r1, #L_PTE_XN
 +      orrne   r3, r3, #PTE_EXT_XN
 +
 +      tst     r1, #L_PTE_YOUNG
 +      tstne   r1, #L_PTE_VALID
++      eorne   r1, r1, #L_PTE_NONE
++      tstne   r1, #L_PTE_NONE
 +      moveq   r3, #0
 +
 + ARM( str     r3, [r0, #2048]! )
 + THUMB(       add     r0, r0, #2048 )
 + THUMB(       str     r3, [r0] )
 +      mcr     p15, 0, r0, c7, c10, 1          @ flush_pte
 +#endif
 +      mov     pc, lr
 +ENDPROC(cpu_v7_set_pte_ext)
 +
 +      /*
 +       * Memory region attributes with SCTLR.TRE=1
 +       *
 +       *   n = TEX[0],C,B
 +       *   TR = PRRR[2n+1:2n]         - memory type
 +       *   IR = NMRR[2n+1:2n]         - inner cacheable property
 +       *   OR = NMRR[2n+17:2n+16]     - outer cacheable property
 +       *
 +       *                      n       TR      IR      OR
 +       *   UNCACHED           000     00
 +       *   BUFFERABLE         001     10      00      00
 +       *   WRITETHROUGH       010     10      10      10
 +       *   WRITEBACK          011     10      11      11
 +       *   reserved           110
 +       *   WRITEALLOC         111     10      01      01
 +       *   DEV_SHARED         100     01
 +       *   DEV_NONSHARED      100     01
 +       *   DEV_WC             001     10
 +       *   DEV_CACHED         011     10
 +       *
 +       * Other attributes:
 +       *
 +       *   DS0 = PRRR[16] = 0         - device shareable property
 +       *   DS1 = PRRR[17] = 1         - device shareable property
 +       *   NS0 = PRRR[18] = 0         - normal shareable property
 +       *   NS1 = PRRR[19] = 1         - normal shareable property
 +       *   NOS = PRRR[24+n] = 1       - not outer shareable
 +       */
 +.equ  PRRR,   0xff0a81a8
 +.equ  NMRR,   0x40e040e0
 +
 +      /*
 +       * Macro for setting up the TTBRx and TTBCR registers.
 +       * - \ttb0 and \ttb1 updated with the corresponding flags.
 +       */
 +      .macro  v7_ttb_setup, zero, ttbr0, ttbr1, tmp
 +      mcr     p15, 0, \zero, c2, c0, 2        @ TTB control register
 +      ALT_SMP(orr     \ttbr0, \ttbr0, #TTB_FLAGS_SMP)
 +      ALT_UP(orr      \ttbr0, \ttbr0, #TTB_FLAGS_UP)
 +      ALT_SMP(orr     \ttbr1, \ttbr1, #TTB_FLAGS_SMP)
 +      ALT_UP(orr      \ttbr1, \ttbr1, #TTB_FLAGS_UP)
 +      mcr     p15, 0, \ttbr1, c2, c0, 1       @ load TTB1
 +      .endm
 +
 +      __CPUINIT
 +
 +      /*   AT
 +       *  TFR   EV X F   I D LR    S
 +       * .EEE ..EE PUI. .T.T 4RVI ZWRS BLDP WCAM
 +       * rxxx rrxx xxx0 0101 xxxx xxxx x111 xxxx < forced
 +       *    1    0 110       0011 1100 .111 1101 < we want
 +       */
 +      .align  2
 +      .type   v7_crval, #object
 +v7_crval:
 +      crval   clear=0x0120c302, mmuset=0x10c03c7d, ucset=0x00c01c7c
 +
 +      .previous
diff --cc kernel/exit.c
Simple merge
Simple merge
diff --cc mm/hugetlb.c
Simple merge