[pandora-kernel.git] arch/s390/include/asm/hugetlb.h
/*
 *  IBM System z Huge TLB Page Support for Kernel.
 *
 *    Copyright IBM Corp. 2008
 *    Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
 */

#ifndef _ASM_S390_HUGETLB_H
#define _ASM_S390_HUGETLB_H

#include <asm/page.h>
#include <asm/pgtable.h>

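/*
 * Huge pages may live anywhere in the address space, so there is no
 * arch-only range and the generic pgd freeing code can be used as-is.
 * Huge pages are only reported as supported when the machine provides
 * the hardware large-page facility (MACHINE_HAS_HPAGE).
 */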
#define is_hugepage_only_range(mm, addr, len)	0
#define hugetlb_free_pgd_range			free_pgd_range
#define hugepages_supported()			(MACHINE_HAS_HPAGE)

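/* Install a huge pte; the implementation lives in the s390 mm code. */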
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte);

/*
 * If the arch doesn't supply something else, assume that hugepage
 * size aligned regions are ok without further preparation.
 */
static inline int prepare_hugepage_range(struct file *file,
			unsigned long addr, unsigned long len)
{
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;
	return 0;
}

#define hugetlb_prefault_arch_hook(mm)		do { } while (0)
#define arch_clear_hugepage_flags(page)		do { } while (0)

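/*
 * Per-page setup/teardown hooks.  On machines without hardware large
 * pages these are presumably used to pre-fill and free the page table
 * that emulates a huge page, see huge_ptep_get() below.
 */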
int arch_prepare_hugepage(struct page *page);
void arch_release_hugepage(struct page *page);

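/*
 * Write protection is tracked with the pte protection bit; _PAGE_RO
 * occupies the same bit position as _SEGMENT_ENTRY_RO, so the bit set
 * here remains meaningful once the value is stored as a segment entry.
 */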
static inline pte_t huge_pte_wrprotect(pte_t pte)
{
	pte_val(pte) |= _PAGE_RO;
	return pte;
}

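/*
 * A huge pte is treated as none only when the segment entry is invalid
 * and the protection bit is clear; the protection bit distinguishes an
 * empty entry from other invalid entry states.
 */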
static inline int huge_pte_none(pte_t pte)
{
	return (pte_val(pte) & _SEGMENT_ENTRY_INV) &&
		!(pte_val(pte) & _SEGMENT_ENTRY_RO);
}

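/*
 * On machines without the hardware large-page facility a "huge" pte is
 * emulated: the segment entry points to a pre-filled page table.  In
 * that case a huge pte is reconstructed from the first pte of that
 * page table, combined with the invalid and protection bits kept in
 * the segment entry itself.
 */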
static inline pte_t huge_ptep_get(pte_t *ptep)
{
	pte_t pte = *ptep;
	unsigned long mask;

	if (!MACHINE_HAS_HPAGE) {
		ptep = (pte_t *) (pte_val(pte) & _SEGMENT_ENTRY_ORIGIN);
		if (ptep) {
			mask = pte_val(pte) &
				(_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO);
			pte = pte_mkhuge(*ptep);
			pte_val(pte) |= mask;
		}
	}
	return pte;
}

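/*
 * Read and clear a huge pte.  The TLB flush is deferred: flush_mm is
 * set and the actual flush happens at the next flush point, so the
 * cleared entry must not be re-used before then.
 */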
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep)
{
	pte_t pte = huge_ptep_get(ptep);

	mm->context.flush_mm = 1;
	pmd_clear((pmd_t *) ptep);
	return pte;
}

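/*
 * Invalidate a pmd with COMPARE AND SWAP AND PURGE (csp): the even/odd
 * register pair 2/3 holds the old entry and its invalid replacement,
 * register 4 the storage operand address.  csp atomically stores the
 * invalid entry and purges the TLBs of all CPUs.  This is the fallback
 * for machines without the IDTE instruction.
 */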
static inline void __pmd_csp(pmd_t *pmdp)
{
	register unsigned long reg2 asm("2") = pmd_val(*pmdp);
	register unsigned long reg3 asm("3") = pmd_val(*pmdp) |
					       _SEGMENT_ENTRY_INV;
	register unsigned long reg4 asm("4") = ((unsigned long) pmdp) + 5;

	asm volatile(
		"	csp %1,%3"
		: "=m" (*pmdp)
		: "d" (reg2), "d" (reg3), "d" (reg4), "m" (*pmdp) : "cc");
	pmd_val(*pmdp) = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY;
}

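/*
 * Invalidate a pmd with IDTE (INVALIDATE DAT TABLE ENTRY, emitted via
 * .insn since opcode 0xb98e may be unknown to older assemblers).  IDTE
 * needs the segment table origin, computed back from the entry
 * address, and purges the TLB entries formed from that segment entry.
 * Entries that are already invalid are left alone.
 */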
static inline void __pmd_idte(unsigned long address, pmd_t *pmdp)
{
	unsigned long sto = (unsigned long) pmdp -
				pmd_index(address) * sizeof(pmd_t);

	if (!(pmd_val(*pmdp) & _SEGMENT_ENTRY_INV)) {
		asm volatile(
			"	.insn	rrf,0xb98e0000,%2,%3,0,0"
			: "=m" (*pmdp)
			: "m" (*pmdp), "a" (sto),
			  "a" ((address & HPAGE_MASK))
		);
	}
	pmd_val(*pmdp) = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY;
}

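/*
 * Invalidate a huge pte: use IDTE when the machine has it, otherwise
 * fall back to csp.  Both paths leave an invalid segment entry behind.
 */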
static inline void huge_ptep_invalidate(struct mm_struct *mm,
					unsigned long address, pte_t *ptep)
{
	pmd_t *pmdp = (pmd_t *) ptep;

	if (MACHINE_HAS_IDTE)
		__pmd_idte(address, pmdp);
	else
		__pmd_csp(pmdp);
}

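/*
 * Only rewrite the entry if it actually changed; the old entry is
 * invalidated before the new one is installed, as required for valid
 * attached DAT entries.  Evaluates to whether the entry changed.
 */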
#define huge_ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty) \
({									    \
	int __changed = !pte_same(huge_ptep_get(__ptep), __entry);	    \
	if (__changed) {						    \
		huge_ptep_invalidate((__vma)->vm_mm, __addr, __ptep);	    \
		set_huge_pte_at((__vma)->vm_mm, __addr, __ptep, __entry);   \
	}								    \
	__changed;							    \
})

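/*
 * Write-protect a huge pte.  The immediate invalidation is skipped
 * when no other CPU can hold a TLB entry for this mm, i.e. when the
 * mm is attached only once and belongs to the current task; the
 * deferred flush via flush_mm is enough in that case.
 */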
#define huge_ptep_set_wrprotect(__mm, __addr, __ptep)			\
({									\
	pte_t __pte = huge_ptep_get(__ptep);				\
	if (pte_write(__pte)) {						\
		(__mm)->context.flush_mm = 1;				\
		if (atomic_read(&(__mm)->context.attach_count) > 1 ||	\
		    (__mm) != current->active_mm)			\
			huge_ptep_invalidate(__mm, __addr, __ptep);	\
		set_huge_pte_at(__mm, __addr, __ptep,			\
				huge_pte_wrprotect(__pte));		\
	}								\
})

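/* Invalidate the TLB entry for a huge pte that is about to go away. */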
static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	huge_ptep_invalidate(vma->vm_mm, address, ptep);
}

#endif /* _ASM_S390_HUGETLB_H */