mm/pagewalk.c (pandora-kernel.git)
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>

static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
                          struct mm_walk *walk)
{
        pte_t *pte;
        int err = 0;

        pte = pte_offset_map(pmd, addr);
        for (;;) {
                err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
                if (err)
                        break;
                addr += PAGE_SIZE;
                if (addr == end)
                        break;
                pte++;
        }

        pte_unmap(pte);
        return err;
}
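
/*
 * Illustrative sketch, not part of the original file: a minimal
 * ->pte_entry callback matching the invocation above. It is called
 * once per pte with a PAGE_SIZE range; returning non-zero aborts the
 * walk. The function name and the counter kept in ->private are
 * hypothetical.
 */
static int example_pte_entry(pte_t *pte, unsigned long addr,
                             unsigned long end, struct mm_walk *walk)
{
        unsigned long *present = walk->private;

        if (pte_present(*pte))
                (*present)++;
        return 0;
}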

static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
                          struct mm_walk *walk)
{
        pmd_t *pmd;
        unsigned long next;
        int err = 0;

        pmd = pmd_offset(pud, addr);
        do {
again:
                next = pmd_addr_end(addr, end);
                if (pmd_none(*pmd)) {
                        if (walk->pte_hole)
                                err = walk->pte_hole(addr, next, walk);
                        if (err)
                                break;
                        continue;
                }
                /*
                 * This implies that each ->pmd_entry() handler
                 * needs to know about pmd_trans_huge() pmds
                 */
                if (walk->pmd_entry)
                        err = walk->pmd_entry(pmd, addr, next, walk);
                if (err)
                        break;

                /*
                 * Check this here so we only break down trans_huge
                 * pages when we _need_ to
                 */
                if (!walk->pte_entry)
                        continue;

                split_huge_page_pmd(walk->mm, pmd);
                if (pmd_none_or_trans_huge_or_clear_bad(pmd))
                        goto again;
                err = walk_pte_range(pmd, addr, next, walk);
                if (err)
                        break;
        } while (pmd++, addr = next, addr != end);

        return err;
}
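
/*
 * Illustrative sketch, not part of the original file: as the comment in
 * walk_pmd_range() warns, a ->pmd_entry handler runs before any huge
 * page splitting and so must recognize pmd_trans_huge() pmds itself.
 * The function name is hypothetical.
 */
static int example_pmd_entry(pmd_t *pmd, unsigned long addr,
                             unsigned long end, struct mm_walk *walk)
{
        if (pmd_trans_huge(*pmd)) {
                /* account for the whole huge mapping in one step */
                return 0;
        }
        /* regular pmd: walk_pte_range() visits it if ->pte_entry is set */
        return 0;
}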

static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end,
                          struct mm_walk *walk)
{
        pud_t *pud;
        unsigned long next;
        int err = 0;

        pud = pud_offset(pgd, addr);
        do {
                next = pud_addr_end(addr, end);
                if (pud_none_or_clear_bad(pud)) {
                        if (walk->pte_hole)
                                err = walk->pte_hole(addr, next, walk);
                        if (err)
                                break;
                        continue;
                }
                if (walk->pud_entry)
                        err = walk->pud_entry(pud, addr, next, walk);
                if (!err && (walk->pmd_entry || walk->pte_entry))
                        err = walk_pmd_range(pud, addr, next, walk);
                if (err)
                        break;
        } while (pud++, addr = next, addr != end);

        return err;
}
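
/*
 * Illustrative sketch, not part of the original file: a ->pte_hole
 * callback. As in walk_pud_range() above, it is invoked for ranges
 * with no page table backing; returning non-zero aborts the walk.
 * The function name is hypothetical.
 */
static int example_pte_hole(unsigned long addr, unsigned long end,
                            struct mm_walk *walk)
{
        /* e.g. treat [addr, end) as zero-filled and keep walking */
        return 0;
}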

#ifdef CONFIG_HUGETLB_PAGE
static unsigned long hugetlb_entry_end(struct hstate *h, unsigned long addr,
                                       unsigned long end)
{
        unsigned long boundary = (addr & huge_page_mask(h)) + huge_page_size(h);
        return boundary < end ? boundary : end;
}

static int walk_hugetlb_range(struct vm_area_struct *vma,
                              unsigned long addr, unsigned long end,
                              struct mm_walk *walk)
{
        struct hstate *h = hstate_vma(vma);
        unsigned long next;
        unsigned long hmask = huge_page_mask(h);
        pte_t *pte;
        int err = 0;

        do {
                next = hugetlb_entry_end(h, addr, end);
                pte = huge_pte_offset(walk->mm, addr & hmask);
                if (pte && walk->hugetlb_entry)
                        err = walk->hugetlb_entry(pte, hmask, addr, next, walk);
                if (err)
                        return err;
        } while (addr = next, addr != end);

        return 0;
}

#else /* CONFIG_HUGETLB_PAGE */
static int walk_hugetlb_range(struct vm_area_struct *vma,
                              unsigned long addr, unsigned long end,
                              struct mm_walk *walk)
{
        return 0;
}

#endif /* CONFIG_HUGETLB_PAGE */
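
/*
 * Illustrative sketch, not part of the original file: a ->hugetlb_entry
 * callback matching the call in walk_hugetlb_range() above. pte is
 * non-NULL when called, and one entry covers the whole huge page
 * starting at (addr & hmask). The function name is hypothetical.
 */
static int example_hugetlb_entry(pte_t *pte, unsigned long hmask,
                                 unsigned long addr, unsigned long end,
                                 struct mm_walk *walk)
{
        /* inspect *pte for the huge page covering [addr, end) */
        return 0;
}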

/**
 * walk_page_range - walk a memory map's page tables with a callback
 * @addr: starting address
 * @end: ending address
 * @walk: set of callbacks to invoke for each level of the tree;
 *        the memory map to walk is taken from @walk->mm
 *
 * Recursively walk the page table for the memory area in a VMA,
 * calling supplied callbacks. Callbacks are called in-order (first
 * PGD, first PUD, first PMD, first PTE, second PTE... second PMD,
 * etc.). If lower-level callbacks are omitted, walking depth is reduced.
 *
 * Each callback receives an entry pointer and the start and end of the
 * associated range, and a copy of the original mm_walk for access to
 * the ->private or ->mm fields.
 *
 * Usually no locks are taken, but splitting a transparent huge page may
 * take the page table lock, and the bottom-level iterator will map PTE
 * directories from highmem if necessary.
 *
 * If any callback returns a non-zero value, the walk is aborted and
 * the return value is propagated back to the caller. Otherwise 0 is returned.
 *
 * walk->mm->mmap_sem must be held for at least read; the VM_BUG_ON()
 * below asserts this.
 */
int walk_page_range(unsigned long addr, unsigned long end,
                    struct mm_walk *walk)
{
        pgd_t *pgd;
        unsigned long next;
        int err = 0;

        if (addr >= end)
                return err;

        if (!walk->mm)
                return -EINVAL;

        VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem));

        pgd = pgd_offset(walk->mm, addr);
        do {
                struct vm_area_struct *vma = NULL;

                next = pgd_addr_end(addr, end);

                /*
                 * This function was not intended to be vma based.
                 * But there are vma special cases to be handled:
                 * - hugetlb vma's
                 * - VM_PFNMAP vma's
                 */
                vma = find_vma(walk->mm, addr);
                if (vma) {
                        /*
                         * There are no page structures backing a VM_PFNMAP
                         * range, so do not allow split_huge_page_pmd().
                         */
                        if ((vma->vm_start <= addr) &&
                            (vma->vm_flags & VM_PFNMAP)) {
                                if (walk->pte_hole)
                                        err = walk->pte_hole(addr, next, walk);
                                if (err)
                                        break;
                                pgd = pgd_offset(walk->mm, next);
                                continue;
                        }
                        /*
                         * Handle hugetlb vma individually because pagetable
                         * walk for the hugetlb page is dependent on the
                         * architecture and we can't handle it in the same
                         * manner as non-huge pages.
                         */
                        if (walk->hugetlb_entry && (vma->vm_start <= addr) &&
                            is_vm_hugetlb_page(vma)) {
                                if (vma->vm_end < next)
                                        next = vma->vm_end;
                                /*
                                 * Hugepage is very tightly coupled with vma,
                                 * so walk through hugetlb entries within a
                                 * given vma.
                                 */
                                err = walk_hugetlb_range(vma, addr, next, walk);
                                if (err)
                                        break;
                                pgd = pgd_offset(walk->mm, next);
                                continue;
                        }
                }

                if (pgd_none_or_clear_bad(pgd)) {
                        if (walk->pte_hole)
                                err = walk->pte_hole(addr, next, walk);
                        if (err)
                                break;
                        pgd++;
                        continue;
                }
                if (walk->pgd_entry)
                        err = walk->pgd_entry(pgd, addr, next, walk);
                if (!err &&
                    (walk->pud_entry || walk->pmd_entry || walk->pte_entry))
                        err = walk_pud_range(pgd, addr, next, walk);
                if (err)
                        break;
                pgd++;
        } while (addr = next, addr != end);

        return err;
}
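
/*
 * Illustrative sketch, not part of the original file: driving
 * walk_page_range() with the hypothetical example_pte_entry() above to
 * count present ptes in a range. mmap_sem is taken for read, matching
 * the VM_BUG_ON() in walk_page_range().
 */
static unsigned long count_present_ptes(struct mm_struct *mm,
                                        unsigned long start, unsigned long end)
{
        unsigned long present = 0;
        struct mm_walk count_walk = {
                .pte_entry      = example_pte_entry,
                .mm             = mm,
                .private        = &present,
        };

        down_read(&mm->mmap_sem);
        walk_page_range(start, end, &count_walk);
        up_read(&mm->mmap_sem);

        return present;
}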