/*
 *  linux/mm/mmu_notifier.c
 *
 *  Copyright (C) 2008  Qumranet, Inc.
 *  Copyright (C) 2008  SGI
 *             Christoph Lameter <clameter@sgi.com>
 *
 *  This work is licensed under the terms of the GNU GPL, version 2. See
 *  the COPYING file in the top-level directory.
 */

#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/srcu.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/slab.h>

/* global SRCU for all MMs */
static struct srcu_struct srcu;
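
/*
 * All of the notifier callouts below run inside an SRCU read-side
 * critical section on this srcu_struct, so that the ops callbacks are
 * allowed to sleep. Every hook follows the same pattern, sketched here
 * for reference:
 *
 *	id = srcu_read_lock(&srcu);
 *	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist)
 *		(invoke mn->ops-><method> if the notifier provides it)
 *	srcu_read_unlock(&srcu, id);
 */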

/*
 * This function can't run concurrently against mmu_notifier_register
 * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
 * runs with mm_users == 0. Other tasks may still invoke mmu notifiers
 * in parallel despite there being no task using this mm any more,
 * through the vmas outside of the exit_mmap context, such as with
 * vmtruncate. This serializes against mmu_notifier_unregister with
 * the mmu_notifier_mm->lock in addition to SRCU, and it serializes
 * against the other mmu notifiers with SRCU. struct mmu_notifier_mm
 * can't go away from under us as exit_mmap holds an mm_count pin
 * itself.
 */
void __mmu_notifier_release(struct mm_struct *mm)
{
	struct mmu_notifier *mn;
	int id;

	/*
	 * srcu_read_lock() here will block synchronize_srcu() in
	 * mmu_notifier_unregister() until all registered
	 * ->release() callouts this function makes have
	 * returned.
	 */
	id = srcu_read_lock(&srcu);
	spin_lock(&mm->mmu_notifier_mm->lock);
	while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
		mn = hlist_entry(mm->mmu_notifier_mm->list.first,
				 struct mmu_notifier,
				 hlist);

		/*
		 * Unlink. This will prevent mmu_notifier_unregister()
		 * from also making the ->release() callout.
		 */
		hlist_del_init_rcu(&mn->hlist);
		spin_unlock(&mm->mmu_notifier_mm->lock);

		/*
		 * Clear sptes. (see 'release' description in mmu_notifier.h)
		 */
		if (mn->ops->release)
			mn->ops->release(mn, mm);

		spin_lock(&mm->mmu_notifier_mm->lock);
	}
	spin_unlock(&mm->mmu_notifier_mm->lock);

	/*
	 * All callouts to ->release() which we have done are complete.
	 * Allow synchronize_srcu() in mmu_notifier_unregister() to complete.
	 */
	srcu_read_unlock(&srcu, id);

	/*
	 * mmu_notifier_unregister() may have unlinked a notifier and may
	 * still be calling out to it. Additionally, other notifiers
	 * may have been active via vmtruncate() et al. Block here
	 * to ensure that all notifier callouts for this mm have been
	 * completed and the sptes are really cleaned up before returning
	 * to exit_mmap().
	 */
	synchronize_srcu(&srcu);
}
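
/*
 * For reference, the teardown ordering (a sketch, not code from this
 * file): exit_mmap() in mm/mmap.c invokes mmu_notifier_release(mm)
 * before it unmaps and frees the pages, so every registered notifier
 * gets its ->release() callout, and by the time the synchronize_srcu()
 * above returns no notifier method can still be running for this mm.
 */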

/*
 * If no young bitflag is supported by the hardware, ->clear_flush_young
 * can unmap the address and return 1 or 0 depending on whether the
 * mapping previously existed or not.
 */
int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
					unsigned long address)
{
	struct mmu_notifier *mn;
	struct hlist_node *n;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->clear_flush_young)
			young |= mn->ops->clear_flush_young(mn, mm, address);
	}
	srcu_read_unlock(&srcu, id);

	return young;
}
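
/*
 * Illustrative only: a secondary-MMU driver's ->clear_flush_young would
 * typically test-and-clear its own accessed/young state for the page,
 * roughly like the sketch below (the my_* names are hypothetical, not
 * part of this file):
 *
 *	static int my_clear_flush_young(struct mmu_notifier *mn,
 *					struct mm_struct *mm,
 *					unsigned long address)
 *	{
 *		struct my_dev *dev = container_of(mn, struct my_dev, mn);
 *
 *		return my_test_and_clear_young(dev, address);
 *	}
 */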

int __mmu_notifier_test_young(struct mm_struct *mm,
			      unsigned long address)
{
	struct mmu_notifier *mn;
	struct hlist_node *n;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->test_young) {
			young = mn->ops->test_young(mn, mm, address);
			if (young)
				break;
		}
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
			       pte_t pte)
{
	struct mmu_notifier *mn;
	struct hlist_node *n;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->change_pte)
			mn->ops->change_pte(mn, mm, address, pte);
		/*
		 * Some drivers don't have change_pte,
		 * so we must call invalidate_page in that case.
		 */
		else if (mn->ops->invalidate_page)
			mn->ops->invalidate_page(mn, mm, address);
	}
	srcu_read_unlock(&srcu, id);
}
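
/*
 * For reference: callers do not usually invoke the hook above directly;
 * they go through set_pte_at_notify() in mmu_notifier.h, which first
 * updates the primary pte and then makes the callout, roughly:
 *
 *	set_pte_at(mm, address, ptep, pte);
 *	mmu_notifier_change_pte(mm, address, pte);
 */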

void __mmu_notifier_invalidate_page(struct mm_struct *mm,
					  unsigned long address)
{
	struct mmu_notifier *mn;
	struct hlist_node *n;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_page)
			mn->ops->invalidate_page(mn, mm, address);
	}
	srcu_read_unlock(&srcu, id);
}

void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
	struct mmu_notifier *mn;
	struct hlist_node *n;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_range_start)
			mn->ops->invalidate_range_start(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);
}

void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
	struct mmu_notifier *mn;
	struct hlist_node *n;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_range_end)
			mn->ops->invalidate_range_end(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);
}
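
/*
 * For reference, the caller-side pattern (a sketch, not code from this
 * file): core VM code brackets a page table update on a range with the
 * two hooks above, roughly:
 *
 *	mmu_notifier_invalidate_range_start(mm, start, end);
 *	(clear the primary ptes and flush the primary TLB)
 *	mmu_notifier_invalidate_range_end(mm, start, end);
 *
 * Between the two calls a secondary MMU must not establish new
 * translations for the range (see mmu_notifier.h).
 */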

static int do_mmu_notifier_register(struct mmu_notifier *mn,
				    struct mm_struct *mm,
				    int take_mmap_sem)
{
	struct mmu_notifier_mm *mmu_notifier_mm;
	int ret;

	BUG_ON(atomic_read(&mm->mm_users) <= 0);

	/*
	 * Verify that mmu_notifier_init() already ran and the global
	 * srcu is initialized.
	 */
	BUG_ON(!srcu.per_cpu_ref);

	ret = -ENOMEM;
	mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
	if (unlikely(!mmu_notifier_mm))
		goto out;

	if (take_mmap_sem)
		down_write(&mm->mmap_sem);
	ret = mm_take_all_locks(mm);
	if (unlikely(ret))
		goto out_clean;

	if (!mm_has_notifiers(mm)) {
		INIT_HLIST_HEAD(&mmu_notifier_mm->list);
		spin_lock_init(&mmu_notifier_mm->lock);

		mm->mmu_notifier_mm = mmu_notifier_mm;
		mmu_notifier_mm = NULL;
	}
	atomic_inc(&mm->mm_count);

	/*
	 * Serialize the update against mmu_notifier_unregister. A
	 * side note: mmu_notifier_release can't run concurrently with
	 * us because we hold the mm_users pin (either implicitly as
	 * current->mm or explicitly with get_task_mm() or similar).
	 * We can't race against any other mmu notifier method either
	 * thanks to mm_take_all_locks().
	 */
	spin_lock(&mm->mmu_notifier_mm->lock);
	hlist_add_head(&mn->hlist, &mm->mmu_notifier_mm->list);
	spin_unlock(&mm->mmu_notifier_mm->lock);

	mm_drop_all_locks(mm);
out_clean:
	if (take_mmap_sem)
		up_write(&mm->mmap_sem);
	/* kfree() does nothing if mmu_notifier_mm is NULL */
	kfree(mmu_notifier_mm);
out:
	BUG_ON(atomic_read(&mm->mm_users) <= 0);
	return ret;
}

/*
 * Must not hold mmap_sem nor any other VM related lock when calling
 * this registration function. Must also ensure mm_users can't go down
 * to zero while this runs to avoid races with mmu_notifier_release,
 * so mm has to be current->mm or the mm should be pinned safely such
 * as with get_task_mm(). If the mm is not current->mm, the mm_users
 * pin should be released by calling mmput after mmu_notifier_register
 * returns. mmu_notifier_unregister must always be called to
 * unregister the notifier. mm_count is automatically pinned to allow
 * mmu_notifier_unregister to safely run at any time later, before or
 * after exit_mmap. ->release will always be called before exit_mmap
 * frees the pages.
 */
int mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
	return do_mmu_notifier_register(mn, mm, 1);
}
EXPORT_SYMBOL_GPL(mmu_notifier_register);
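
/*
 * Illustrative only: a driver typically embeds a struct mmu_notifier
 * in its per-mm state and registers it against current->mm. The my_*
 * names below are hypothetical, not part of this file:
 *
 *	static const struct mmu_notifier_ops my_ops = {
 *		.release		= my_release,
 *		.invalidate_range_start	= my_invalidate_range_start,
 *		.invalidate_range_end	= my_invalidate_range_end,
 *	};
 *
 *	my->mn.ops = &my_ops;
 *	ret = mmu_notifier_register(&my->mn, current->mm);
 */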

/*
 * Same as mmu_notifier_register but here the caller must hold the
 * mmap_sem in write mode.
 */
int __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
	return do_mmu_notifier_register(mn, mm, 0);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_register);

/* this is called after the last mmu_notifier_unregister() returned */
void __mmu_notifier_mm_destroy(struct mm_struct *mm)
{
	BUG_ON(!hlist_empty(&mm->mmu_notifier_mm->list));
	kfree(mm->mmu_notifier_mm);
	mm->mmu_notifier_mm = LIST_POISON1; /* debug */
}

/*
 * This releases the mm_count pin automatically and frees the mm
 * structure if it was the last user of it. It serializes against
 * running mmu notifiers with SRCU and against mmu_notifier_unregister
 * with the unregister lock + SRCU. All sptes must be dropped before
 * calling mmu_notifier_unregister. ->release or any other notifier
 * method may be invoked concurrently with mmu_notifier_unregister,
 * and only after mmu_notifier_unregister has returned are we
 * guaranteed that ->release or any other method can't run anymore.
 */
void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
{
	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	spin_lock(&mm->mmu_notifier_mm->lock);
	if (!hlist_unhashed(&mn->hlist)) {
		int id;

		/*
		 * Ensure we synchronize up with __mmu_notifier_release().
		 */
		id = srcu_read_lock(&srcu);

		hlist_del_rcu(&mn->hlist);
		spin_unlock(&mm->mmu_notifier_mm->lock);

		if (mn->ops->release)
			mn->ops->release(mn, mm);

		/*
		 * Allow __mmu_notifier_release() to complete.
		 */
		srcu_read_unlock(&srcu, id);
	} else
		spin_unlock(&mm->mmu_notifier_mm->lock);

	/*
	 * Wait for any running method to finish, including ->release() if it
	 * was run by __mmu_notifier_release() instead of us.
	 */
	synchronize_srcu(&srcu);

	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_notifier_unregister);
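
/*
 * Illustrative only: a driver tears down with a plain
 *
 *	mmu_notifier_unregister(&my->mn, mm);
 *
 * which also drops the mm_count pin taken at registration time (the
 * final mmdrop() above); once it returns, no notifier method for this
 * registration can still be running.
 */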

static int __init mmu_notifier_init(void)
{
	return init_srcu_struct(&srcu);
}
module_init(mmu_notifier_init);