20c8ccb1975b8 (Thomas Gleixner 2019-06-04 10:11:32 +0200 1) // SPDX-License-Identifier: GPL-2.0-only
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 2) /*
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 3) * linux/mm/mmu_notifier.c
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 4) *
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 5) * Copyright (C) 2008 Qumranet, Inc.
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 6) * Copyright (C) 2008 SGI
93e205a728e6c (Christoph Lameter 2016-03-17 14:21:15 -0700 7) * Christoph Lameter <cl@linux.com>
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 8) */
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 9)
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 10) #include <linux/rculist.h>
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 11) #include <linux/mmu_notifier.h>
b95f1b31b7558 (Paul Gortmaker 2011-10-16 02:01:52 -0400 12) #include <linux/export.h>
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 13) #include <linux/mm.h>
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 14) #include <linux/err.h>
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 15) #include <linux/interval_tree.h>
21a92735f660e (Sagi Grimberg 2012-10-08 16:29:24 -0700 16) #include <linux/srcu.h>
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 17) #include <linux/rcupdate.h>
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 18) #include <linux/sched.h>
6e84f31522f93 (Ingo Molnar 2017-02-08 18:51:29 +0100 19) #include <linux/sched/mm.h>
5a0e3ad6af866 (Tejun Heo 2010-03-24 17:04:11 +0900 20) #include <linux/slab.h>
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 21)
21a92735f660e (Sagi Grimberg 2012-10-08 16:29:24 -0700 22) /* global SRCU for all MMs */
dde8da6cffe73 (Paul E. McKenney 2017-03-25 10:42:07 -0700 23) DEFINE_STATIC_SRCU(srcu);
21a92735f660e (Sagi Grimberg 2012-10-08 16:29:24 -0700 24)
23b68395c7c78 (Daniel Vetter 2019-08-26 22:14:21 +0200 25) #ifdef CONFIG_LOCKDEP
23b68395c7c78 (Daniel Vetter 2019-08-26 22:14:21 +0200 26) struct lockdep_map __mmu_notifier_invalidate_range_start_map = {
23b68395c7c78 (Daniel Vetter 2019-08-26 22:14:21 +0200 27) .name = "mmu_notifier_invalidate_range_start"
23b68395c7c78 (Daniel Vetter 2019-08-26 22:14:21 +0200 28) };
23b68395c7c78 (Daniel Vetter 2019-08-26 22:14:21 +0200 29) #endif
23b68395c7c78 (Daniel Vetter 2019-08-26 22:14:21 +0200 30)
56f434f40f059 (Jason Gunthorpe 2019-11-12 16:22:18 -0400 31) /*
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 32) * The mmu_notifier_subscriptions structure is allocated and installed in
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 33) * mm->notifier_subscriptions inside the mm_take_all_locks() protected
56f434f40f059 (Jason Gunthorpe 2019-11-12 16:22:18 -0400 34) * critical section and it's released only when mm_count reaches zero
56f434f40f059 (Jason Gunthorpe 2019-11-12 16:22:18 -0400 35) * in mmdrop().
56f434f40f059 (Jason Gunthorpe 2019-11-12 16:22:18 -0400 36) */
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 37) struct mmu_notifier_subscriptions {
56f434f40f059 (Jason Gunthorpe 2019-11-12 16:22:18 -0400 38) /* all mmu notifiers registered in this mm are queued in this list */
56f434f40f059 (Jason Gunthorpe 2019-11-12 16:22:18 -0400 39) struct hlist_head list;
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 40) bool has_itree;
56f434f40f059 (Jason Gunthorpe 2019-11-12 16:22:18 -0400 41) /* to serialize the list modifications and hlist_unhashed */
56f434f40f059 (Jason Gunthorpe 2019-11-12 16:22:18 -0400 42) spinlock_t lock;
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 43) unsigned long invalidate_seq;
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 44) unsigned long active_invalidate_ranges;
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 45) struct rb_root_cached itree;
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 46) wait_queue_head_t wq;
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 47) struct hlist_head deferred_list;
56f434f40f059 (Jason Gunthorpe 2019-11-12 16:22:18 -0400 48) };
56f434f40f059 (Jason Gunthorpe 2019-11-12 16:22:18 -0400 49)
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 50) /*
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 51) * This is a collision-retry read-side/write-side 'lock', a lot like a
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 52) * seqcount; however, it allows multiple write-sides to hold it at
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 53) * once. Conceptually the write side is protecting the values of the PTEs in
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 54) * this mm, such that PTEs cannot be read into SPTEs (shadow PTEs) while any
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 55) * writer exists.
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 56) *
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 57) * Note that the core mm creates nested invalidate_range_start()/end() regions
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 58) * within the same thread, and runs invalidate_range_start()/end() in parallel
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 59) * on multiple CPUs. This is designed to not reduce concurrency or block
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 60) * progress on the mm side.
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 61) *
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 62) * As a secondary function, holding the full write side also serves to prevent
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 63) * writers for the itree; this is an optimization to avoid extra locking
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 64) * during invalidate_range_start/end notifiers.
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 65) *
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 66) * The write side has two states. Fully excluded:
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 67) * - subscriptions->active_invalidate_ranges != 0
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 68) * - subscriptions->invalidate_seq & 1 == True (odd)
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 69) * - some range on the mm_struct is being invalidated
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 70) * - the itree is not allowed to change
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 71) *
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 72) * And partially excluded:
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 73) * - subscriptions->active_invalidate_ranges != 0
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 74) * - subscriptions->invalidate_seq & 1 == False (even)
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 75) * - some range on the mm_struct is being invalidated
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 76) * - the itree is allowed to change
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 77) *
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 78) * Operations on notifier_subscriptions->invalidate_seq (under spinlock):
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 79) * seq |= 1 # Begin writing
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 80) * seq++ # Release the writing state
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 81) * seq & 1 # True if a writer exists
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 82) *
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 83) * The latter state avoids some expensive work on inv_end in the common case of
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 84) * no mmu_interval_notifier monitoring the VA.
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 85) */
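
/*
 * Worked example of the protocol above (illustrative only): two
 * overlapping invalidations that both hit the itree, starting from an
 * idle (even) invalidate_seq of 2:
 *
 *	inv_start A: active_invalidate_ranges = 1, seq |= 1   -> seq == 3
 *	inv_start B: active_invalidate_ranges = 2             -> seq == 3
 *	inv_end   A: --active_invalidate_ranges == 1, return  -> seq == 3
 *	inv_end   B: --active_invalidate_ranges == 0, seq++   -> seq == 4
 *
 * Only the final inv_end makes seq even again, applies the deferred
 * itree updates and wakes waiters; any reader that sampled 3 collides.
 */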
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 86) static bool
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 87) mn_itree_is_invalidating(struct mmu_notifier_subscriptions *subscriptions)
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 88) {
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 89) lockdep_assert_held(&subscriptions->lock);
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 90) return subscriptions->invalidate_seq & 1;
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 91) }
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 92)
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 93) static struct mmu_interval_notifier *
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 94) mn_itree_inv_start_range(struct mmu_notifier_subscriptions *subscriptions,
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 95) const struct mmu_notifier_range *range,
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 96) unsigned long *seq)
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 97) {
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 98) struct interval_tree_node *node;
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 99) struct mmu_interval_notifier *res = NULL;
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 100)
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 101) spin_lock(&subscriptions->lock);
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 102) subscriptions->active_invalidate_ranges++;
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 103) node = interval_tree_iter_first(&subscriptions->itree, range->start,
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 104) range->end - 1);
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 105) if (node) {
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 106) subscriptions->invalidate_seq |= 1;
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 107) res = container_of(node, struct mmu_interval_notifier,
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 108) interval_tree);
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 109) }
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 110)
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 111) *seq = subscriptions->invalidate_seq;
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 112) spin_unlock(&subscriptions->lock);
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 113) return res;
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 114) }
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 115)
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 116) static struct mmu_interval_notifier *
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 117) mn_itree_inv_next(struct mmu_interval_notifier *interval_sub,
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 118) const struct mmu_notifier_range *range)
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 119) {
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 120) struct interval_tree_node *node;
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 121)
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 122) node = interval_tree_iter_next(&interval_sub->interval_tree,
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 123) range->start, range->end - 1);
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 124) if (!node)
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 125) return NULL;
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 126) return container_of(node, struct mmu_interval_notifier, interval_tree);
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 127) }
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 128)
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 129) static void mn_itree_inv_end(struct mmu_notifier_subscriptions *subscriptions)
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 130) {
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 131) struct mmu_interval_notifier *interval_sub;
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 132) struct hlist_node *next;
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 133)
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 134) spin_lock(&subscriptions->lock);
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 135) if (--subscriptions->active_invalidate_ranges ||
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 136) !mn_itree_is_invalidating(subscriptions)) {
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 137) spin_unlock(&subscriptions->lock);
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 138) return;
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 139) }
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 140)
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 141) /* Make invalidate_seq even */
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 142) subscriptions->invalidate_seq++;
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 143)
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 144) /*
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 145) * The inv_end incorporates a deferred mechanism like rtnl_unlock().
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 146) * Adds and removes are queued until the final inv_end happens, then
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 147) * they are processed. This arrangement for tree updates is used to
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 148) * avoid using a blocking lock during invalidate_range_start.
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 149) */
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 150) hlist_for_each_entry_safe(interval_sub, next,
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 151) &subscriptions->deferred_list,
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 152) deferred_item) {
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 153) if (RB_EMPTY_NODE(&interval_sub->interval_tree.rb))
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 154) interval_tree_insert(&interval_sub->interval_tree,
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 155) &subscriptions->itree);
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 156) else
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 157) interval_tree_remove(&interval_sub->interval_tree,
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 158) &subscriptions->itree);
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 159) hlist_del(&interval_sub->deferred_item);
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 160) }
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 161) spin_unlock(&subscriptions->lock);
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 162)
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 163) wake_up_all(&subscriptions->wq);
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 164) }
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 165)
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 166) /**
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 167) * mmu_interval_read_begin - Begin a read side critical section against a VA
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 168) * range
d49653f35adff (Krzysztof Kozlowski 2020-08-11 18:32:09 -0700 169) * @interval_sub: The interval subscription
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 170) *
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 171) * mmu_interval_read_begin()/mmu_interval_read_retry() implement a
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 172) * collision-retry scheme similar to seqcount for the VA range under
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 173) * subscription. If the mm invokes invalidation during the critical section
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 174) * then mmu_interval_read_retry() will return true.
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 175) *
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 176) * This is useful to obtain shadow PTEs where teardown or setup of the SPTEs
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 177) * requires a blocking context. The critical region formed by this can sleep,
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 178) * and the required 'user_lock' can also be a sleeping lock.
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 179) *
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 180) * The caller is required to provide a 'user_lock' to serialize both teardown
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 181) * and setup.
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 182) *
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 183) * The return value should be passed to mmu_interval_read_retry().
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 184) */
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 185) unsigned long
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 186) mmu_interval_read_begin(struct mmu_interval_notifier *interval_sub)
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 187) {
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 188) struct mmu_notifier_subscriptions *subscriptions =
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 189) interval_sub->mm->notifier_subscriptions;
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 190) unsigned long seq;
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 191) bool is_invalidating;
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 192)
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 193) /*
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 194) * If the subscription has a different seq value under the user_lock
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 195) * than we started with, then it has collided.
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 196) *
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 197) * If the subscription currently has the same seq value as
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 198) * subscriptions->invalidate_seq, then it is currently between
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 199) * invalidate_start/end and is colliding.
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 200) *
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 201) * The locking looks broadly like this:
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 202) * mn_tree_invalidate_start(): mmu_interval_read_begin():
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 203) * spin_lock
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 204) * seq = READ_ONCE(interval_sub->invalidate_seq);
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 205) * seq == subs->invalidate_seq
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 206) * spin_unlock
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 207) * spin_lock
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 208) * seq = ++subscriptions->invalidate_seq
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 209) * spin_unlock
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 210) * op->invalidate_range():
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 211) * user_lock
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 212) * mmu_interval_set_seq()
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 213) * interval_sub->invalidate_seq = seq
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 214) * user_unlock
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 215) *
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 216) * [Required: mmu_interval_read_retry() == true]
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 217) *
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 218) * mn_itree_inv_end():
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 219) * spin_lock
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 220) * seq = ++subscriptions->invalidate_seq
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 221) * spin_unlock
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 222) *
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 223) * user_lock
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 224) * mmu_interval_read_retry():
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 225) * interval_sub->invalidate_seq != seq
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 226) * user_unlock
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 227) *
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 228) * Barriers are not needed here as any races are closed by an
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 229) * eventual mmu_interval_read_retry(), which provides a barrier via the
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 230) * user_lock.
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 231) */
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 232) spin_lock(&subscriptions->lock);
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 233) /* Pairs with the WRITE_ONCE in mmu_interval_set_seq() */
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 234) seq = READ_ONCE(interval_sub->invalidate_seq);
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 235) is_invalidating = seq == subscriptions->invalidate_seq;
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 236) spin_unlock(&subscriptions->lock);
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 237)
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 238) /*
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 239) * interval_sub->invalidate_seq must always be set to an odd value via
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 240) * mmu_interval_set_seq() using the provided cur_seq from
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 241) * mn_itree_inv_start_range(). This ensures that if seq does wrap we
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 242) * will always clear the below sleep in some reasonable time as
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 243) * subscriptions->invalidate_seq is even in the idle state.
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 244) */
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 245) lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 246) lock_map_release(&__mmu_notifier_invalidate_range_start_map);
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 247) if (is_invalidating)
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 248) wait_event(subscriptions->wq,
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 249) READ_ONCE(subscriptions->invalidate_seq) != seq);
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 250)
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 251) /*
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 252) * Notice that mmu_interval_read_retry() can already be true at this
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 253) * point; avoiding loops here allows the caller to provide a global
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 254) * time bound.
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 255) */
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 256)
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 257) return seq;
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 258) }
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 259) EXPORT_SYMBOL_GPL(mmu_interval_read_begin);
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 260)
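/*
 * Purely illustrative sketch, nothing in this file uses it: the driver
 * side of the collision-retry scheme documented above. The
 * "example_mirror" structure, its mutex (the 'user_lock') and the
 * 'mapped' flag standing in for real shadow-PTE state are hypothetical.
 */
struct example_mirror {
	struct mmu_interval_notifier notifier;
	struct mutex lock;	/* the 'user_lock' serializing SPTE updates */
	bool mapped;		/* stands in for the driver's SPTE state */
};

static void __maybe_unused example_update_sptes(struct example_mirror *m)
{
	unsigned long seq;

again:
	seq = mmu_interval_read_begin(&m->notifier);

	/* Read the PTEs and stage the new SPTEs; this may sleep. */

	mutex_lock(&m->lock);
	if (mmu_interval_read_retry(&m->notifier, seq)) {
		/* An invalidation collided; discard and start over. */
		mutex_unlock(&m->lock);
		goto again;
	}
	m->mapped = true;	/* publish the staged SPTEs */
	mutex_unlock(&m->lock);
}
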
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 261) static void mn_itree_release(struct mmu_notifier_subscriptions *subscriptions,
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 262) struct mm_struct *mm)
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 263) {
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 264) struct mmu_notifier_range range = {
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 265) .flags = MMU_NOTIFIER_RANGE_BLOCKABLE,
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 266) .event = MMU_NOTIFY_RELEASE,
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 267) .mm = mm,
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 268) .start = 0,
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 269) .end = ULONG_MAX,
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 270) };
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 271) struct mmu_interval_notifier *interval_sub;
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 272) unsigned long cur_seq;
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 273) bool ret;
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 274)
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 275) for (interval_sub =
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 276) mn_itree_inv_start_range(subscriptions, &range, &cur_seq);
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 277) interval_sub;
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 278) interval_sub = mn_itree_inv_next(interval_sub, &range)) {
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 279) ret = interval_sub->ops->invalidate(interval_sub, &range,
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 280) cur_seq);
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 281) WARN_ON(!ret);
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 282) }
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 283)
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 284) mn_itree_inv_end(subscriptions);
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 285) }
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 286)
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 287) /*
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 288) * This function can't run concurrently with mmu_notifier_register
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 289) * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 290) * runs with mm_users == 0. Other tasks may still invoke mmu notifiers
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 291) * in parallel despite there being no task using this mm any more,
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 292) * through the vmas outside of the exit_mmap context, such as with
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 293) * vmtruncate. This serializes against mmu_notifier_unregister with
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 294) * the notifier_subscriptions->lock in addition to SRCU, and it serializes
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 295) * against the other mmu notifiers with SRCU. struct mmu_notifier_subscriptions
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 296) * can't go away from under us as exit_mmap holds an mm_count pin
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 297) * itself.
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 298) */
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 299) static void mn_hlist_release(struct mmu_notifier_subscriptions *subscriptions,
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 300) struct mm_struct *mm)
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 301) {
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 302) struct mmu_notifier *subscription;
21a92735f660e (Sagi Grimberg 2012-10-08 16:29:24 -0700 303) int id;
3ad3d901bbcfb (Xiao Guangrong 2012-07-31 16:45:52 -0700 304)
3ad3d901bbcfb (Xiao Guangrong 2012-07-31 16:45:52 -0700 305) /*
d34883d4e35c0 (Xiao Guangrong 2013-05-24 15:55:11 -0700 306) * SRCU here will block mmu_notifier_unregister until
d34883d4e35c0 (Xiao Guangrong 2013-05-24 15:55:11 -0700 307) * ->release returns.
3ad3d901bbcfb (Xiao Guangrong 2012-07-31 16:45:52 -0700 308) */
21a92735f660e (Sagi Grimberg 2012-10-08 16:29:24 -0700 309) id = srcu_read_lock(&srcu);
63886bad904b7 (Qian Cai 2020-03-21 18:22:34 -0700 310) hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
63886bad904b7 (Qian Cai 2020-03-21 18:22:34 -0700 311) srcu_read_lock_held(&srcu))
d34883d4e35c0 (Xiao Guangrong 2013-05-24 15:55:11 -0700 312) /*
d34883d4e35c0 (Xiao Guangrong 2013-05-24 15:55:11 -0700 313) * If ->release runs before mmu_notifier_unregister it must be
d34883d4e35c0 (Xiao Guangrong 2013-05-24 15:55:11 -0700 314) * handled, as it's the only way for the driver to flush all
d34883d4e35c0 (Xiao Guangrong 2013-05-24 15:55:11 -0700 315) * existing sptes and stop the driver from establishing any more
d34883d4e35c0 (Xiao Guangrong 2013-05-24 15:55:11 -0700 316) * sptes before all the pages in the mm are freed.
d34883d4e35c0 (Xiao Guangrong 2013-05-24 15:55:11 -0700 317) */
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 318) if (subscription->ops->release)
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 319) subscription->ops->release(subscription, mm);
d34883d4e35c0 (Xiao Guangrong 2013-05-24 15:55:11 -0700 320)
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 321) spin_lock(&subscriptions->lock);
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 322) while (unlikely(!hlist_empty(&subscriptions->list))) {
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 323) subscription = hlist_entry(subscriptions->list.first,
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 324) struct mmu_notifier, hlist);
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 325) /*
d34883d4e35c0 (Xiao Guangrong 2013-05-24 15:55:11 -0700 326) * We arrived before mmu_notifier_unregister so
d34883d4e35c0 (Xiao Guangrong 2013-05-24 15:55:11 -0700 327) * mmu_notifier_unregister will do nothing other than to wait
d34883d4e35c0 (Xiao Guangrong 2013-05-24 15:55:11 -0700 328) * for ->release to finish and for mmu_notifier_unregister to
d34883d4e35c0 (Xiao Guangrong 2013-05-24 15:55:11 -0700 329) * return.
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 330) */
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 331) hlist_del_init_rcu(&subscription->hlist);
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 332) }
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 333) spin_unlock(&subscriptions->lock);
b972216e27d1c (Peter Zijlstra 2014-08-06 16:08:20 -0700 334) srcu_read_unlock(&srcu, id);
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 335)
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 336) /*
d34883d4e35c0 (Xiao Guangrong 2013-05-24 15:55:11 -0700 337) * synchronize_srcu here prevents mmu_notifier_release from returning to
d34883d4e35c0 (Xiao Guangrong 2013-05-24 15:55:11 -0700 338) * exit_mmap (which would proceed with freeing all pages in the mm)
d34883d4e35c0 (Xiao Guangrong 2013-05-24 15:55:11 -0700 339) * until the ->release method returns, if it was invoked by
d34883d4e35c0 (Xiao Guangrong 2013-05-24 15:55:11 -0700 340) * mmu_notifier_unregister.
d34883d4e35c0 (Xiao Guangrong 2013-05-24 15:55:11 -0700 341) *
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 342) * The notifier_subscriptions can't go away from under us because
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 343) * one mm_count is held by exit_mmap.
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 344) */
21a92735f660e (Sagi Grimberg 2012-10-08 16:29:24 -0700 345) synchronize_srcu(&srcu);
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 346) }
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 347)
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 348) void __mmu_notifier_release(struct mm_struct *mm)
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 349) {
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 350) struct mmu_notifier_subscriptions *subscriptions =
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 351) mm->notifier_subscriptions;
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 352)
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 353) if (subscriptions->has_itree)
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 354) mn_itree_release(subscriptions, mm);
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 355)
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 356) if (!hlist_empty(&subscriptions->list))
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 357) mn_hlist_release(subscriptions, mm);
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 358) }
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 359)
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 360) /*
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 361) * If no young bitflag is supported by the hardware, ->clear_flush_young can
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 362) * unmap the address and return 1 or 0 depending on whether the mapping
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 363) * previously existed or not.
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 364) */
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 365) int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
57128468080a8 (Andres Lagar-Cavilla 2014-09-22 14:54:42 -0700 366) unsigned long start,
57128468080a8 (Andres Lagar-Cavilla 2014-09-22 14:54:42 -0700 367) unsigned long end)
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 368) {
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 369) struct mmu_notifier *subscription;
21a92735f660e (Sagi Grimberg 2012-10-08 16:29:24 -0700 370) int young = 0, id;
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 371)
21a92735f660e (Sagi Grimberg 2012-10-08 16:29:24 -0700 372) id = srcu_read_lock(&srcu);
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 373) hlist_for_each_entry_rcu(subscription,
63886bad904b7 (Qian Cai 2020-03-21 18:22:34 -0700 374) &mm->notifier_subscriptions->list, hlist,
63886bad904b7 (Qian Cai 2020-03-21 18:22:34 -0700 375) srcu_read_lock_held(&srcu)) {
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 376) if (subscription->ops->clear_flush_young)
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 377) young |= subscription->ops->clear_flush_young(
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 378) subscription, mm, start, end);
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 379) }
21a92735f660e (Sagi Grimberg 2012-10-08 16:29:24 -0700 380) srcu_read_unlock(&srcu, id);
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 381)
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 382) return young;
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 383) }
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 384)
1d7715c676a15 (Vladimir Davydov 2015-09-09 15:35:41 -0700 385) int __mmu_notifier_clear_young(struct mm_struct *mm,
1d7715c676a15 (Vladimir Davydov 2015-09-09 15:35:41 -0700 386) unsigned long start,
1d7715c676a15 (Vladimir Davydov 2015-09-09 15:35:41 -0700 387) unsigned long end)
1d7715c676a15 (Vladimir Davydov 2015-09-09 15:35:41 -0700 388) {
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 389) struct mmu_notifier *subscription;
1d7715c676a15 (Vladimir Davydov 2015-09-09 15:35:41 -0700 390) int young = 0, id;
1d7715c676a15 (Vladimir Davydov 2015-09-09 15:35:41 -0700 391)
1d7715c676a15 (Vladimir Davydov 2015-09-09 15:35:41 -0700 392) id = srcu_read_lock(&srcu);
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 393) hlist_for_each_entry_rcu(subscription,
63886bad904b7 (Qian Cai 2020-03-21 18:22:34 -0700 394) &mm->notifier_subscriptions->list, hlist,
63886bad904b7 (Qian Cai 2020-03-21 18:22:34 -0700 395) srcu_read_lock_held(&srcu)) {
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 396) if (subscription->ops->clear_young)
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 397) young |= subscription->ops->clear_young(subscription,
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 398) mm, start, end);
1d7715c676a15 (Vladimir Davydov 2015-09-09 15:35:41 -0700 399) }
1d7715c676a15 (Vladimir Davydov 2015-09-09 15:35:41 -0700 400) srcu_read_unlock(&srcu, id);
1d7715c676a15 (Vladimir Davydov 2015-09-09 15:35:41 -0700 401)
1d7715c676a15 (Vladimir Davydov 2015-09-09 15:35:41 -0700 402) return young;
1d7715c676a15 (Vladimir Davydov 2015-09-09 15:35:41 -0700 403) }
1d7715c676a15 (Vladimir Davydov 2015-09-09 15:35:41 -0700 404)
8ee53820edfd1 (Andrea Arcangeli 2011-01-13 15:47:10 -0800 405) int __mmu_notifier_test_young(struct mm_struct *mm,
8ee53820edfd1 (Andrea Arcangeli 2011-01-13 15:47:10 -0800 406) unsigned long address)
8ee53820edfd1 (Andrea Arcangeli 2011-01-13 15:47:10 -0800 407) {
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 408) struct mmu_notifier *subscription;
21a92735f660e (Sagi Grimberg 2012-10-08 16:29:24 -0700 409) int young = 0, id;
8ee53820edfd1 (Andrea Arcangeli 2011-01-13 15:47:10 -0800 410)
21a92735f660e (Sagi Grimberg 2012-10-08 16:29:24 -0700 411) id = srcu_read_lock(&srcu);
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 412) hlist_for_each_entry_rcu(subscription,
63886bad904b7 (Qian Cai 2020-03-21 18:22:34 -0700 413) &mm->notifier_subscriptions->list, hlist,
63886bad904b7 (Qian Cai 2020-03-21 18:22:34 -0700 414) srcu_read_lock_held(&srcu)) {
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 415) if (subscription->ops->test_young) {
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 416) young = subscription->ops->test_young(subscription, mm,
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 417) address);
8ee53820edfd1 (Andrea Arcangeli 2011-01-13 15:47:10 -0800 418) if (young)
8ee53820edfd1 (Andrea Arcangeli 2011-01-13 15:47:10 -0800 419) break;
8ee53820edfd1 (Andrea Arcangeli 2011-01-13 15:47:10 -0800 420) }
8ee53820edfd1 (Andrea Arcangeli 2011-01-13 15:47:10 -0800 421) }
21a92735f660e (Sagi Grimberg 2012-10-08 16:29:24 -0700 422) srcu_read_unlock(&srcu, id);
8ee53820edfd1 (Andrea Arcangeli 2011-01-13 15:47:10 -0800 423)
8ee53820edfd1 (Andrea Arcangeli 2011-01-13 15:47:10 -0800 424) return young;
8ee53820edfd1 (Andrea Arcangeli 2011-01-13 15:47:10 -0800 425) }
8ee53820edfd1 (Andrea Arcangeli 2011-01-13 15:47:10 -0800 426)
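/*
 * Illustrative sketch only (hypothetical, never registered): a minimal
 * list-based subscription using the aging hooks above plus ->release.
 * A struct mmu_notifier whose ->ops points here would be attached with
 * mmu_notifier_register().
 */
static int __maybe_unused example_clear_young(struct mmu_notifier *subscription,
					      struct mm_struct *mm,
					      unsigned long start,
					      unsigned long end)
{
	/* Test and clear the secondary-TLB accessed state for the range. */
	return 0;	/* 0: not young, 1: young */
}

static void __maybe_unused example_release(struct mmu_notifier *subscription,
					   struct mm_struct *mm)
{
	/* Flush all sptes and stop establishing new ones; the mm is dying. */
}

static const struct mmu_notifier_ops example_aging_ops __maybe_unused = {
	.release		= example_release,
	.clear_flush_young	= example_clear_young,
	.clear_young		= example_clear_young,
};
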
828502d300730 (Izik Eidus 2009-09-21 17:01:51 -0700 427) void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
828502d300730 (Izik Eidus 2009-09-21 17:01:51 -0700 428) pte_t pte)
828502d300730 (Izik Eidus 2009-09-21 17:01:51 -0700 429) {
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 430) struct mmu_notifier *subscription;
21a92735f660e (Sagi Grimberg 2012-10-08 16:29:24 -0700 431) int id;
828502d300730 (Izik Eidus 2009-09-21 17:01:51 -0700 432)
21a92735f660e (Sagi Grimberg 2012-10-08 16:29:24 -0700 433) id = srcu_read_lock(&srcu);
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 434) hlist_for_each_entry_rcu(subscription,
63886bad904b7 (Qian Cai 2020-03-21 18:22:34 -0700 435) &mm->notifier_subscriptions->list, hlist,
63886bad904b7 (Qian Cai 2020-03-21 18:22:34 -0700 436) srcu_read_lock_held(&srcu)) {
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 437) if (subscription->ops->change_pte)
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 438) subscription->ops->change_pte(subscription, mm, address,
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 439) pte);
828502d300730 (Izik Eidus 2009-09-21 17:01:51 -0700 440) }
21a92735f660e (Sagi Grimberg 2012-10-08 16:29:24 -0700 441) srcu_read_unlock(&srcu, id);
828502d300730 (Izik Eidus 2009-09-21 17:01:51 -0700 442) }
828502d300730 (Izik Eidus 2009-09-21 17:01:51 -0700 443)
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 444) static int mn_itree_invalidate(struct mmu_notifier_subscriptions *subscriptions,
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 445) const struct mmu_notifier_range *range)
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 446) {
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 447) struct mmu_interval_notifier *interval_sub;
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 448) unsigned long cur_seq;
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 449)
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 450) for (interval_sub =
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 451) mn_itree_inv_start_range(subscriptions, range, &cur_seq);
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 452) interval_sub;
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 453) interval_sub = mn_itree_inv_next(interval_sub, range)) {
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 454) bool ret;
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 455)
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 456) ret = interval_sub->ops->invalidate(interval_sub, range,
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 457) cur_seq);
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 458) if (!ret) {
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 459) if (WARN_ON(mmu_notifier_range_blockable(range)))
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 460) continue;
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 461) goto out_would_block;
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 462) }
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 463) }
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 464) return 0;
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 465)
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 466) out_would_block:
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 467) /*
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 468) * On -EAGAIN the non-blocking caller is not allowed to call
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 469) * invalidate_range_end()
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 470) */
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 471) mn_itree_inv_end(subscriptions);
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 472) return -EAGAIN;
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 473) }
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 474)
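/*
 * Illustrative sketch only, continuing the hypothetical example_mirror
 * above: an interval notifier ->invalidate() method shaped the way
 * mn_itree_invalidate() expects; it would be wired up through a
 * mmu_interval_notifier_ops given to mmu_interval_notifier_insert().
 */
static bool __maybe_unused
example_invalidate(struct mmu_interval_notifier *interval_sub,
		   const struct mmu_notifier_range *range,
		   unsigned long cur_seq)
{
	struct example_mirror *m =
		container_of(interval_sub, struct example_mirror, notifier);

	if (mmu_notifier_range_blockable(range))
		mutex_lock(&m->lock);
	else if (!mutex_trylock(&m->lock))
		return false;	/* the caller turns this into -EAGAIN */

	/*
	 * Hand the odd cur_seq to the subscription so that concurrent
	 * mmu_interval_read_begin() callers observe the collision.
	 */
	mmu_interval_set_seq(interval_sub, cur_seq);
	m->mapped = false;	/* tear down the shadow PTEs */
	mutex_unlock(&m->lock);
	return true;
}
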
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 475) static int mn_hlist_invalidate_range_start(
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 476) struct mmu_notifier_subscriptions *subscriptions,
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 477) struct mmu_notifier_range *range)
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 478) {
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 479) struct mmu_notifier *subscription;
93065ac753e44 (Michal Hocko 2018-08-21 21:52:33 -0700 480) int ret = 0;
21a92735f660e (Sagi Grimberg 2012-10-08 16:29:24 -0700 481) int id;
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 482)
21a92735f660e (Sagi Grimberg 2012-10-08 16:29:24 -0700 483) id = srcu_read_lock(&srcu);
63886bad904b7 (Qian Cai 2020-03-21 18:22:34 -0700 484) hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
63886bad904b7 (Qian Cai 2020-03-21 18:22:34 -0700 485) srcu_read_lock_held(&srcu)) {
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 486) const struct mmu_notifier_ops *ops = subscription->ops;
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 487)
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 488) if (ops->invalidate_range_start) {
ba170f76b69d1 (Daniel Vetter 2019-08-26 22:14:24 +0200 489) int _ret;
ba170f76b69d1 (Daniel Vetter 2019-08-26 22:14:24 +0200 490)
ba170f76b69d1 (Daniel Vetter 2019-08-26 22:14:24 +0200 491) if (!mmu_notifier_range_blockable(range))
ba170f76b69d1 (Daniel Vetter 2019-08-26 22:14:24 +0200 492) non_block_start();
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 493) _ret = ops->invalidate_range_start(subscription, range);
ba170f76b69d1 (Daniel Vetter 2019-08-26 22:14:24 +0200 494) if (!mmu_notifier_range_blockable(range))
ba170f76b69d1 (Daniel Vetter 2019-08-26 22:14:24 +0200 495) non_block_end();
93065ac753e44 (Michal Hocko 2018-08-21 21:52:33 -0700 496) if (_ret) {
93065ac753e44 (Michal Hocko 2018-08-21 21:52:33 -0700 497) pr_info("%pS callback failed with %d in %sblockable context.\n",
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 498) ops->invalidate_range_start, _ret,
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 499) !mmu_notifier_range_blockable(range) ?
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 500) "non-" :
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 501) "");
8402ce61bec28 (Daniel Vetter 2019-08-14 22:20:23 +0200 502) WARN_ON(mmu_notifier_range_blockable(range) ||
df2ec7641bd03 (Jason Gunthorpe 2019-11-05 21:16:37 -0800 503) _ret != -EAGAIN);
c2655835fd8ca (Sean Christopherson 2021-03-24 21:37:23 -0700 504) /*
c2655835fd8ca (Sean Christopherson 2021-03-24 21:37:23 -0700 505) * We call all the notifiers on any EAGAIN;
c2655835fd8ca (Sean Christopherson 2021-03-24 21:37:23 -0700 506) * there is no way for a notifier to know if
c2655835fd8ca (Sean Christopherson 2021-03-24 21:37:23 -0700 507) * its start method failed, thus a start that
c2655835fd8ca (Sean Christopherson 2021-03-24 21:37:23 -0700 508) * does EAGAIN can't also do end.
c2655835fd8ca (Sean Christopherson 2021-03-24 21:37:23 -0700 509) */
c2655835fd8ca (Sean Christopherson 2021-03-24 21:37:23 -0700 510) WARN_ON(ops->invalidate_range_end);
93065ac753e44 (Michal Hocko 2018-08-21 21:52:33 -0700 511) ret = _ret;
93065ac753e44 (Michal Hocko 2018-08-21 21:52:33 -0700 512) }
93065ac753e44 (Michal Hocko 2018-08-21 21:52:33 -0700 513) }
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 514) }
c2655835fd8ca (Sean Christopherson 2021-03-24 21:37:23 -0700 515)
c2655835fd8ca (Sean Christopherson 2021-03-24 21:37:23 -0700 516) if (ret) {
c2655835fd8ca (Sean Christopherson 2021-03-24 21:37:23 -0700 517) /*
c2655835fd8ca (Sean Christopherson 2021-03-24 21:37:23 -0700 518) * Must be non-blocking to get here. If there are multiple
c2655835fd8ca (Sean Christopherson 2021-03-24 21:37:23 -0700 519) * notifiers and one or more failed start, any that succeeded
c2655835fd8ca (Sean Christopherson 2021-03-24 21:37:23 -0700 520) * start are expecting their end to be called. Do so now.
c2655835fd8ca (Sean Christopherson 2021-03-24 21:37:23 -0700 521) */
c2655835fd8ca (Sean Christopherson 2021-03-24 21:37:23 -0700 522) hlist_for_each_entry_rcu(subscription, &subscriptions->list,
c2655835fd8ca (Sean Christopherson 2021-03-24 21:37:23 -0700 523) hlist, srcu_read_lock_held(&srcu)) {
c2655835fd8ca (Sean Christopherson 2021-03-24 21:37:23 -0700 524) if (!subscription->ops->invalidate_range_end)
c2655835fd8ca (Sean Christopherson 2021-03-24 21:37:23 -0700 525) continue;
c2655835fd8ca (Sean Christopherson 2021-03-24 21:37:23 -0700 526)
c2655835fd8ca (Sean Christopherson 2021-03-24 21:37:23 -0700 527) subscription->ops->invalidate_range_end(subscription,
c2655835fd8ca (Sean Christopherson 2021-03-24 21:37:23 -0700 528) range);
c2655835fd8ca (Sean Christopherson 2021-03-24 21:37:23 -0700 529) }
c2655835fd8ca (Sean Christopherson 2021-03-24 21:37:23 -0700 530) }
21a92735f660e (Sagi Grimberg 2012-10-08 16:29:24 -0700 531) srcu_read_unlock(&srcu, id);
93065ac753e44 (Michal Hocko 2018-08-21 21:52:33 -0700 532)
93065ac753e44 (Michal Hocko 2018-08-21 21:52:33 -0700 533) return ret;
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 534) }
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 535)
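/*
 * Illustrative sketch only (hypothetical): an invalidate_range_start()
 * implementation shaped the way the loop above expects. Because it can
 * return -EAGAIN it must not also provide invalidate_range_end().
 */
static int __maybe_unused
example_range_start(struct mmu_notifier *subscription,
		    const struct mmu_notifier_range *range)
{
	if (!mmu_notifier_range_blockable(range))
		return -EAGAIN;	/* refuse rather than risk sleeping */

	/* Sleepable teardown of the secondary mappings goes here. */
	return 0;
}
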
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 536) int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 537) {
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 538) struct mmu_notifier_subscriptions *subscriptions =
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 539) range->mm->notifier_subscriptions;
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 540) int ret;
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 541)
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 542) if (subscriptions->has_itree) {
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 543) ret = mn_itree_invalidate(subscriptions, range);
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 544) if (ret)
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 545) return ret;
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 546) }
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 547) if (!hlist_empty(&subscriptions->list))
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 548) return mn_hlist_invalidate_range_start(subscriptions, range);
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 549) return 0;
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 550) }
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 551)
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 552) static void
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 553) mn_hlist_invalidate_end(struct mmu_notifier_subscriptions *subscriptions,
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 554) struct mmu_notifier_range *range, bool only_end)
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 555) {
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 556) struct mmu_notifier *subscription;
21a92735f660e (Sagi Grimberg 2012-10-08 16:29:24 -0700 557) int id;
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 558)
21a92735f660e (Sagi Grimberg 2012-10-08 16:29:24 -0700 559) id = srcu_read_lock(&srcu);
63886bad904b7 (Qian Cai 2020-03-21 18:22:34 -0700 560) hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
63886bad904b7 (Qian Cai 2020-03-21 18:22:34 -0700 561) srcu_read_lock_held(&srcu)) {
0f0a327fa12cd (Joerg Roedel 2014-11-13 13:46:09 +1100 562) /*
0f0a327fa12cd (Joerg Roedel 2014-11-13 13:46:09 +1100 563) * Call invalidate_range() here too to avoid the need for the
0f0a327fa12cd (Joerg Roedel 2014-11-13 13:46:09 +1100 564) * subsystem to register an invalidate_range_end()
0f0a327fa12cd (Joerg Roedel 2014-11-13 13:46:09 +1100 565) * call-back when it already has invalidate_range(). Usually a
0f0a327fa12cd (Joerg Roedel 2014-11-13 13:46:09 +1100 566) * subsystem registers either invalidate_range_start()/end() or
0f0a327fa12cd (Joerg Roedel 2014-11-13 13:46:09 +1100 567) * invalidate_range(), so this adds no additional overhead
0f0a327fa12cd (Joerg Roedel 2014-11-13 13:46:09 +1100 568) * (besides the pointer check).
4645b9fe84bf4 (Jérôme Glisse 2017-11-15 17:34:11 -0800 569) *
4645b9fe84bf4 (Jérôme Glisse 2017-11-15 17:34:11 -0800 570) * We skip the call to invalidate_range() if we know it is safe, i.e.
4645b9fe84bf4 (Jérôme Glisse 2017-11-15 17:34:11 -0800 571) * the call site used mmu_notifier_invalidate_range_only_end(), which
4645b9fe84bf4 (Jérôme Glisse 2017-11-15 17:34:11 -0800 572) * is safe to do when we know that a call to invalidate_range()
4645b9fe84bf4 (Jérôme Glisse 2017-11-15 17:34:11 -0800 573) * already happened under the page table lock.
0f0a327fa12cd (Joerg Roedel 2014-11-13 13:46:09 +1100 574) */
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 575) if (!only_end && subscription->ops->invalidate_range)
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 576) subscription->ops->invalidate_range(subscription,
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 577) range->mm,
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 578) range->start,
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 579) range->end);
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 580) if (subscription->ops->invalidate_range_end) {
ba170f76b69d1 (Daniel Vetter 2019-08-26 22:14:24 +0200 581) if (!mmu_notifier_range_blockable(range))
ba170f76b69d1 (Daniel Vetter 2019-08-26 22:14:24 +0200 582) non_block_start();
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 583) subscription->ops->invalidate_range_end(subscription,
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 584) range);
ba170f76b69d1 (Daniel Vetter 2019-08-26 22:14:24 +0200 585) if (!mmu_notifier_range_blockable(range))
ba170f76b69d1 (Daniel Vetter 2019-08-26 22:14:24 +0200 586) non_block_end();
ba170f76b69d1 (Daniel Vetter 2019-08-26 22:14:24 +0200 587) }
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 588) }
21a92735f660e (Sagi Grimberg 2012-10-08 16:29:24 -0700 589) srcu_read_unlock(&srcu, id);
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 590) }
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 591)
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 592) void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range,
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 593) bool only_end)
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 594) {
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 595) struct mmu_notifier_subscriptions *subscriptions =
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 596) range->mm->notifier_subscriptions;
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 597)
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 598) lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 599) if (subscriptions->has_itree)
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 600) mn_itree_inv_end(subscriptions);
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 601)
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 602) if (!hlist_empty(&subscriptions->list))
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 603) mn_hlist_invalidate_end(subscriptions, range, only_end);
23b68395c7c78 (Daniel Vetter 2019-08-26 22:14:21 +0200 604) lock_map_release(&__mmu_notifier_invalidate_range_start_map);
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 605) }
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 606)
0f0a327fa12cd (Joerg Roedel 2014-11-13 13:46:09 +1100 607) void __mmu_notifier_invalidate_range(struct mm_struct *mm,
0f0a327fa12cd (Joerg Roedel 2014-11-13 13:46:09 +1100 608) unsigned long start, unsigned long end)
0f0a327fa12cd (Joerg Roedel 2014-11-13 13:46:09 +1100 609) {
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 610) struct mmu_notifier *subscription;
0f0a327fa12cd (Joerg Roedel 2014-11-13 13:46:09 +1100 611) int id;
0f0a327fa12cd (Joerg Roedel 2014-11-13 13:46:09 +1100 612)
0f0a327fa12cd (Joerg Roedel 2014-11-13 13:46:09 +1100 613) id = srcu_read_lock(&srcu);
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 614) hlist_for_each_entry_rcu(subscription,
63886bad904b7 (Qian Cai 2020-03-21 18:22:34 -0700 615) &mm->notifier_subscriptions->list, hlist,
63886bad904b7 (Qian Cai 2020-03-21 18:22:34 -0700 616) srcu_read_lock_held(&srcu)) {
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 617) if (subscription->ops->invalidate_range)
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 618) subscription->ops->invalidate_range(subscription, mm,
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 619) start, end);
0f0a327fa12cd (Joerg Roedel 2014-11-13 13:46:09 +1100 620) }
0f0a327fa12cd (Joerg Roedel 2014-11-13 13:46:09 +1100 621) srcu_read_unlock(&srcu, id);
0f0a327fa12cd (Joerg Roedel 2014-11-13 13:46:09 +1100 622) }
0f0a327fa12cd (Joerg Roedel 2014-11-13 13:46:09 +1100 623)
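/*
 * Illustrative sketch only (hypothetical): an ->invalidate_range()
 * method as walked by the loop above. It may be called under the page
 * table spinlock, so it must not sleep.
 */
static void __maybe_unused example_tlb_sync(struct mmu_notifier *subscription,
					    struct mm_struct *mm,
					    unsigned long start,
					    unsigned long end)
{
	/* Synchronously flush the device TLB for [start, end) here. */
}
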
56c57103db17d (Jason Gunthorpe 2019-08-06 20:15:38 -0300 624) /*
c1e8d7c6a7a68 (Michel Lespinasse 2020-06-08 21:33:54 -0700 625) * Same as mmu_notifier_register but here the caller must hold the mmap_lock in
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 626) * write mode. A NULL subscription signals the notifier is being registered
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 627) * for itree mode.
56c57103db17d (Jason Gunthorpe 2019-08-06 20:15:38 -0300 628) */
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 629) int __mmu_notifier_register(struct mmu_notifier *subscription,
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 630) struct mm_struct *mm)
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 631) {
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 632) struct mmu_notifier_subscriptions *subscriptions = NULL;
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 633) int ret;
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 634)
42fc541404f24 (Michel Lespinasse 2020-06-08 21:33:44 -0700 635) mmap_assert_write_locked(mm);
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 636) BUG_ON(atomic_read(&mm->mm_users) <= 0);
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 637)
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 638) if (!mm->notifier_subscriptions) {
70df291bf81ff (Jason Gunthorpe 2019-08-06 20:15:39 -0300 639) /*
70df291bf81ff (Jason Gunthorpe 2019-08-06 20:15:39 -0300 640) * kmalloc cannot be called under mm_take_all_locks(), but we
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 641) * know that mm->notifier_subscriptions can't change while we
c1e8d7c6a7a68 (Michel Lespinasse 2020-06-08 21:33:54 -0700 642) * hold the write side of the mmap_lock.
70df291bf81ff (Jason Gunthorpe 2019-08-06 20:15:39 -0300 643) */
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 644) subscriptions = kzalloc(
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 645) sizeof(struct mmu_notifier_subscriptions), GFP_KERNEL);
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 646) if (!subscriptions)
70df291bf81ff (Jason Gunthorpe 2019-08-06 20:15:39 -0300 647) return -ENOMEM;
70df291bf81ff (Jason Gunthorpe 2019-08-06 20:15:39 -0300 648)
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 649) INIT_HLIST_HEAD(&subscriptions->list);
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 650) spin_lock_init(&subscriptions->lock);
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 651) subscriptions->invalidate_seq = 2;
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 652) subscriptions->itree = RB_ROOT_CACHED;
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 653) init_waitqueue_head(&subscriptions->wq);
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 654) INIT_HLIST_HEAD(&subscriptions->deferred_list);
70df291bf81ff (Jason Gunthorpe 2019-08-06 20:15:39 -0300 655) }
35cfa2b0b491c (Gavin Shan 2012-10-25 13:38:01 -0700 656)
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 657) ret = mm_take_all_locks(mm);
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 658) if (unlikely(ret))
35cfa2b0b491c (Gavin Shan 2012-10-25 13:38:01 -0700 659) goto out_clean;
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 660)
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 661) /*
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 662) * Serialize the update against mmu_notifier_unregister. A
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 663) * side note: mmu_notifier_release can't run concurrently with
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 664) * us because we hold the mm_users pin (either implicitly as
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 665) * current->mm or explicitly with get_task_mm() or similar).
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 666) * We can't race against any other mmu notifier method either
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 667) * thanks to mm_take_all_locks().
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 668) *
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 669) * Release semantics on the initialization of the
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 670) * mmu_notifier_subscriptions' contents are provided for unlocked
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 671) * readers. Acquire can only be used while holding the mmgrab or
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 672) * mmget, and is safe because once created the
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 673) * mmu_notifier_subscriptions is not freed until the mm is destroyed.
c1e8d7c6a7a68 (Michel Lespinasse 2020-06-08 21:33:54 -0700 674) * As above, users holding the mmap_lock or one of the
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 675) * mm_take_all_locks() do not need to use acquire semantics.
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 676) */
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 677) if (subscriptions)
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 678) smp_store_release(&mm->notifier_subscriptions, subscriptions);
70df291bf81ff (Jason Gunthorpe 2019-08-06 20:15:39 -0300 679)
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 680) if (subscription) {
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 681) /* Pairs with the mmdrop in mmu_notifier_unregister_* */
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 682) mmgrab(mm);
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 683) subscription->mm = mm;
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 684) subscription->users = 1;
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 685)
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 686) spin_lock(&mm->notifier_subscriptions->lock);
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 687) hlist_add_head_rcu(&subscription->hlist,
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 688) &mm->notifier_subscriptions->list);
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 689) spin_unlock(&mm->notifier_subscriptions->lock);
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 690) } else
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 691) mm->notifier_subscriptions->has_itree = true;
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 692)
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 693) mm_drop_all_locks(mm);
70df291bf81ff (Jason Gunthorpe 2019-08-06 20:15:39 -0300 694) BUG_ON(atomic_read(&mm->mm_users) <= 0);
70df291bf81ff (Jason Gunthorpe 2019-08-06 20:15:39 -0300 695) return 0;
70df291bf81ff (Jason Gunthorpe 2019-08-06 20:15:39 -0300 696)
35cfa2b0b491c (Gavin Shan 2012-10-25 13:38:01 -0700 697) out_clean:
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 698) kfree(subscriptions);
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 699) return ret;
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 700) }
56c57103db17d (Jason Gunthorpe 2019-08-06 20:15:38 -0300 701) EXPORT_SYMBOL_GPL(__mmu_notifier_register);
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 702)
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 703) /**
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 704) * mmu_notifier_register - Register a notifier on a mm
d49653f35adff (Krzysztof Kozlowski 2020-08-11 18:32:09 -0700 705) * @subscription: The notifier to attach
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 706) * @mm: The mm to attach the notifier to
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 707) *
c1e8d7c6a7a68 (Michel Lespinasse 2020-06-08 21:33:54 -0700 708) * Must not hold mmap_lock nor any other VM related lock when calling
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 709) * this registration function. Must also ensure mm_users can't go down
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 710) * to zero while this runs to avoid races with mmu_notifier_release,
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 711) * so mm has to be current->mm or the mm should be pinned safely such
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 712) * as with get_task_mm(). If the mm is not current->mm, the mm_users
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 713) * pin should be released by calling mmput after mmu_notifier_register
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 714) * returns.
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 715) *
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 716) * mmu_notifier_unregister() or mmu_notifier_put() must always be called to
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 717) * unregister the notifier.
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 718) *
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 719) * While the caller holds a mmu_notifier get, the subscription->mm pointer
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 720) * will remain valid, and can be converted to an active mm via mmget_not_zero().
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 721) */
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 722) int mmu_notifier_register(struct mmu_notifier *subscription,
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 723) struct mm_struct *mm)
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 724) {
56c57103db17d (Jason Gunthorpe 2019-08-06 20:15:38 -0300 725) int ret;
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 726)
d8ed45c5dcd45 (Michel Lespinasse 2020-06-08 21:33:25 -0700 727) mmap_write_lock(mm);
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 728) ret = __mmu_notifier_register(subscription, mm);
d8ed45c5dcd45 (Michel Lespinasse 2020-06-08 21:33:25 -0700 729) mmap_write_unlock(mm);
56c57103db17d (Jason Gunthorpe 2019-08-06 20:15:38 -0300 730) return ret;
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 731) }
56c57103db17d (Jason Gunthorpe 2019-08-06 20:15:38 -0300 732) EXPORT_SYMBOL_GPL(mmu_notifier_register);
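/*
 * Illustrative sketch only (not part of this file): a minimal subscription
 * that logs invalidations. All "example_*" names are hypothetical; the
 * mmu_notifier_ops callbacks and mmu_notifier_register() are the real API
 * documented above.
 */
static int __maybe_unused
example_invalidate_range_start(struct mmu_notifier *subscription,
			       const struct mmu_notifier_range *range)
{
	pr_debug("invalidating %lx-%lx\n", range->start, range->end);
	return 0;
}

static const struct mmu_notifier_ops example_ops __maybe_unused = {
	.invalidate_range_start = example_invalidate_range_start,
};

static struct mmu_notifier example_subscription __maybe_unused = {
	.ops = &example_ops,
};

/* Called without mmap_lock held; mm is current->mm or otherwise pinned. */
static int __maybe_unused example_attach(struct mm_struct *mm)
{
	return mmu_notifier_register(&example_subscription, mm);
}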
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 733)
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 734) static struct mmu_notifier *
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 735) find_get_mmu_notifier(struct mm_struct *mm, const struct mmu_notifier_ops *ops)
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 736) {
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 737) struct mmu_notifier *subscription;
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 738)
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 739) spin_lock(&mm->notifier_subscriptions->lock);
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 740) hlist_for_each_entry_rcu(subscription,
63886bad904b7 (Qian Cai 2020-03-21 18:22:34 -0700 741) &mm->notifier_subscriptions->list, hlist,
63886bad904b7 (Qian Cai 2020-03-21 18:22:34 -0700 742) lockdep_is_held(&mm->notifier_subscriptions->lock)) {
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 743) if (subscription->ops != ops)
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 744) continue;
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 745)
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 746) if (likely(subscription->users != UINT_MAX))
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 747) subscription->users++;
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 748) else
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 749) subscription = ERR_PTR(-EOVERFLOW);
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 750) spin_unlock(&mm->notifier_subscriptions->lock);
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 751) return subscription;
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 752) }
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 753) spin_unlock(&mm->notifier_subscriptions->lock);
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 754) return NULL;
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 755) }
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 756)
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 757) /**
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 758) * mmu_notifier_get_locked - Return the single struct mmu_notifier for
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 759) * the mm & ops
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 760) * @ops: The operations struct being subscribed with
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 761) * @mm: The mm to attach notifiers to
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 762) *
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 763) * This function either allocates a new mmu_notifier via
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 764) * ops->alloc_notifier(), or returns an already existing notifier on the
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 765) * list. The value of the ops pointer is used to determine when two notifiers
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 766) * are the same.
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 767) *
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 768) * Each call to mmu_notifier_get() must be paired with a call to
c1e8d7c6a7a68 (Michel Lespinasse 2020-06-08 21:33:54 -0700 769) * mmu_notifier_put(). The caller must hold the write side of mm->mmap_lock.
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 770) *
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 771) * While the caller holds a mmu_notifier get, the mm pointer will remain valid,
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 772) * and can be converted to an active mm pointer via mmget_not_zero().
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 773) */
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 774) struct mmu_notifier *mmu_notifier_get_locked(const struct mmu_notifier_ops *ops,
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 775) struct mm_struct *mm)
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 776) {
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 777) struct mmu_notifier *subscription;
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 778) int ret;
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 779)
42fc541404f24 (Michel Lespinasse 2020-06-08 21:33:44 -0700 780) mmap_assert_write_locked(mm);
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 781)
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 782) if (mm->notifier_subscriptions) {
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 783) subscription = find_get_mmu_notifier(mm, ops);
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 784) if (subscription)
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 785) return subscription;
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 786) }
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 787)
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 788) subscription = ops->alloc_notifier(mm);
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 789) if (IS_ERR(subscription))
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 790) return subscription;
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 791) subscription->ops = ops;
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 792) ret = __mmu_notifier_register(subscription, mm);
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 793) if (ret)
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 794) goto out_free;
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 795) return subscription;
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 796) out_free:
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 797) subscription->ops->free_notifier(subscription);
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 798) return ERR_PTR(ret);
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 799) }
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 800) EXPORT_SYMBOL_GPL(mmu_notifier_get_locked);
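/*
 * Hypothetical sketch of the allocation side of the get/put flow: the user
 * embeds the struct mmu_notifier in its own per-mm context and lets
 * ops->alloc_notifier()/->free_notifier() manage its lifetime. The
 * "example_*" names are illustrative, not kernel APIs.
 */
struct example_mm_ctx {
	struct mmu_notifier notifier;
	/* driver-private per-mm state would follow here */
};

static struct mmu_notifier *example_alloc_notifier(struct mm_struct *mm)
{
	struct example_mm_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return ERR_PTR(-ENOMEM);
	return &ctx->notifier;
}

static void example_free_notifier(struct mmu_notifier *subscription)
{
	kfree(container_of(subscription, struct example_mm_ctx, notifier));
}

static const struct mmu_notifier_ops example_get_ops __maybe_unused = {
	.alloc_notifier = example_alloc_notifier,
	.free_notifier = example_free_notifier,
};

/*
 * A caller holding mmap_write_lock(mm) would then do:
 *	subscription = mmu_notifier_get_locked(&example_get_ops, mm);
 * and later pair it with mmu_notifier_put(subscription).
 */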
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 801)
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 802) /* this is called after the last mmu_notifier_unregister() returned */
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 803) void __mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 804) {
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 805) BUG_ON(!hlist_empty(&mm->notifier_subscriptions->list));
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 806) kfree(mm->notifier_subscriptions);
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 807) mm->notifier_subscriptions = LIST_POISON1; /* debug */
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 808) }
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 809)
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 810) /*
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 811) * This releases the mm_count pin automatically and frees the mm
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 812) * structure if it was the last user of it. It serializes against
21a92735f660e (Sagi Grimberg 2012-10-08 16:29:24 -0700 813) * running mmu notifiers with SRCU and against mmu_notifier_unregister
21a92735f660e (Sagi Grimberg 2012-10-08 16:29:24 -0700 814) * with the unregister lock + SRCU. All sptes must be dropped before
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 815) * calling mmu_notifier_unregister. ->release or any other notifier
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 816) * method may be invoked concurrently with mmu_notifier_unregister,
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 817) * and only after mmu_notifier_unregister returned we're guaranteed
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 818) * that ->release or any other method can't run anymore.
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 819) */
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 820) void mmu_notifier_unregister(struct mmu_notifier *subscription,
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 821) struct mm_struct *mm)
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 822) {
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 823) BUG_ON(atomic_read(&mm->mm_count) <= 0);
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 824)
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 825) if (!hlist_unhashed(&subscription->hlist)) {
d34883d4e35c0 (Xiao Guangrong 2013-05-24 15:55:11 -0700 826) /*
d34883d4e35c0 (Xiao Guangrong 2013-05-24 15:55:11 -0700 827) * SRCU here will force exit_mmap to wait for ->release to
d34883d4e35c0 (Xiao Guangrong 2013-05-24 15:55:11 -0700 828) * finish before freeing the pages.
d34883d4e35c0 (Xiao Guangrong 2013-05-24 15:55:11 -0700 829) */
21a92735f660e (Sagi Grimberg 2012-10-08 16:29:24 -0700 830) int id;
3ad3d901bbcfb (Xiao Guangrong 2012-07-31 16:45:52 -0700 831)
d34883d4e35c0 (Xiao Guangrong 2013-05-24 15:55:11 -0700 832) id = srcu_read_lock(&srcu);
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 833) /*
d34883d4e35c0 (Xiao Guangrong 2013-05-24 15:55:11 -0700 834) * exit_mmap will block in mmu_notifier_release to guarantee
d34883d4e35c0 (Xiao Guangrong 2013-05-24 15:55:11 -0700 835) * that ->release is called before freeing the pages.
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 836) */
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 837) if (subscription->ops->release)
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 838) subscription->ops->release(subscription, mm);
d34883d4e35c0 (Xiao Guangrong 2013-05-24 15:55:11 -0700 839) srcu_read_unlock(&srcu, id);
3ad3d901bbcfb (Xiao Guangrong 2012-07-31 16:45:52 -0700 840)
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 841) spin_lock(&mm->notifier_subscriptions->lock);
751efd8610d3d (Robin Holt 2013-02-22 16:35:34 -0800 842) /*
d34883d4e35c0 (Xiao Guangrong 2013-05-24 15:55:11 -0700 843) * Cannot use list_del_rcu() since __mmu_notifier_release
d34883d4e35c0 (Xiao Guangrong 2013-05-24 15:55:11 -0700 844) * can delete it before we hold the lock.
751efd8610d3d (Robin Holt 2013-02-22 16:35:34 -0800 845) */
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 846) hlist_del_init_rcu(&subscription->hlist);
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 847) spin_unlock(&mm->notifier_subscriptions->lock);
d34883d4e35c0 (Xiao Guangrong 2013-05-24 15:55:11 -0700 848) }
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 849)
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 850) /*
d34883d4e35c0 (Xiao Guangrong 2013-05-24 15:55:11 -0700 851) * Wait for any running method to finish, of course including
83a35e360433b (Geert Uytterhoeven 2013-06-28 11:27:31 +0200 852) * ->release if it was run by mmu_notifier_release instead of us.
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 853) */
21a92735f660e (Sagi Grimberg 2012-10-08 16:29:24 -0700 854) synchronize_srcu(&srcu);
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 855)
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 856) BUG_ON(atomic_read(&mm->mm_count) <= 0);
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 857)
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 858) mmdrop(mm);
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 859) }
cddb8a5c14aa8 (Andrea Arcangeli 2008-07-28 15:46:29 -0700 860) EXPORT_SYMBOL_GPL(mmu_notifier_unregister);
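/*
 * Hypothetical teardown sketch following the rules above: all sptes must
 * be dropped before unregistering, and once mmu_notifier_unregister()
 * returns no callback can still be running. example_zap_all_sptes() is an
 * assumed driver-specific helper, shown commented out.
 */
static void __maybe_unused example_detach(struct mmu_notifier *subscription,
					  struct mm_struct *mm)
{
	/* example_zap_all_sptes(subscription); */
	mmu_notifier_unregister(subscription, mm);
	/* ->release and every other callback is guaranteed finished here */
}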
21a92735f660e (Sagi Grimberg 2012-10-08 16:29:24 -0700 861)
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 862) static void mmu_notifier_free_rcu(struct rcu_head *rcu)
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 863) {
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 864) struct mmu_notifier *subscription =
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 865) container_of(rcu, struct mmu_notifier, rcu);
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 866) struct mm_struct *mm = subscription->mm;
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 867)
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 868) subscription->ops->free_notifier(subscription);
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 869) /* Pairs with the get in __mmu_notifier_register() */
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 870) mmdrop(mm);
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 871) }
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 872)
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 873) /**
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 874) * mmu_notifier_put - Release the reference on the notifier
d49653f35adff (Krzysztof Kozlowski 2020-08-11 18:32:09 -0700 875) * @subscription: The notifier to act on
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 876) *
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 877) * This function must be paired with each mmu_notifier_get(), it releases the
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 878) * reference obtained by the get. If this is the last reference then the
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 879) * process to free the notifier will run asynchronously.
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 880) *
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 881) * Unlike mmu_notifier_unregister() the get/put flow only calls ops->release
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 882) * when the mm_struct is destroyed. Instead free_notifier is always called to
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 883) * release any resources held by the user.
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 884) *
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 885) * As ops->release is not guaranteed to be called, the user must ensure that
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 886) * all sptes are dropped, and no new sptes can be established before
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 887) * mmu_notifier_put() is called.
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 888) *
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 889) * This function can be called from the ops->release callback, however the
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 890) * caller must still ensure it is called pairwise with mmu_notifier_get().
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 891) *
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 892) * Modules calling this function must call mmu_notifier_synchronize() in
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 893) * their __exit functions to ensure the async work is completed.
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 894) */
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 895) void mmu_notifier_put(struct mmu_notifier *subscription)
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 896) {
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 897) struct mm_struct *mm = subscription->mm;
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 898)
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 899) spin_lock(&mm->notifier_subscriptions->lock);
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 900) if (WARN_ON(!subscription->users) || --subscription->users)
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 901) goto out_unlock;
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 902) hlist_del_init_rcu(&subscription->hlist);
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 903) spin_unlock(&mm->notifier_subscriptions->lock);
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 904)
1991722a70ffb (Jason Gunthorpe 2020-01-14 11:11:17 -0400 905) call_srcu(&srcu, &subscription->rcu, mmu_notifier_free_rcu);
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 906) return;
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 907)
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 908) out_unlock:
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 909) spin_unlock(&mm->notifier_subscriptions->lock);
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 910) }
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 911) EXPORT_SYMBOL_GPL(mmu_notifier_put);
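/*
 * Hypothetical ->release callback for the get/put flow: on the final
 * mmput() the subscription drops the reference taken by its earlier
 * mmu_notifier_get(), after which free_notifier() runs asynchronously.
 */
static void __maybe_unused example_release(struct mmu_notifier *subscription,
					   struct mm_struct *mm)
{
	/* tear down any remaining sptes for this mm, then: */
	mmu_notifier_put(subscription);
}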
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 912)
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 913) static int __mmu_interval_notifier_insert(
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 914) struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 915) struct mmu_notifier_subscriptions *subscriptions, unsigned long start,
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 916) unsigned long length, const struct mmu_interval_notifier_ops *ops)
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 917) {
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 918) interval_sub->mm = mm;
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 919) interval_sub->ops = ops;
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 920) RB_CLEAR_NODE(&interval_sub->interval_tree.rb);
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 921) interval_sub->interval_tree.start = start;
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 922) /*
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 923) * Note that the representation of the intervals in the interval tree
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 924) * considers the ending point as contained in the interval.
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 925) */
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 926) if (length == 0 ||
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 927) check_add_overflow(start, length - 1,
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 928) &interval_sub->interval_tree.last))
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 929) return -EOVERFLOW;
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 930)
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 931) /* Must be called with a mmget() held */
c9682d10271e1 (Jann Horn 2020-10-15 20:07:43 -0700 932) if (WARN_ON(atomic_read(&mm->mm_users) <= 0))
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 933) return -EINVAL;
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 934)
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 935) /* pairs with mmdrop in mmu_interval_notifier_remove() */
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 936) mmgrab(mm);
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 937)
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 938) /*
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 939) * If some invalidate_range_start/end region is going on in parallel
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 940) * we don't know what VA ranges are affected, so we must assume this
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 941) * new range is included.
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 942) *
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 943) * If the itree is invalidating then we are not allowed to change
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 944) * it. Retrying until invalidation is done is tricky due to the
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 945) * possibility of live lock; instead defer the add to
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 946) * mn_itree_inv_end() so this algorithm is deterministic.
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 947) *
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 948) * In all cases the value of interval_sub->invalidate_seq should be
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 949) * odd; see mmu_interval_read_begin().
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 950) */
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 951) spin_lock(&subscriptions->lock);
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 952) if (subscriptions->active_invalidate_ranges) {
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 953) if (mn_itree_is_invalidating(subscriptions))
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 954) hlist_add_head(&interval_sub->deferred_item,
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 955) &subscriptions->deferred_list);
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 956) else {
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 957) subscriptions->invalidate_seq |= 1;
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 958) interval_tree_insert(&interval_sub->interval_tree,
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 959) &subscriptions->itree);
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 960) }
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 961) interval_sub->invalidate_seq = subscriptions->invalidate_seq;
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 962) } else {
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 963) WARN_ON(mn_itree_is_invalidating(subscriptions));
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 964) /*
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 965) * The starting seq for a subscription not under invalidation
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 966) * should be odd, must not equal the current invalidate_seq, and
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 967) * invalidate_seq should not 'wrap' around to the new seq any time
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 968) * soon.
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 969) */
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 970) interval_sub->invalidate_seq =
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 971) subscriptions->invalidate_seq - 1;
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 972) interval_tree_insert(&interval_sub->interval_tree,
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 973) &subscriptions->itree);
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 974) }
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 975) spin_unlock(&subscriptions->lock);
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 976) return 0;
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 977) }
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 978)
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 979) /**
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 980) * mmu_interval_notifier_insert - Insert an interval notifier
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 981) * @interval_sub: Interval subscription to register
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 982) * @start: Starting virtual address to monitor
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 983) * @length: Length of the range to monitor
d49653f35adff (Krzysztof Kozlowski 2020-08-11 18:32:09 -0700 984) * @mm: mm_struct to attach to
d49653f35adff (Krzysztof Kozlowski 2020-08-11 18:32:09 -0700 985) * @ops: Interval notifier operations to be called on matching events
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 986) *
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 987) * This function subscribes the interval notifier for notifications from the
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 988) * mm. Upon return the ops related to mmu_interval_notifier will be called
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 989) * whenever an event that intersects with the given range occurs.
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 990) *
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 991) * Upon return the interval_sub may not be present in the interval tree yet.
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 992) * The caller must use the normal interval notifier read flow via
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 993) * mmu_interval_read_begin() to establish SPTEs for this range.
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 994) */
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 995) int mmu_interval_notifier_insert(struct mmu_interval_notifier *interval_sub,
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 996) struct mm_struct *mm, unsigned long start,
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 997) unsigned long length,
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 998) const struct mmu_interval_notifier_ops *ops)
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 999) {
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 1000) struct mmu_notifier_subscriptions *subscriptions;
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 1001) int ret;
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 1002)
da1c55f1b272f (Michel Lespinasse 2020-06-08 21:33:47 -0700 1003) might_lock(&mm->mmap_lock);
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 1004)
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 1005) subscriptions = smp_load_acquire(&mm->notifier_subscriptions);
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 1006) if (!subscriptions || !subscriptions->has_itree) {
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 1007) ret = mmu_notifier_register(NULL, mm);
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 1008) if (ret)
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 1009) return ret;
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 1010) subscriptions = mm->notifier_subscriptions;
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 1011) }
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 1012) return __mmu_interval_notifier_insert(interval_sub, mm, subscriptions,
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 1013) start, length, ops);
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 1014) }
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 1015) EXPORT_SYMBOL_GPL(mmu_interval_notifier_insert);
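/*
 * Hypothetical sketch of the collision-retry read side that must follow a
 * successful insert, together with a matching ->invalidate callback. The
 * driver lock that serializes example_fault() against example_invalidate()
 * is elided; all "example_*" names are illustrative.
 */
static bool example_invalidate(struct mmu_interval_notifier *interval_sub,
			       const struct mmu_notifier_range *range,
			       unsigned long cur_seq)
{
	if (!mmu_notifier_range_blockable(range))
		return false;
	/* with the driver lock held: */
	mmu_interval_set_seq(interval_sub, cur_seq);
	/* zap device mappings covering range->start .. range->end */
	return true;
}

static const struct mmu_interval_notifier_ops example_interval_ops __maybe_unused = {
	.invalidate = example_invalidate,
};

static void __maybe_unused example_fault(struct mmu_interval_notifier *interval_sub)
{
	unsigned long seq;

	do {
		seq = mmu_interval_read_begin(interval_sub);
		/* (re)build the device mapping for the monitored range */
	} while (mmu_interval_read_retry(interval_sub, seq));
}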
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 1016)
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 1017) int mmu_interval_notifier_insert_locked(
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 1018) struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 1019) unsigned long start, unsigned long length,
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 1020) const struct mmu_interval_notifier_ops *ops)
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 1021) {
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 1022) struct mmu_notifier_subscriptions *subscriptions =
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 1023) mm->notifier_subscriptions;
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 1024) int ret;
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 1025)
42fc541404f24 (Michel Lespinasse 2020-06-08 21:33:44 -0700 1026) mmap_assert_write_locked(mm);
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 1027)
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 1028) if (!subscriptions || !subscriptions->has_itree) {
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 1029) ret = __mmu_notifier_register(NULL, mm);
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 1030) if (ret)
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 1031) return ret;
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 1032) subscriptions = mm->notifier_subscriptions;
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 1033) }
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 1034) return __mmu_interval_notifier_insert(interval_sub, mm, subscriptions,
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 1035) start, length, ops);
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 1036) }
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 1037) EXPORT_SYMBOL_GPL(mmu_interval_notifier_insert_locked);
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 1038)
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 1039) /**
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 1040) * mmu_interval_notifier_remove - Remove an interval notifier
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 1041) * @interval_sub: Interval subscription to unregister
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 1042) *
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 1043) * This function must be paired with mmu_interval_notifier_insert(). It cannot
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 1044) * be called from any ops callback.
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 1045) *
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 1046) * Once this returns, ops callbacks are no longer running on other CPUs and
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 1047) * will not be called in the future.
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 1048) */
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 1049) void mmu_interval_notifier_remove(struct mmu_interval_notifier *interval_sub)
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 1050) {
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 1051) struct mm_struct *mm = interval_sub->mm;
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 1052) struct mmu_notifier_subscriptions *subscriptions =
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 1053) mm->notifier_subscriptions;
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 1054) unsigned long seq = 0;
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 1055)
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 1056) might_sleep();
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 1057)
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 1058) spin_lock(&subscriptions->lock);
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 1059) if (mn_itree_is_invalidating(subscriptions)) {
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 1060) /*
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 1061) * remove is being called after insert already put this subscription
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 1062) * on the deferred list, but before the deferred list was processed.
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 1063) */
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 1064) if (RB_EMPTY_NODE(&interval_sub->interval_tree.rb)) {
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 1065) hlist_del(&interval_sub->deferred_item);
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 1066) } else {
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 1067) hlist_add_head(&interval_sub->deferred_item,
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 1068) &subscriptions->deferred_list);
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 1069) seq = subscriptions->invalidate_seq;
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 1070) }
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 1071) } else {
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 1072) WARN_ON(RB_EMPTY_NODE(&interval_sub->interval_tree.rb));
5292e24a6acf5 (Jason Gunthorpe 2020-01-14 11:29:52 -0400 1073) interval_tree_remove(&interval_sub->interval_tree,
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 1074) &subscriptions->itree);
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 1075) }
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 1076) spin_unlock(&subscriptions->lock);
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 1077)
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 1078) /*
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 1079) * Waiting for an invalidation in progress may sleep, so the caller
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 1080) * must not hold any locks taken by the invalidation callbacks.
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 1081) */
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 1082) lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 1083) lock_map_release(&__mmu_notifier_invalidate_range_start_map);
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 1084) if (seq)
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 1085) wait_event(subscriptions->wq,
984cfe4e25268 (Jason Gunthorpe 2019-12-18 13:40:35 -0400 1086) READ_ONCE(subscriptions->invalidate_seq) != seq);
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 1087)
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 1088) /* pairs with mmgrab in mmu_interval_notifier_insert() */
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 1089) mmdrop(mm);
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 1090) }
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 1091) EXPORT_SYMBOL_GPL(mmu_interval_notifier_remove);
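/*
 * Hypothetical pairing for the interval API: remove must run from process
 * context, outside any ops callback, and may sleep waiting for a racing
 * invalidation to finish.
 */
static void __maybe_unused
example_interval_teardown(struct mmu_interval_notifier *interval_sub)
{
	mmu_interval_notifier_remove(interval_sub);
	/* no ->invalidate callback is running or will run after this */
}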
99cb252f5e68d (Jason Gunthorpe 2019-11-12 16:22:19 -0400 1092)
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 1093) /**
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 1094) * mmu_notifier_synchronize - Ensure all mmu_notifiers are freed
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 1095) *
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 1096) * This function ensures that all outstanding async SRCU work from
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 1097) * mmu_notifier_put() is completed. After it returns any mmu_notifier_ops
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 1098) * associated with an unused mmu_notifier will no longer be called.
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 1099) *
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 1100) * Before calling this, the caller must ensure that all of its mmu_notifiers
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 1101) * have been fully released via mmu_notifier_put().
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 1102) *
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 1103) * Modules using the mmu_notifier_put() API should call this in their __exit
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 1104) * function to avoid module unloading races.
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 1105) */
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 1106) void mmu_notifier_synchronize(void)
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 1107) {
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 1108) synchronize_srcu(&srcu);
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 1109) }
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 1110) EXPORT_SYMBOL_GPL(mmu_notifier_synchronize);
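/*
 * Hypothetical module teardown using the API above: this would be the body
 * of the module's __exit function, called after every mmu_notifier_put()
 * has been issued.
 */
static void __maybe_unused example_module_exit(void)
{
	mmu_notifier_synchronize();
	/* free_notifier() can no longer be invoked after this point */
}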
2c7933f53f6bf (Jason Gunthorpe 2019-08-06 20:15:40 -0300 1111)
c6d23413f81bd (Jérôme Glisse 2019-05-13 17:21:00 -0700 1112) bool
c6d23413f81bd (Jérôme Glisse 2019-05-13 17:21:00 -0700 1113) mmu_notifier_range_update_to_read_only(const struct mmu_notifier_range *range)
c6d23413f81bd (Jérôme Glisse 2019-05-13 17:21:00 -0700 1114) {
c6d23413f81bd (Jérôme Glisse 2019-05-13 17:21:00 -0700 1115) if (!range->vma || range->event != MMU_NOTIFY_PROTECTION_VMA)
c6d23413f81bd (Jérôme Glisse 2019-05-13 17:21:00 -0700 1116) return false;
c6d23413f81bd (Jérôme Glisse 2019-05-13 17:21:00 -0700 1117) 	/* Return true if the vma still has the read flag set. */
c6d23413f81bd (Jérôme Glisse 2019-05-13 17:21:00 -0700 1118) return range->vma->vm_flags & VM_READ;
c6d23413f81bd (Jérôme Glisse 2019-05-13 17:21:00 -0700 1119) }
c6d23413f81bd (Jérôme Glisse 2019-05-13 17:21:00 -0700 1120) EXPORT_SYMBOL_GPL(mmu_notifier_range_update_to_read_only);
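/*
 * Hypothetical use of the helper above inside an invalidate_range_start
 * callback: a write-protection event lets the device keep read-only
 * mappings instead of zapping them. example_range_start() is illustrative.
 */
static int __maybe_unused
example_range_start(struct mmu_notifier *subscription,
		    const struct mmu_notifier_range *range)
{
	if (mmu_notifier_range_update_to_read_only(range)) {
		/* downgrade device mappings to read-only */
		return 0;
	}
	/* otherwise zap device mappings for range->start .. range->end */
	return 0;
}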