From ac541f2503722943a9f13e0c92ed07632ba7fd38 Mon Sep 17 00:00:00 2001 From: Ralph Campbell Date: Wed, 23 Oct 2019 12:55:14 -0700 Subject: mm/hmm: allow snapshot of the special zero page MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If a device driver like nouveau tries to use hmm_range_fault() to access the special shared zero page in system memory, hmm_range_fault() will return -EFAULT and kill the process. Allow hmm_range_fault() to return success (0) when the CPU pagetable entry points to the special shared zero page. page_to_pfn() and pfn_to_page() are defined on the zero page so just handle it like any other page. Link: https://lore.kernel.org/r/20191023195515.13168-3-rcampbell@nvidia.com Signed-off-by: Ralph Campbell Reviewed-by: "Jérôme Glisse" Acked-by: David Hildenbrand Signed-off-by: Jason Gunthorpe --- mm/hmm.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/hmm.c b/mm/hmm.c index 902f5fa6bf93..6b0136665407 100644 --- a/mm/hmm.c +++ b/mm/hmm.c @@ -532,8 +532,14 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr, if (unlikely(!hmm_vma_walk->pgmap)) return -EBUSY; } else if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) && pte_special(pte)) { - *pfn = range->values[HMM_PFN_SPECIAL]; - return -EFAULT; + if (!is_zero_pfn(pte_pfn(pte))) { + *pfn = range->values[HMM_PFN_SPECIAL]; + return -EFAULT; + } + /* + * Since each architecture defines a struct page for the zero + * page, just fall through and treat it like a normal page. + */ } *pfn = hmm_device_entry_from_pfn(range, pte_pfn(pte)) | cpu_flags; -- cgit v1.2.3 From 56f434f40f059eb3769d50b9c244a850096c3d6f Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Tue, 12 Nov 2019 16:22:18 -0400 Subject: mm/mmu_notifier: define the header pre-processor parts even if disabled MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Now that we have KERNEL_HEADER_TEST all headers are generally compile tested, so relying on makefile tricks to avoid compiling code that depends on CONFIG_MMU_NOTIFIER is more annoying. Instead follow the usual pattern and provide most of the header with only the functions stubbed out when CONFIG_MMU_NOTIFIER is disabled. This ensures code compiles no matter what the config setting is. While here, struct mmu_notifier_mm is private to mmu_notifier.c, move it. 
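The stubbed-out header follows the usual kernel pattern: keep the type and function declarations visible to everyone, and swap the bodies for static inline no-ops when the option is off. A rough sketch of that shape (CONFIG_FOO and foo_notify() are made-up names used only for illustration, not identifiers from this patch):

	struct foo;				/* always declared */

	#ifdef CONFIG_FOO
	int foo_notify(struct foo *f);		/* real version lives in foo.c */
	#else
	static inline int foo_notify(struct foo *f)
	{
		return 0;			/* no-op stub so callers still compile */
	}
	#endif

Either way, every translation unit that includes the header compiles, which is what KERNEL_HEADER_TEST checks for.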
Link: https://lore.kernel.org/r/20191112202231.3856-2-jgg@ziepe.ca Reviewed-by: Jérôme Glisse Tested-by: Ralph Campbell Reviewed-by: John Hubbard Reviewed-by: Christoph Hellwig Signed-off-by: Jason Gunthorpe --- include/linux/mmu_notifier.h | 46 ++++++++++++++++---------------------------- mm/mmu_notifier.c | 13 +++++++++++++ 2 files changed, 30 insertions(+), 29 deletions(-) (limited to 'mm') diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h index 1bd8e6a09a3c..12bd603d318c 100644 --- a/include/linux/mmu_notifier.h +++ b/include/linux/mmu_notifier.h @@ -7,8 +7,9 @@ #include #include +struct mmu_notifier_mm; struct mmu_notifier; -struct mmu_notifier_ops; +struct mmu_notifier_range; /** * enum mmu_notifier_event - reason for the mmu notifier callback @@ -40,36 +41,8 @@ enum mmu_notifier_event { MMU_NOTIFY_SOFT_DIRTY, }; -#ifdef CONFIG_MMU_NOTIFIER - -#ifdef CONFIG_LOCKDEP -extern struct lockdep_map __mmu_notifier_invalidate_range_start_map; -#endif - -/* - * The mmu notifier_mm structure is allocated and installed in - * mm->mmu_notifier_mm inside the mm_take_all_locks() protected - * critical section and it's released only when mm_count reaches zero - * in mmdrop(). - */ -struct mmu_notifier_mm { - /* all mmu notifiers registerd in this mm are queued in this list */ - struct hlist_head list; - /* to serialize the list modifications and hlist_unhashed */ - spinlock_t lock; -}; - #define MMU_NOTIFIER_RANGE_BLOCKABLE (1 << 0) -struct mmu_notifier_range { - struct vm_area_struct *vma; - struct mm_struct *mm; - unsigned long start; - unsigned long end; - unsigned flags; - enum mmu_notifier_event event; -}; - struct mmu_notifier_ops { /* * Called either by mmu_notifier_unregister or when the mm is @@ -249,6 +222,21 @@ struct mmu_notifier { unsigned int users; }; +#ifdef CONFIG_MMU_NOTIFIER + +#ifdef CONFIG_LOCKDEP +extern struct lockdep_map __mmu_notifier_invalidate_range_start_map; +#endif + +struct mmu_notifier_range { + struct vm_area_struct *vma; + struct mm_struct *mm; + unsigned long start; + unsigned long end; + unsigned flags; + enum mmu_notifier_event event; +}; + static inline int mm_has_notifiers(struct mm_struct *mm) { return unlikely(mm->mmu_notifier_mm); diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c index 7fde88695f35..367670cfd02b 100644 --- a/mm/mmu_notifier.c +++ b/mm/mmu_notifier.c @@ -27,6 +27,19 @@ struct lockdep_map __mmu_notifier_invalidate_range_start_map = { }; #endif +/* + * The mmu notifier_mm structure is allocated and installed in + * mm->mmu_notifier_mm inside the mm_take_all_locks() protected + * critical section and it's released only when mm_count reaches zero + * in mmdrop(). + */ +struct mmu_notifier_mm { + /* all mmu notifiers registered in this mm are queued in this list */ + struct hlist_head list; + /* to serialize the list modifications and hlist_unhashed */ + spinlock_t lock; +}; + /* * This function can't run concurrently against mmu_notifier_register * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap -- cgit v1.2.3 From 99cb252f5e68d72afa3245a4e73d216d295cd335 Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Tue, 12 Nov 2019 16:22:19 -0400 Subject: mm/mmu_notifier: add an interval tree notifier MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Of the 13 users of mmu_notifiers, 8 of them use only invalidate_range_start/end() and immediately intersect the mmu_notifier_range with some kind of internal list of VAs. 
4 use an interval tree (i915_gem, radeon_mn, umem_odp, hfi1). 4 use a linked list of some kind (scif_dma, vhost, gntdev, hmm) And the remaining 5 either don't use invalidate_range_start() or do some special thing with it. It turns out that building a correct scheme with an interval tree is pretty complicated, particularly if the use case is synchronizing against another thread doing get_user_pages(). Many of these implementations have various subtle and difficult to fix races. This approach puts the interval tree as common code at the top of the mmu notifier call tree and implements a shareable locking scheme. It includes: - An interval tree tracking VA ranges, with per-range callbacks - A read/write locking scheme for the interval tree that avoids sleeping in the notifier path (for OOM killer) - A sequence counter based collision-retry locking scheme to tell device page fault that a VA range is being concurrently invalidated. This is based on various ideas: - hmm accumulates invalidated VA ranges and releases them when all invalidates are done, via active_invalidate_ranges count. This approach avoids having to intersect the interval tree twice (as umem_odp does) at the potential cost of a longer device page fault. - kvm/umem_odp use a sequence counter to drive the collision retry, via invalidate_seq - a deferred work todo list on unlock scheme like RTNL, via deferred_list. This makes adding/removing interval tree members more deterministic - seqlock, except this version makes the seqlock idea multi-holder on the write side by protecting it with active_invalidate_ranges and a spinlock To minimize MM overhead when only the interval tree is being used, the entire SRCU and hlist overheads are dropped using some simple branches. Similarly the interval tree overhead is dropped when in hlist mode. The overhead from the mandatory spinlock is broadly the same as most of existing users which already had a lock (or two) of some sort on the invalidation path. Link: https://lore.kernel.org/r/20191112202231.3856-3-jgg@ziepe.ca Acked-by: Christian König Tested-by: Philip Yang Tested-by: Ralph Campbell Reviewed-by: John Hubbard Reviewed-by: Christoph Hellwig Signed-off-by: Jason Gunthorpe --- include/linux/mmu_notifier.h | 101 ++++++++ mm/Kconfig | 1 + mm/mmu_notifier.c | 544 ++++++++++++++++++++++++++++++++++++++++--- 3 files changed, 620 insertions(+), 26 deletions(-) (limited to 'mm') diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h index 12bd603d318c..9e6caa8ecd19 100644 --- a/include/linux/mmu_notifier.h +++ b/include/linux/mmu_notifier.h @@ -6,10 +6,12 @@ #include #include #include +#include struct mmu_notifier_mm; struct mmu_notifier; struct mmu_notifier_range; +struct mmu_interval_notifier; /** * enum mmu_notifier_event - reason for the mmu notifier callback @@ -32,6 +34,9 @@ struct mmu_notifier_range; * access flags). User should soft dirty the page in the end callback to make * sure that anyone relying on soft dirtyness catch pages that might be written * through non CPU mappings. + * + * @MMU_NOTIFY_RELEASE: used during mmu_interval_notifier invalidate to signal + * that the mm refcount is zero and the range is no longer accessible. 
*/ enum mmu_notifier_event { MMU_NOTIFY_UNMAP = 0, @@ -39,6 +44,7 @@ enum mmu_notifier_event { MMU_NOTIFY_PROTECTION_VMA, MMU_NOTIFY_PROTECTION_PAGE, MMU_NOTIFY_SOFT_DIRTY, + MMU_NOTIFY_RELEASE, }; #define MMU_NOTIFIER_RANGE_BLOCKABLE (1 << 0) @@ -222,6 +228,26 @@ struct mmu_notifier { unsigned int users; }; +/** + * struct mmu_interval_notifier_ops + * @invalidate: Upon return the caller must stop using any SPTEs within this + * range. This function can sleep. Return false only if sleeping + * was required but mmu_notifier_range_blockable(range) is false. + */ +struct mmu_interval_notifier_ops { + bool (*invalidate)(struct mmu_interval_notifier *mni, + const struct mmu_notifier_range *range, + unsigned long cur_seq); +}; + +struct mmu_interval_notifier { + struct interval_tree_node interval_tree; + const struct mmu_interval_notifier_ops *ops; + struct mm_struct *mm; + struct hlist_node deferred_item; + unsigned long invalidate_seq; +}; + #ifdef CONFIG_MMU_NOTIFIER #ifdef CONFIG_LOCKDEP @@ -263,6 +289,81 @@ extern int __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm); extern void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm); + +unsigned long mmu_interval_read_begin(struct mmu_interval_notifier *mni); +int mmu_interval_notifier_insert(struct mmu_interval_notifier *mni, + struct mm_struct *mm, unsigned long start, + unsigned long length, + const struct mmu_interval_notifier_ops *ops); +int mmu_interval_notifier_insert_locked( + struct mmu_interval_notifier *mni, struct mm_struct *mm, + unsigned long start, unsigned long length, + const struct mmu_interval_notifier_ops *ops); +void mmu_interval_notifier_remove(struct mmu_interval_notifier *mni); + +/** + * mmu_interval_set_seq - Save the invalidation sequence + * @mni - The mni passed to invalidate + * @cur_seq - The cur_seq passed to the invalidate() callback + * + * This must be called unconditionally from the invalidate callback of a + * struct mmu_interval_notifier_ops under the same lock that is used to call + * mmu_interval_read_retry(). It updates the sequence number for later use by + * mmu_interval_read_retry(). The provided cur_seq will always be odd. + * + * If the caller does not call mmu_interval_read_begin() or + * mmu_interval_read_retry() then this call is not required. + */ +static inline void mmu_interval_set_seq(struct mmu_interval_notifier *mni, + unsigned long cur_seq) +{ + WRITE_ONCE(mni->invalidate_seq, cur_seq); +} + +/** + * mmu_interval_read_retry - End a read side critical section against a VA range + * mni: The range + * seq: The return of the paired mmu_interval_read_begin() + * + * This MUST be called under a user provided lock that is also held + * unconditionally by op->invalidate() when it calls mmu_interval_set_seq(). + * + * Each call should be paired with a single mmu_interval_read_begin() and + * should be used to conclude the read side. + * + * Returns true if an invalidation collided with this critical section, and + * the caller should retry. + */ +static inline bool mmu_interval_read_retry(struct mmu_interval_notifier *mni, + unsigned long seq) +{ + return mni->invalidate_seq != seq; +} + +/** + * mmu_interval_check_retry - Test if a collision has occurred + * mni: The range + * seq: The return of the matching mmu_interval_read_begin() + * + * This can be used in the critical section between mmu_interval_read_begin() + * and mmu_interval_read_retry(). 
A return of true indicates an invalidation + * has collided with this critical region and a future + * mmu_interval_read_retry() will return true. + * + * False is not reliable and only suggests a collision may not have + * occured. It can be called many times and does not have to hold the user + * provided lock. + * + * This call can be used as part of loops and other expensive operations to + * expedite a retry. + */ +static inline bool mmu_interval_check_retry(struct mmu_interval_notifier *mni, + unsigned long seq) +{ + /* Pairs with the WRITE_ONCE in mmu_interval_set_seq() */ + return READ_ONCE(mni->invalidate_seq) != seq; +} + extern void __mmu_notifier_mm_destroy(struct mm_struct *mm); extern void __mmu_notifier_release(struct mm_struct *mm); extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm, diff --git a/mm/Kconfig b/mm/Kconfig index a5dae9a7eb51..d0b5046d9aef 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -284,6 +284,7 @@ config VIRT_TO_BUS config MMU_NOTIFIER bool select SRCU + select INTERVAL_TREE config KSM bool "Enable KSM for page merging" diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c index 367670cfd02b..30abbfdc25be 100644 --- a/mm/mmu_notifier.c +++ b/mm/mmu_notifier.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -36,10 +37,245 @@ struct lockdep_map __mmu_notifier_invalidate_range_start_map = { struct mmu_notifier_mm { /* all mmu notifiers registered in this mm are queued in this list */ struct hlist_head list; + bool has_itree; /* to serialize the list modifications and hlist_unhashed */ spinlock_t lock; + unsigned long invalidate_seq; + unsigned long active_invalidate_ranges; + struct rb_root_cached itree; + wait_queue_head_t wq; + struct hlist_head deferred_list; }; +/* + * This is a collision-retry read-side/write-side 'lock', a lot like a + * seqcount, however this allows multiple write-sides to hold it at + * once. Conceptually the write side is protecting the values of the PTEs in + * this mm, such that PTES cannot be read into SPTEs (shadow PTEs) while any + * writer exists. + * + * Note that the core mm creates nested invalidate_range_start()/end() regions + * within the same thread, and runs invalidate_range_start()/end() in parallel + * on multiple CPUs. This is designed to not reduce concurrency or block + * progress on the mm side. + * + * As a secondary function, holding the full write side also serves to prevent + * writers for the itree, this is an optimization to avoid extra locking + * during invalidate_range_start/end notifiers. + * + * The write side has two states, fully excluded: + * - mm->active_invalidate_ranges != 0 + * - mnn->invalidate_seq & 1 == True (odd) + * - some range on the mm_struct is being invalidated + * - the itree is not allowed to change + * + * And partially excluded: + * - mm->active_invalidate_ranges != 0 + * - mnn->invalidate_seq & 1 == False (even) + * - some range on the mm_struct is being invalidated + * - the itree is allowed to change + * + * Operations on mmu_notifier_mm->invalidate_seq (under spinlock): + * seq |= 1 # Begin writing + * seq++ # Release the writing state + * seq & 1 # True if a writer exists + * + * The later state avoids some expensive work on inv_end in the common case of + * no mni monitoring the VA. 
+ */ +static bool mn_itree_is_invalidating(struct mmu_notifier_mm *mmn_mm) +{ + lockdep_assert_held(&mmn_mm->lock); + return mmn_mm->invalidate_seq & 1; +} + +static struct mmu_interval_notifier * +mn_itree_inv_start_range(struct mmu_notifier_mm *mmn_mm, + const struct mmu_notifier_range *range, + unsigned long *seq) +{ + struct interval_tree_node *node; + struct mmu_interval_notifier *res = NULL; + + spin_lock(&mmn_mm->lock); + mmn_mm->active_invalidate_ranges++; + node = interval_tree_iter_first(&mmn_mm->itree, range->start, + range->end - 1); + if (node) { + mmn_mm->invalidate_seq |= 1; + res = container_of(node, struct mmu_interval_notifier, + interval_tree); + } + + *seq = mmn_mm->invalidate_seq; + spin_unlock(&mmn_mm->lock); + return res; +} + +static struct mmu_interval_notifier * +mn_itree_inv_next(struct mmu_interval_notifier *mni, + const struct mmu_notifier_range *range) +{ + struct interval_tree_node *node; + + node = interval_tree_iter_next(&mni->interval_tree, range->start, + range->end - 1); + if (!node) + return NULL; + return container_of(node, struct mmu_interval_notifier, interval_tree); +} + +static void mn_itree_inv_end(struct mmu_notifier_mm *mmn_mm) +{ + struct mmu_interval_notifier *mni; + struct hlist_node *next; + + spin_lock(&mmn_mm->lock); + if (--mmn_mm->active_invalidate_ranges || + !mn_itree_is_invalidating(mmn_mm)) { + spin_unlock(&mmn_mm->lock); + return; + } + + /* Make invalidate_seq even */ + mmn_mm->invalidate_seq++; + + /* + * The inv_end incorporates a deferred mechanism like rtnl_unlock(). + * Adds and removes are queued until the final inv_end happens then + * they are progressed. This arrangement for tree updates is used to + * avoid using a blocking lock during invalidate_range_start. + */ + hlist_for_each_entry_safe(mni, next, &mmn_mm->deferred_list, + deferred_item) { + if (RB_EMPTY_NODE(&mni->interval_tree.rb)) + interval_tree_insert(&mni->interval_tree, + &mmn_mm->itree); + else + interval_tree_remove(&mni->interval_tree, + &mmn_mm->itree); + hlist_del(&mni->deferred_item); + } + spin_unlock(&mmn_mm->lock); + + wake_up_all(&mmn_mm->wq); +} + +/** + * mmu_interval_read_begin - Begin a read side critical section against a VA + * range + * mni: The range to use + * + * mmu_iterval_read_begin()/mmu_iterval_read_retry() implement a + * collision-retry scheme similar to seqcount for the VA range under mni. If + * the mm invokes invalidation during the critical section then + * mmu_interval_read_retry() will return true. + * + * This is useful to obtain shadow PTEs where teardown or setup of the SPTEs + * require a blocking context. The critical region formed by this can sleep, + * and the required 'user_lock' can also be a sleeping lock. + * + * The caller is required to provide a 'user_lock' to serialize both teardown + * and setup. + * + * The return value should be passed to mmu_interval_read_retry(). + */ +unsigned long mmu_interval_read_begin(struct mmu_interval_notifier *mni) +{ + struct mmu_notifier_mm *mmn_mm = mni->mm->mmu_notifier_mm; + unsigned long seq; + bool is_invalidating; + + /* + * If the mni has a different seq value under the user_lock than we + * started with then it has collided. + * + * If the mni currently has the same seq value as the mmn_mm seq, then + * it is currently between invalidate_start/end and is colliding. 
+ * + * The locking looks broadly like this: + * mn_tree_invalidate_start(): mmu_interval_read_begin(): + * spin_lock + * seq = READ_ONCE(mni->invalidate_seq); + * seq == mmn_mm->invalidate_seq + * spin_unlock + * spin_lock + * seq = ++mmn_mm->invalidate_seq + * spin_unlock + * op->invalidate_range(): + * user_lock + * mmu_interval_set_seq() + * mni->invalidate_seq = seq + * user_unlock + * + * [Required: mmu_interval_read_retry() == true] + * + * mn_itree_inv_end(): + * spin_lock + * seq = ++mmn_mm->invalidate_seq + * spin_unlock + * + * user_lock + * mmu_interval_read_retry(): + * mni->invalidate_seq != seq + * user_unlock + * + * Barriers are not needed here as any races here are closed by an + * eventual mmu_interval_read_retry(), which provides a barrier via the + * user_lock. + */ + spin_lock(&mmn_mm->lock); + /* Pairs with the WRITE_ONCE in mmu_interval_set_seq() */ + seq = READ_ONCE(mni->invalidate_seq); + is_invalidating = seq == mmn_mm->invalidate_seq; + spin_unlock(&mmn_mm->lock); + + /* + * mni->invalidate_seq must always be set to an odd value via + * mmu_interval_set_seq() using the provided cur_seq from + * mn_itree_inv_start_range(). This ensures that if seq does wrap we + * will always clear the below sleep in some reasonable time as + * mmn_mm->invalidate_seq is even in the idle state. + */ + lock_map_acquire(&__mmu_notifier_invalidate_range_start_map); + lock_map_release(&__mmu_notifier_invalidate_range_start_map); + if (is_invalidating) + wait_event(mmn_mm->wq, + READ_ONCE(mmn_mm->invalidate_seq) != seq); + + /* + * Notice that mmu_interval_read_retry() can already be true at this + * point, avoiding loops here allows the caller to provide a global + * time bound. + */ + + return seq; +} +EXPORT_SYMBOL_GPL(mmu_interval_read_begin); + +static void mn_itree_release(struct mmu_notifier_mm *mmn_mm, + struct mm_struct *mm) +{ + struct mmu_notifier_range range = { + .flags = MMU_NOTIFIER_RANGE_BLOCKABLE, + .event = MMU_NOTIFY_RELEASE, + .mm = mm, + .start = 0, + .end = ULONG_MAX, + }; + struct mmu_interval_notifier *mni; + unsigned long cur_seq; + bool ret; + + for (mni = mn_itree_inv_start_range(mmn_mm, &range, &cur_seq); mni; + mni = mn_itree_inv_next(mni, &range)) { + ret = mni->ops->invalidate(mni, &range, cur_seq); + WARN_ON(!ret); + } + + mn_itree_inv_end(mmn_mm); +} + /* * This function can't run concurrently against mmu_notifier_register * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap @@ -52,7 +288,8 @@ struct mmu_notifier_mm { * can't go away from under us as exit_mmap holds an mm_count pin * itself. */ -void __mmu_notifier_release(struct mm_struct *mm) +static void mn_hlist_release(struct mmu_notifier_mm *mmn_mm, + struct mm_struct *mm) { struct mmu_notifier *mn; int id; @@ -62,7 +299,7 @@ void __mmu_notifier_release(struct mm_struct *mm) * ->release returns. 
*/ id = srcu_read_lock(&srcu); - hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) + hlist_for_each_entry_rcu(mn, &mmn_mm->list, hlist) /* * If ->release runs before mmu_notifier_unregister it must be * handled, as it's the only way for the driver to flush all @@ -72,10 +309,9 @@ void __mmu_notifier_release(struct mm_struct *mm) if (mn->ops->release) mn->ops->release(mn, mm); - spin_lock(&mm->mmu_notifier_mm->lock); - while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) { - mn = hlist_entry(mm->mmu_notifier_mm->list.first, - struct mmu_notifier, + spin_lock(&mmn_mm->lock); + while (unlikely(!hlist_empty(&mmn_mm->list))) { + mn = hlist_entry(mmn_mm->list.first, struct mmu_notifier, hlist); /* * We arrived before mmu_notifier_unregister so @@ -85,7 +321,7 @@ void __mmu_notifier_release(struct mm_struct *mm) */ hlist_del_init_rcu(&mn->hlist); } - spin_unlock(&mm->mmu_notifier_mm->lock); + spin_unlock(&mmn_mm->lock); srcu_read_unlock(&srcu, id); /* @@ -100,6 +336,17 @@ void __mmu_notifier_release(struct mm_struct *mm) synchronize_srcu(&srcu); } +void __mmu_notifier_release(struct mm_struct *mm) +{ + struct mmu_notifier_mm *mmn_mm = mm->mmu_notifier_mm; + + if (mmn_mm->has_itree) + mn_itree_release(mmn_mm, mm); + + if (!hlist_empty(&mmn_mm->list)) + mn_hlist_release(mmn_mm, mm); +} + /* * If no young bitflag is supported by the hardware, ->clear_flush_young can * unmap the address and return 1 or 0 depending if the mapping previously @@ -172,14 +419,43 @@ void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address, srcu_read_unlock(&srcu, id); } -int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range) +static int mn_itree_invalidate(struct mmu_notifier_mm *mmn_mm, + const struct mmu_notifier_range *range) +{ + struct mmu_interval_notifier *mni; + unsigned long cur_seq; + + for (mni = mn_itree_inv_start_range(mmn_mm, range, &cur_seq); mni; + mni = mn_itree_inv_next(mni, range)) { + bool ret; + + ret = mni->ops->invalidate(mni, range, cur_seq); + if (!ret) { + if (WARN_ON(mmu_notifier_range_blockable(range))) + continue; + goto out_would_block; + } + } + return 0; + +out_would_block: + /* + * On -EAGAIN the non-blocking caller is not allowed to call + * invalidate_range_end() + */ + mn_itree_inv_end(mmn_mm); + return -EAGAIN; +} + +static int mn_hlist_invalidate_range_start(struct mmu_notifier_mm *mmn_mm, + struct mmu_notifier_range *range) { struct mmu_notifier *mn; int ret = 0; int id; id = srcu_read_lock(&srcu); - hlist_for_each_entry_rcu(mn, &range->mm->mmu_notifier_mm->list, hlist) { + hlist_for_each_entry_rcu(mn, &mmn_mm->list, hlist) { if (mn->ops->invalidate_range_start) { int _ret; @@ -203,15 +479,30 @@ int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range) return ret; } -void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range, - bool only_end) +int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range) +{ + struct mmu_notifier_mm *mmn_mm = range->mm->mmu_notifier_mm; + int ret; + + if (mmn_mm->has_itree) { + ret = mn_itree_invalidate(mmn_mm, range); + if (ret) + return ret; + } + if (!hlist_empty(&mmn_mm->list)) + return mn_hlist_invalidate_range_start(mmn_mm, range); + return 0; +} + +static void mn_hlist_invalidate_end(struct mmu_notifier_mm *mmn_mm, + struct mmu_notifier_range *range, + bool only_end) { struct mmu_notifier *mn; int id; - lock_map_acquire(&__mmu_notifier_invalidate_range_start_map); id = srcu_read_lock(&srcu); - hlist_for_each_entry_rcu(mn, 
&range->mm->mmu_notifier_mm->list, hlist) { + hlist_for_each_entry_rcu(mn, &mmn_mm->list, hlist) { /* * Call invalidate_range here too to avoid the need for the * subsystem of having to register an invalidate_range_end @@ -238,6 +529,19 @@ void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range, } } srcu_read_unlock(&srcu, id); +} + +void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range, + bool only_end) +{ + struct mmu_notifier_mm *mmn_mm = range->mm->mmu_notifier_mm; + + lock_map_acquire(&__mmu_notifier_invalidate_range_start_map); + if (mmn_mm->has_itree) + mn_itree_inv_end(mmn_mm); + + if (!hlist_empty(&mmn_mm->list)) + mn_hlist_invalidate_end(mmn_mm, range, only_end); lock_map_release(&__mmu_notifier_invalidate_range_start_map); } @@ -256,8 +560,9 @@ void __mmu_notifier_invalidate_range(struct mm_struct *mm, } /* - * Same as mmu_notifier_register but here the caller must hold the - * mmap_sem in write mode. + * Same as mmu_notifier_register but here the caller must hold the mmap_sem in + * write mode. A NULL mn signals the notifier is being registered for itree + * mode. */ int __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm) { @@ -274,9 +579,6 @@ int __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm) fs_reclaim_release(GFP_KERNEL); } - mn->mm = mm; - mn->users = 1; - if (!mm->mmu_notifier_mm) { /* * kmalloc cannot be called under mm_take_all_locks(), but we @@ -284,21 +586,22 @@ int __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm) * the write side of the mmap_sem. */ mmu_notifier_mm = - kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL); + kzalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL); if (!mmu_notifier_mm) return -ENOMEM; INIT_HLIST_HEAD(&mmu_notifier_mm->list); spin_lock_init(&mmu_notifier_mm->lock); + mmu_notifier_mm->invalidate_seq = 2; + mmu_notifier_mm->itree = RB_ROOT_CACHED; + init_waitqueue_head(&mmu_notifier_mm->wq); + INIT_HLIST_HEAD(&mmu_notifier_mm->deferred_list); } ret = mm_take_all_locks(mm); if (unlikely(ret)) goto out_clean; - /* Pairs with the mmdrop in mmu_notifier_unregister_* */ - mmgrab(mm); - /* * Serialize the update against mmu_notifier_unregister. A * side note: mmu_notifier_release can't run concurrently with @@ -306,13 +609,28 @@ int __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm) * current->mm or explicitly with get_task_mm() or similar). * We can't race against any other mmu notifier method either * thanks to mm_take_all_locks(). + * + * release semantics on the initialization of the mmu_notifier_mm's + * contents are provided for unlocked readers. acquire can only be + * used while holding the mmgrab or mmget, and is safe because once + * created the mmu_notififer_mm is not freed until the mm is + * destroyed. As above, users holding the mmap_sem or one of the + * mm_take_all_locks() do not need to use acquire semantics. 
*/ if (mmu_notifier_mm) - mm->mmu_notifier_mm = mmu_notifier_mm; + smp_store_release(&mm->mmu_notifier_mm, mmu_notifier_mm); - spin_lock(&mm->mmu_notifier_mm->lock); - hlist_add_head_rcu(&mn->hlist, &mm->mmu_notifier_mm->list); - spin_unlock(&mm->mmu_notifier_mm->lock); + if (mn) { + /* Pairs with the mmdrop in mmu_notifier_unregister_* */ + mmgrab(mm); + mn->mm = mm; + mn->users = 1; + + spin_lock(&mm->mmu_notifier_mm->lock); + hlist_add_head_rcu(&mn->hlist, &mm->mmu_notifier_mm->list); + spin_unlock(&mm->mmu_notifier_mm->lock); + } else + mm->mmu_notifier_mm->has_itree = true; mm_drop_all_locks(mm); BUG_ON(atomic_read(&mm->mm_users) <= 0); @@ -529,6 +847,180 @@ out_unlock: } EXPORT_SYMBOL_GPL(mmu_notifier_put); +static int __mmu_interval_notifier_insert( + struct mmu_interval_notifier *mni, struct mm_struct *mm, + struct mmu_notifier_mm *mmn_mm, unsigned long start, + unsigned long length, const struct mmu_interval_notifier_ops *ops) +{ + mni->mm = mm; + mni->ops = ops; + RB_CLEAR_NODE(&mni->interval_tree.rb); + mni->interval_tree.start = start; + /* + * Note that the representation of the intervals in the interval tree + * considers the ending point as contained in the interval. + */ + if (length == 0 || + check_add_overflow(start, length - 1, &mni->interval_tree.last)) + return -EOVERFLOW; + + /* Must call with a mmget() held */ + if (WARN_ON(atomic_read(&mm->mm_count) <= 0)) + return -EINVAL; + + /* pairs with mmdrop in mmu_interval_notifier_remove() */ + mmgrab(mm); + + /* + * If some invalidate_range_start/end region is going on in parallel + * we don't know what VA ranges are affected, so we must assume this + * new range is included. + * + * If the itree is invalidating then we are not allowed to change + * it. Retrying until invalidation is done is tricky due to the + * possibility for live lock, instead defer the add to + * mn_itree_inv_end() so this algorithm is deterministic. + * + * In all cases the value for the mni->invalidate_seq should be + * odd, see mmu_interval_read_begin() + */ + spin_lock(&mmn_mm->lock); + if (mmn_mm->active_invalidate_ranges) { + if (mn_itree_is_invalidating(mmn_mm)) + hlist_add_head(&mni->deferred_item, + &mmn_mm->deferred_list); + else { + mmn_mm->invalidate_seq |= 1; + interval_tree_insert(&mni->interval_tree, + &mmn_mm->itree); + } + mni->invalidate_seq = mmn_mm->invalidate_seq; + } else { + WARN_ON(mn_itree_is_invalidating(mmn_mm)); + /* + * The starting seq for a mni not under invalidation should be + * odd, not equal to the current invalidate_seq and + * invalidate_seq should not 'wrap' to the new seq any time + * soon. + */ + mni->invalidate_seq = mmn_mm->invalidate_seq - 1; + interval_tree_insert(&mni->interval_tree, &mmn_mm->itree); + } + spin_unlock(&mmn_mm->lock); + return 0; +} + +/** + * mmu_interval_notifier_insert - Insert an interval notifier + * @mni: Interval notifier to register + * @start: Starting virtual address to monitor + * @length: Length of the range to monitor + * @mm : mm_struct to attach to + * + * This function subscribes the interval notifier for notifications from the + * mm. Upon return the ops related to mmu_interval_notifier will be called + * whenever an event that intersects with the given range occurs. + * + * Upon return the range_notifier may not be present in the interval tree yet. + * The caller must use the normal interval notifier read flow via + * mmu_interval_read_begin() to establish SPTEs for this range. 
+ */ +int mmu_interval_notifier_insert(struct mmu_interval_notifier *mni, + struct mm_struct *mm, unsigned long start, + unsigned long length, + const struct mmu_interval_notifier_ops *ops) +{ + struct mmu_notifier_mm *mmn_mm; + int ret; + + might_lock(&mm->mmap_sem); + + mmn_mm = smp_load_acquire(&mm->mmu_notifier_mm); + if (!mmn_mm || !mmn_mm->has_itree) { + ret = mmu_notifier_register(NULL, mm); + if (ret) + return ret; + mmn_mm = mm->mmu_notifier_mm; + } + return __mmu_interval_notifier_insert(mni, mm, mmn_mm, start, length, + ops); +} +EXPORT_SYMBOL_GPL(mmu_interval_notifier_insert); + +int mmu_interval_notifier_insert_locked( + struct mmu_interval_notifier *mni, struct mm_struct *mm, + unsigned long start, unsigned long length, + const struct mmu_interval_notifier_ops *ops) +{ + struct mmu_notifier_mm *mmn_mm; + int ret; + + lockdep_assert_held_write(&mm->mmap_sem); + + mmn_mm = mm->mmu_notifier_mm; + if (!mmn_mm || !mmn_mm->has_itree) { + ret = __mmu_notifier_register(NULL, mm); + if (ret) + return ret; + mmn_mm = mm->mmu_notifier_mm; + } + return __mmu_interval_notifier_insert(mni, mm, mmn_mm, start, length, + ops); +} +EXPORT_SYMBOL_GPL(mmu_interval_notifier_insert_locked); + +/** + * mmu_interval_notifier_remove - Remove a interval notifier + * @mni: Interval notifier to unregister + * + * This function must be paired with mmu_interval_notifier_insert(). It cannot + * be called from any ops callback. + * + * Once this returns ops callbacks are no longer running on other CPUs and + * will not be called in future. + */ +void mmu_interval_notifier_remove(struct mmu_interval_notifier *mni) +{ + struct mm_struct *mm = mni->mm; + struct mmu_notifier_mm *mmn_mm = mm->mmu_notifier_mm; + unsigned long seq = 0; + + might_sleep(); + + spin_lock(&mmn_mm->lock); + if (mn_itree_is_invalidating(mmn_mm)) { + /* + * remove is being called after insert put this on the + * deferred list, but before the deferred list was processed. + */ + if (RB_EMPTY_NODE(&mni->interval_tree.rb)) { + hlist_del(&mni->deferred_item); + } else { + hlist_add_head(&mni->deferred_item, + &mmn_mm->deferred_list); + seq = mmn_mm->invalidate_seq; + } + } else { + WARN_ON(RB_EMPTY_NODE(&mni->interval_tree.rb)); + interval_tree_remove(&mni->interval_tree, &mmn_mm->itree); + } + spin_unlock(&mmn_mm->lock); + + /* + * The possible sleep on progress in the invalidation requires the + * caller not hold any locks held by invalidation callbacks. + */ + lock_map_acquire(&__mmu_notifier_invalidate_range_start_map); + lock_map_release(&__mmu_notifier_invalidate_range_start_map); + if (seq) + wait_event(mmn_mm->wq, + READ_ONCE(mmn_mm->invalidate_seq) != seq); + + /* pairs with mmgrab in mmu_interval_notifier_insert() */ + mmdrop(mm); +} +EXPORT_SYMBOL_GPL(mmu_interval_notifier_remove); + /** * mmu_notifier_synchronize - Ensure all mmu_notifiers are freed * -- cgit v1.2.3 From 04ec32fbc2b29a640d67872d2f88daac4c73e45b Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Tue, 12 Nov 2019 16:22:20 -0400 Subject: mm/hmm: allow hmm_range to be used with a mmu_interval_notifier or hmm_mirror MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit hmm_mirror's handling of ranges does not use a sequence count which results in this bug: CPU0 CPU1 hmm_range_wait_until_valid(range) valid == true hmm_range_fault(range) hmm_invalidate_range_start() range->valid = false hmm_invalidate_range_end() range->valid = true hmm_range_valid(range) valid == true Where the hmm_range_valid() should not have succeeded. 
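With a sequence count, the read side becomes a collision-retry loop, roughly like this (take_driver_lock()/release_driver_lock() stand in for whatever lock the driver's invalidate() callback also takes; this is only a sketch of the pattern, not code from the patch):

	range.notifier = &mni;
    again:
	range.notifier_seq = mmu_interval_read_begin(&mni);
	down_read(&mm->mmap_sem);
	ret = hmm_range_fault(&range, 0);
	up_read(&mm->mmap_sem);
	if (ret == -EBUSY)
		goto again;		/* raced with an invalidation, retry */
	if (ret)
		return ret;

	take_driver_lock();		/* same lock the invalidate() callback takes */
	if (mmu_interval_read_retry(&mni, range.notifier_seq)) {
		release_driver_lock();
		goto again;		/* snapshot went stale while unlocked */
	}
	/* ... program the device page table from range.pfns ... */
	release_driver_lock();

Any invalidation that runs between mmu_interval_read_begin() and the locked mmu_interval_read_retry() changes the sequence, so a stale snapshot like the one above can never be committed to the device.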
Adding the required sequence count would make it nearly identical to the new mmu_interval_notifier. Instead replace the hmm_mirror stuff with mmu_interval_notifier. Co-existence of the two APIs is the first step. Link: https://lore.kernel.org/r/20191112202231.3856-4-jgg@ziepe.ca Reviewed-by: Jérôme Glisse Tested-by: Philip Yang Tested-by: Ralph Campbell Reviewed-by: Christoph Hellwig Signed-off-by: Jason Gunthorpe --- include/linux/hmm.h | 5 +++++ mm/hmm.c | 25 +++++++++++++++++++------ 2 files changed, 24 insertions(+), 6 deletions(-) (limited to 'mm') diff --git a/include/linux/hmm.h b/include/linux/hmm.h index 3fec513b9c00..fbb35c78637e 100644 --- a/include/linux/hmm.h +++ b/include/linux/hmm.h @@ -145,6 +145,9 @@ enum hmm_pfn_value_e { /* * struct hmm_range - track invalidation lock on virtual address range * + * @notifier: an optional mmu_interval_notifier + * @notifier_seq: when notifier is used this is the result of + * mmu_interval_read_begin() * @hmm: the core HMM structure this range is active against * @vma: the vm area struct for the range * @list: all range lock are on a list @@ -159,6 +162,8 @@ enum hmm_pfn_value_e { * @valid: pfns array did not change since it has been fill by an HMM function */ struct hmm_range { + struct mmu_interval_notifier *notifier; + unsigned long notifier_seq; struct hmm *hmm; struct list_head list; unsigned long start; diff --git a/mm/hmm.c b/mm/hmm.c index 6b0136665407..8d060c5dabe3 100644 --- a/mm/hmm.c +++ b/mm/hmm.c @@ -858,6 +858,14 @@ void hmm_range_unregister(struct hmm_range *range) } EXPORT_SYMBOL(hmm_range_unregister); +static bool needs_retry(struct hmm_range *range) +{ + if (range->notifier) + return mmu_interval_check_retry(range->notifier, + range->notifier_seq); + return !range->valid; +} + static const struct mm_walk_ops hmm_walk_ops = { .pud_entry = hmm_vma_walk_pud, .pmd_entry = hmm_vma_walk_pmd, @@ -898,18 +906,23 @@ long hmm_range_fault(struct hmm_range *range, unsigned int flags) const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP; unsigned long start = range->start, end; struct hmm_vma_walk hmm_vma_walk; - struct hmm *hmm = range->hmm; + struct mm_struct *mm; struct vm_area_struct *vma; int ret; - lockdep_assert_held(&hmm->mmu_notifier.mm->mmap_sem); + if (range->notifier) + mm = range->notifier->mm; + else + mm = range->hmm->mmu_notifier.mm; + + lockdep_assert_held(&mm->mmap_sem); do { /* If range is no longer valid force retry. */ - if (!range->valid) + if (needs_retry(range)) return -EBUSY; - vma = find_vma(hmm->mmu_notifier.mm, start); + vma = find_vma(mm, start); if (vma == NULL || (vma->vm_flags & device_vma)) return -EFAULT; @@ -939,7 +952,7 @@ long hmm_range_fault(struct hmm_range *range, unsigned int flags) start = hmm_vma_walk.last; /* Keep trying while the range is valid. */ - } while (ret == -EBUSY && range->valid); + } while (ret == -EBUSY && !needs_retry(range)); if (ret) { unsigned long i; @@ -997,7 +1010,7 @@ long hmm_range_dma_map(struct hmm_range *range, struct device *device, continue; /* Check if range is being invalidated */ - if (!range->valid) { + if (needs_retry(range)) { ret = -EBUSY; goto unmap; } -- cgit v1.2.3 From a22dd506400d0f4784ad596f073b9eb5ed7c6a2a Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Tue, 12 Nov 2019 16:22:30 -0400 Subject: mm/hmm: remove hmm_mirror and related MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The only two users of this are now converted to use mmu_interval_notifier, delete all the code and update hmm.rst. 
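With hmm_mirror gone, the per-driver code that used to sit behind sync_cpu_device_pagetables() becomes a mmu_interval_notifier_ops->invalidate() callback along these lines (struct driver_range and its update_lock are hypothetical driver state, shown only to illustrate the locking contract and the mandatory mmu_interval_set_seq() call):

	static bool driver_invalidate(struct mmu_interval_notifier *mni,
				      const struct mmu_notifier_range *range,
				      unsigned long cur_seq)
	{
		struct driver_range *drange =
			container_of(mni, struct driver_range, notifier);

		if (mmu_notifier_range_blockable(range))
			mutex_lock(&drange->update_lock);
		else if (!mutex_trylock(&drange->update_lock))
			return false;	/* cannot sleep, ask the caller to retry */

		mmu_interval_set_seq(mni, cur_seq);	/* required, under the update lock */
		/* ... shoot down device mappings for [range->start, range->end) ... */
		mutex_unlock(&drange->update_lock);
		return true;
	}

	static const struct mmu_interval_notifier_ops driver_mni_ops = {
		.invalidate = driver_invalidate,
	};

The same update_lock is what the fault path holds when it calls mmu_interval_read_retry(), which is how the two sides serialize.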
Link: https://lore.kernel.org/r/20191112202231.3856-14-jgg@ziepe.ca Reviewed-by: Jérôme Glisse Tested-by: Ralph Campbell Reviewed-by: Christoph Hellwig Signed-off-by: Jason Gunthorpe --- Documentation/vm/hmm.rst | 105 ++++------------- include/linux/hmm.h | 183 +----------------------------- mm/Kconfig | 1 - mm/hmm.c | 285 ++--------------------------------------------- 4 files changed, 34 insertions(+), 540 deletions(-) (limited to 'mm') diff --git a/Documentation/vm/hmm.rst b/Documentation/vm/hmm.rst index 0a5960beccf7..893a8ba0e9fe 100644 --- a/Documentation/vm/hmm.rst +++ b/Documentation/vm/hmm.rst @@ -147,49 +147,16 @@ Address space mirroring implementation and API Address space mirroring's main objective is to allow duplication of a range of CPU page table into a device page table; HMM helps keep both synchronized. A device driver that wants to mirror a process address space must start with the -registration of an hmm_mirror struct:: - - int hmm_mirror_register(struct hmm_mirror *mirror, - struct mm_struct *mm); - -The mirror struct has a set of callbacks that are used -to propagate CPU page tables:: - - struct hmm_mirror_ops { - /* release() - release hmm_mirror - * - * @mirror: pointer to struct hmm_mirror - * - * This is called when the mm_struct is being released. The callback - * must ensure that all access to any pages obtained from this mirror - * is halted before the callback returns. All future access should - * fault. - */ - void (*release)(struct hmm_mirror *mirror); - - /* sync_cpu_device_pagetables() - synchronize page tables - * - * @mirror: pointer to struct hmm_mirror - * @update: update information (see struct mmu_notifier_range) - * Return: -EAGAIN if update.blockable false and callback need to - * block, 0 otherwise. - * - * This callback ultimately originates from mmu_notifiers when the CPU - * page table is updated. The device driver must update its page table - * in response to this callback. The update argument tells what action - * to perform. - * - * The device driver must not return from this callback until the device - * page tables are completely updated (TLBs flushed, etc); this is a - * synchronous call. - */ - int (*sync_cpu_device_pagetables)(struct hmm_mirror *mirror, - const struct hmm_update *update); - }; - -The device driver must perform the update action to the range (mark range -read only, or fully unmap, etc.). The device must complete the update before -the driver callback returns. +registration of a mmu_interval_notifier:: + + mni->ops = &driver_ops; + int mmu_interval_notifier_insert(struct mmu_interval_notifier *mni, + unsigned long start, unsigned long length, + struct mm_struct *mm); + +During the driver_ops->invalidate() callback the device driver must perform +the update action to the range (mark range read only, or fully unmap, +etc.). The device must complete the update before the driver callback returns. When the device driver wants to populate a range of virtual addresses, it can use:: @@ -216,70 +183,46 @@ The usage pattern is:: struct hmm_range range; ... + range.notifier = &mni; range.start = ...; range.end = ...; range.pfns = ...; range.flags = ...; range.values = ...; range.pfn_shift = ...; - hmm_range_register(&range, mirror); - /* - * Just wait for range to be valid, safe to ignore return value as we - * will use the return value of hmm_range_fault() below under the - * mmap_sem to ascertain the validity of the range. 
- */ - hmm_range_wait_until_valid(&range, TIMEOUT_IN_MSEC); + if (!mmget_not_zero(mni->notifier.mm)) + return -EFAULT; again: + range.notifier_seq = mmu_interval_read_begin(&mni); down_read(&mm->mmap_sem); ret = hmm_range_fault(&range, HMM_RANGE_SNAPSHOT); if (ret) { up_read(&mm->mmap_sem); - if (ret == -EBUSY) { - /* - * No need to check hmm_range_wait_until_valid() return value - * on retry we will get proper error with hmm_range_fault() - */ - hmm_range_wait_until_valid(&range, TIMEOUT_IN_MSEC); - goto again; - } - hmm_range_unregister(&range); + if (ret == -EBUSY) + goto again; return ret; } + up_read(&mm->mmap_sem); + take_lock(driver->update); - if (!hmm_range_valid(&range)) { + if (mmu_interval_read_retry(&ni, range.notifier_seq) { release_lock(driver->update); - up_read(&mm->mmap_sem); goto again; } - // Use pfns array content to update device page table + /* Use pfns array content to update device page table, + * under the update lock */ - hmm_range_unregister(&range); release_lock(driver->update); - up_read(&mm->mmap_sem); return 0; } The driver->update lock is the same lock that the driver takes inside its -sync_cpu_device_pagetables() callback. That lock must be held before calling -hmm_range_valid() to avoid any race with a concurrent CPU page table update. - -HMM implements all this on top of the mmu_notifier API because we wanted a -simpler API and also to be able to perform optimizations latter on like doing -concurrent device updates in multi-devices scenario. - -HMM also serves as an impedance mismatch between how CPU page table updates -are done (by CPU write to the page table and TLB flushes) and how devices -update their own page table. Device updates are a multi-step process. First, -appropriate commands are written to a buffer, then this buffer is scheduled for -execution on the device. It is only once the device has executed commands in -the buffer that the update is done. Creating and scheduling the update command -buffer can happen concurrently for multiple devices. Waiting for each device to -report commands as executed is serialized (there is no point in doing this -concurrently). - +invalidate() callback. That lock must be held before calling +mmu_interval_read_retry() to avoid any race with a concurrent CPU page table +update. 
Leverage default_flags and pfn_flags_mask ========================================= diff --git a/include/linux/hmm.h b/include/linux/hmm.h index cb69bf10dc78..1225b3c87aba 100644 --- a/include/linux/hmm.h +++ b/include/linux/hmm.h @@ -68,29 +68,6 @@ #include #include - -/* - * struct hmm - HMM per mm struct - * - * @mm: mm struct this HMM struct is bound to - * @lock: lock protecting ranges list - * @ranges: list of range being snapshotted - * @mirrors: list of mirrors for this mm - * @mmu_notifier: mmu notifier to track updates to CPU page table - * @mirrors_sem: read/write semaphore protecting the mirrors list - * @wq: wait queue for user waiting on a range invalidation - * @notifiers: count of active mmu notifiers - */ -struct hmm { - struct mmu_notifier mmu_notifier; - spinlock_t ranges_lock; - struct list_head ranges; - struct list_head mirrors; - struct rw_semaphore mirrors_sem; - wait_queue_head_t wq; - long notifiers; -}; - /* * hmm_pfn_flag_e - HMM flag enums * @@ -143,9 +120,8 @@ enum hmm_pfn_value_e { /* * struct hmm_range - track invalidation lock on virtual address range * - * @notifier: an optional mmu_interval_notifier - * @notifier_seq: when notifier is used this is the result of - * mmu_interval_read_begin() + * @notifier: a mmu_interval_notifier that includes the start/end + * @notifier_seq: result of mmu_interval_read_begin() * @hmm: the core HMM structure this range is active against * @vma: the vm area struct for the range * @list: all range lock are on a list @@ -162,8 +138,6 @@ enum hmm_pfn_value_e { struct hmm_range { struct mmu_interval_notifier *notifier; unsigned long notifier_seq; - struct hmm *hmm; - struct list_head list; unsigned long start; unsigned long end; uint64_t *pfns; @@ -172,32 +146,8 @@ struct hmm_range { uint64_t default_flags; uint64_t pfn_flags_mask; uint8_t pfn_shift; - bool valid; }; -/* - * hmm_range_wait_until_valid() - wait for range to be valid - * @range: range affected by invalidation to wait on - * @timeout: time out for wait in ms (ie abort wait after that period of time) - * Return: true if the range is valid, false otherwise. - */ -static inline bool hmm_range_wait_until_valid(struct hmm_range *range, - unsigned long timeout) -{ - return wait_event_timeout(range->hmm->wq, range->valid, - msecs_to_jiffies(timeout)) != 0; -} - -/* - * hmm_range_valid() - test if a range is valid or not - * @range: range - * Return: true if the range is valid, false otherwise. - */ -static inline bool hmm_range_valid(struct hmm_range *range) -{ - return range->valid; -} - /* * hmm_device_entry_to_page() - return struct page pointed to by a device entry * @range: range use to decode device entry value @@ -267,111 +217,6 @@ static inline uint64_t hmm_device_entry_from_pfn(const struct hmm_range *range, range->flags[HMM_PFN_VALID]; } -/* - * Mirroring: how to synchronize device page table with CPU page table. - * - * A device driver that is participating in HMM mirroring must always - * synchronize with CPU page table updates. For this, device drivers can either - * directly use mmu_notifier APIs or they can use the hmm_mirror API. Device - * drivers can decide to register one mirror per device per process, or just - * one mirror per process for a group of devices. The pattern is: - * - * int device_bind_address_space(..., struct mm_struct *mm, ...) - * { - * struct device_address_space *das; - * - * // Device driver specific initialization, and allocation of das - * // which contains an hmm_mirror struct as one of its fields. - * ... 
- * - * ret = hmm_mirror_register(&das->mirror, mm, &device_mirror_ops); - * if (ret) { - * // Cleanup on error - * return ret; - * } - * - * // Other device driver specific initialization - * ... - * } - * - * Once an hmm_mirror is registered for an address space, the device driver - * will get callbacks through sync_cpu_device_pagetables() operation (see - * hmm_mirror_ops struct). - * - * Device driver must not free the struct containing the hmm_mirror struct - * before calling hmm_mirror_unregister(). The expected usage is to do that when - * the device driver is unbinding from an address space. - * - * - * void device_unbind_address_space(struct device_address_space *das) - * { - * // Device driver specific cleanup - * ... - * - * hmm_mirror_unregister(&das->mirror); - * - * // Other device driver specific cleanup, and now das can be freed - * ... - * } - */ - -struct hmm_mirror; - -/* - * struct hmm_mirror_ops - HMM mirror device operations callback - * - * @update: callback to update range on a device - */ -struct hmm_mirror_ops { - /* release() - release hmm_mirror - * - * @mirror: pointer to struct hmm_mirror - * - * This is called when the mm_struct is being released. The callback - * must ensure that all access to any pages obtained from this mirror - * is halted before the callback returns. All future access should - * fault. - */ - void (*release)(struct hmm_mirror *mirror); - - /* sync_cpu_device_pagetables() - synchronize page tables - * - * @mirror: pointer to struct hmm_mirror - * @update: update information (see struct mmu_notifier_range) - * Return: -EAGAIN if mmu_notifier_range_blockable(update) is false - * and callback needs to block, 0 otherwise. - * - * This callback ultimately originates from mmu_notifiers when the CPU - * page table is updated. The device driver must update its page table - * in response to this callback. The update argument tells what action - * to perform. - * - * The device driver must not return from this callback until the device - * page tables are completely updated (TLBs flushed, etc); this is a - * synchronous call. - */ - int (*sync_cpu_device_pagetables)( - struct hmm_mirror *mirror, - const struct mmu_notifier_range *update); -}; - -/* - * struct hmm_mirror - mirror struct for a device driver - * - * @hmm: pointer to struct hmm (which is unique per mm_struct) - * @ops: device driver callback for HMM mirror operations - * @list: for list of mirrors of a given mm - * - * Each address space (mm_struct) being mirrored by a device must register one - * instance of an hmm_mirror struct with HMM. HMM will track the list of all - * mirrors for each mm_struct. - */ -struct hmm_mirror { - struct hmm *hmm; - const struct hmm_mirror_ops *ops; - struct list_head list; -}; - /* * Retry fault if non-blocking, drop mmap_sem and return -EAGAIN in that case. */ @@ -381,15 +226,9 @@ struct hmm_mirror { #define HMM_FAULT_SNAPSHOT (1 << 1) #ifdef CONFIG_HMM_MIRROR -int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm); -void hmm_mirror_unregister(struct hmm_mirror *mirror); - /* * Please see Documentation/vm/hmm.rst for how to use the range API. 
*/ -int hmm_range_register(struct hmm_range *range, struct hmm_mirror *mirror); -void hmm_range_unregister(struct hmm_range *range); - long hmm_range_fault(struct hmm_range *range, unsigned int flags); long hmm_range_dma_map(struct hmm_range *range, @@ -401,24 +240,6 @@ long hmm_range_dma_unmap(struct hmm_range *range, dma_addr_t *daddrs, bool dirty); #else -int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm) -{ - return -EOPNOTSUPP; -} - -void hmm_mirror_unregister(struct hmm_mirror *mirror) -{ -} - -int hmm_range_register(struct hmm_range *range, struct hmm_mirror *mirror) -{ - return -EOPNOTSUPP; -} - -void hmm_range_unregister(struct hmm_range *range) -{ -} - static inline long hmm_range_fault(struct hmm_range *range, unsigned int flags) { return -EOPNOTSUPP; diff --git a/mm/Kconfig b/mm/Kconfig index d0b5046d9aef..e38ff1d5968d 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -675,7 +675,6 @@ config DEV_PAGEMAP_OPS config HMM_MIRROR bool depends on MMU - depends on MMU_NOTIFIER config DEVICE_PRIVATE bool "Unaddressable device memory (GPU memory, ...)" diff --git a/mm/hmm.c b/mm/hmm.c index 8d060c5dabe3..aed2f39d1a98 100644 --- a/mm/hmm.c +++ b/mm/hmm.c @@ -26,193 +26,6 @@ #include #include -static struct mmu_notifier *hmm_alloc_notifier(struct mm_struct *mm) -{ - struct hmm *hmm; - - hmm = kzalloc(sizeof(*hmm), GFP_KERNEL); - if (!hmm) - return ERR_PTR(-ENOMEM); - - init_waitqueue_head(&hmm->wq); - INIT_LIST_HEAD(&hmm->mirrors); - init_rwsem(&hmm->mirrors_sem); - INIT_LIST_HEAD(&hmm->ranges); - spin_lock_init(&hmm->ranges_lock); - hmm->notifiers = 0; - return &hmm->mmu_notifier; -} - -static void hmm_free_notifier(struct mmu_notifier *mn) -{ - struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier); - - WARN_ON(!list_empty(&hmm->ranges)); - WARN_ON(!list_empty(&hmm->mirrors)); - kfree(hmm); -} - -static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm) -{ - struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier); - struct hmm_mirror *mirror; - - /* - * Since hmm_range_register() holds the mmget() lock hmm_release() is - * prevented as long as a range exists. - */ - WARN_ON(!list_empty_careful(&hmm->ranges)); - - down_read(&hmm->mirrors_sem); - list_for_each_entry(mirror, &hmm->mirrors, list) { - /* - * Note: The driver is not allowed to trigger - * hmm_mirror_unregister() from this thread. 
- */ - if (mirror->ops->release) - mirror->ops->release(mirror); - } - up_read(&hmm->mirrors_sem); -} - -static void notifiers_decrement(struct hmm *hmm) -{ - unsigned long flags; - - spin_lock_irqsave(&hmm->ranges_lock, flags); - hmm->notifiers--; - if (!hmm->notifiers) { - struct hmm_range *range; - - list_for_each_entry(range, &hmm->ranges, list) { - if (range->valid) - continue; - range->valid = true; - } - wake_up_all(&hmm->wq); - } - spin_unlock_irqrestore(&hmm->ranges_lock, flags); -} - -static int hmm_invalidate_range_start(struct mmu_notifier *mn, - const struct mmu_notifier_range *nrange) -{ - struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier); - struct hmm_mirror *mirror; - struct hmm_range *range; - unsigned long flags; - int ret = 0; - - spin_lock_irqsave(&hmm->ranges_lock, flags); - hmm->notifiers++; - list_for_each_entry(range, &hmm->ranges, list) { - if (nrange->end < range->start || nrange->start >= range->end) - continue; - - range->valid = false; - } - spin_unlock_irqrestore(&hmm->ranges_lock, flags); - - if (mmu_notifier_range_blockable(nrange)) - down_read(&hmm->mirrors_sem); - else if (!down_read_trylock(&hmm->mirrors_sem)) { - ret = -EAGAIN; - goto out; - } - - list_for_each_entry(mirror, &hmm->mirrors, list) { - int rc; - - rc = mirror->ops->sync_cpu_device_pagetables(mirror, nrange); - if (rc) { - if (WARN_ON(mmu_notifier_range_blockable(nrange) || - rc != -EAGAIN)) - continue; - ret = -EAGAIN; - break; - } - } - up_read(&hmm->mirrors_sem); - -out: - if (ret) - notifiers_decrement(hmm); - return ret; -} - -static void hmm_invalidate_range_end(struct mmu_notifier *mn, - const struct mmu_notifier_range *nrange) -{ - struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier); - - notifiers_decrement(hmm); -} - -static const struct mmu_notifier_ops hmm_mmu_notifier_ops = { - .release = hmm_release, - .invalidate_range_start = hmm_invalidate_range_start, - .invalidate_range_end = hmm_invalidate_range_end, - .alloc_notifier = hmm_alloc_notifier, - .free_notifier = hmm_free_notifier, -}; - -/* - * hmm_mirror_register() - register a mirror against an mm - * - * @mirror: new mirror struct to register - * @mm: mm to register against - * Return: 0 on success, -ENOMEM if no memory, -EINVAL if invalid arguments - * - * To start mirroring a process address space, the device driver must register - * an HMM mirror struct. - * - * The caller cannot unregister the hmm_mirror while any ranges are - * registered. - * - * Callers using this function must put a call to mmu_notifier_synchronize() - * in their module exit functions. - */ -int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm) -{ - struct mmu_notifier *mn; - - lockdep_assert_held_write(&mm->mmap_sem); - - /* Sanity check */ - if (!mm || !mirror || !mirror->ops) - return -EINVAL; - - mn = mmu_notifier_get_locked(&hmm_mmu_notifier_ops, mm); - if (IS_ERR(mn)) - return PTR_ERR(mn); - mirror->hmm = container_of(mn, struct hmm, mmu_notifier); - - down_write(&mirror->hmm->mirrors_sem); - list_add(&mirror->list, &mirror->hmm->mirrors); - up_write(&mirror->hmm->mirrors_sem); - - return 0; -} -EXPORT_SYMBOL(hmm_mirror_register); - -/* - * hmm_mirror_unregister() - unregister a mirror - * - * @mirror: mirror struct to unregister - * - * Stop mirroring a process address space, and cleanup. 
- */ -void hmm_mirror_unregister(struct hmm_mirror *mirror) -{ - struct hmm *hmm = mirror->hmm; - - down_write(&hmm->mirrors_sem); - list_del(&mirror->list); - up_write(&hmm->mirrors_sem); - mmu_notifier_put(&hmm->mmu_notifier); -} -EXPORT_SYMBOL(hmm_mirror_unregister); - struct hmm_vma_walk { struct hmm_range *range; struct dev_pagemap *pgmap; @@ -785,87 +598,6 @@ static void hmm_pfns_clear(struct hmm_range *range, *pfns = range->values[HMM_PFN_NONE]; } -/* - * hmm_range_register() - start tracking change to CPU page table over a range - * @range: range - * @mm: the mm struct for the range of virtual address - * - * Return: 0 on success, -EFAULT if the address space is no longer valid - * - * Track updates to the CPU page table see include/linux/hmm.h - */ -int hmm_range_register(struct hmm_range *range, struct hmm_mirror *mirror) -{ - struct hmm *hmm = mirror->hmm; - unsigned long flags; - - range->valid = false; - range->hmm = NULL; - - if ((range->start & (PAGE_SIZE - 1)) || (range->end & (PAGE_SIZE - 1))) - return -EINVAL; - if (range->start >= range->end) - return -EINVAL; - - /* Prevent hmm_release() from running while the range is valid */ - if (!mmget_not_zero(hmm->mmu_notifier.mm)) - return -EFAULT; - - /* Initialize range to track CPU page table updates. */ - spin_lock_irqsave(&hmm->ranges_lock, flags); - - range->hmm = hmm; - list_add(&range->list, &hmm->ranges); - - /* - * If there are any concurrent notifiers we have to wait for them for - * the range to be valid (see hmm_range_wait_until_valid()). - */ - if (!hmm->notifiers) - range->valid = true; - spin_unlock_irqrestore(&hmm->ranges_lock, flags); - - return 0; -} -EXPORT_SYMBOL(hmm_range_register); - -/* - * hmm_range_unregister() - stop tracking change to CPU page table over a range - * @range: range - * - * Range struct is used to track updates to the CPU page table after a call to - * hmm_range_register(). See include/linux/hmm.h for how to use it. - */ -void hmm_range_unregister(struct hmm_range *range) -{ - struct hmm *hmm = range->hmm; - unsigned long flags; - - spin_lock_irqsave(&hmm->ranges_lock, flags); - list_del_init(&range->list); - spin_unlock_irqrestore(&hmm->ranges_lock, flags); - - /* Drop reference taken by hmm_range_register() */ - mmput(hmm->mmu_notifier.mm); - - /* - * The range is now invalid and the ref on the hmm is dropped, so - * poison the pointer. Leave other fields in place, for the caller's - * use. - */ - range->valid = false; - memset(&range->hmm, POISON_INUSE, sizeof(range->hmm)); -} -EXPORT_SYMBOL(hmm_range_unregister); - -static bool needs_retry(struct hmm_range *range) -{ - if (range->notifier) - return mmu_interval_check_retry(range->notifier, - range->notifier_seq); - return !range->valid; -} - static const struct mm_walk_ops hmm_walk_ops = { .pud_entry = hmm_vma_walk_pud, .pmd_entry = hmm_vma_walk_pmd, @@ -906,20 +638,16 @@ long hmm_range_fault(struct hmm_range *range, unsigned int flags) const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP; unsigned long start = range->start, end; struct hmm_vma_walk hmm_vma_walk; - struct mm_struct *mm; + struct mm_struct *mm = range->notifier->mm; struct vm_area_struct *vma; int ret; - if (range->notifier) - mm = range->notifier->mm; - else - mm = range->hmm->mmu_notifier.mm; - lockdep_assert_held(&mm->mmap_sem); do { /* If range is no longer valid force retry. 
*/ - if (needs_retry(range)) + if (mmu_interval_check_retry(range->notifier, + range->notifier_seq)) return -EBUSY; vma = find_vma(mm, start); @@ -952,7 +680,9 @@ long hmm_range_fault(struct hmm_range *range, unsigned int flags) start = hmm_vma_walk.last; /* Keep trying while the range is valid. */ - } while (ret == -EBUSY && !needs_retry(range)); + } while (ret == -EBUSY && + !mmu_interval_check_retry(range->notifier, + range->notifier_seq)); if (ret) { unsigned long i; @@ -1010,7 +740,8 @@ long hmm_range_dma_map(struct hmm_range *range, struct device *device, continue; /* Check if range is being invalidated */ - if (needs_retry(range)) { + if (mmu_interval_check_retry(range->notifier, + range->notifier_seq)) { ret = -EBUSY; goto unmap; } -- cgit v1.2.3 From d28c2c9a487708b9db519ce7fedc10333032c76b Mon Sep 17 00:00:00 2001 From: Ralph Campbell Date: Mon, 4 Nov 2019 14:21:40 -0800 Subject: mm/hmm: make full use of walk_page_range() hmm_range_fault() calls find_vma() and walk_page_range() in a loop. This is unnecessary duplication since walk_page_range() calls find_vma() in a loop already. Simplify hmm_range_fault() by defining a test_walk() callback function to filter unhandled vmas. This also fixes a bug where hmm_range_fault() was not checking start >= vma->vm_start before checking vma->vm_flags, so hmm_range_fault() could return an error based on the wrong vma for the requested range. It also fixes a bug where an error was returned when the vma has no read access but the caller did not request a fault; in that case there shouldn't be any error return code. Link: https://lore.kernel.org/r/20191104222141.5173-2-rcampbell@nvidia.com Signed-off-by: Ralph Campbell Reviewed-by: Christoph Hellwig Signed-off-by: Christoph Hellwig Reviewed-by: Jason Gunthorpe Signed-off-by: Jason Gunthorpe --- mm/hmm.c | 120 +++++++++++++++++++++++++++++---------------------------------- 1 file changed, 56 insertions(+), 64 deletions(-) (limited to 'mm') diff --git a/mm/hmm.c b/mm/hmm.c index aed2f39d1a98..d903b1555de4 100644 --- a/mm/hmm.c +++ b/mm/hmm.c @@ -65,18 +65,15 @@ err: return -EFAULT; } -static int hmm_pfns_bad(unsigned long addr, - unsigned long end, - struct mm_walk *walk) +static int hmm_pfns_fill(unsigned long addr, unsigned long end, + struct hmm_range *range, enum hmm_pfn_value_e value) { - struct hmm_vma_walk *hmm_vma_walk = walk->private; - struct hmm_range *range = hmm_vma_walk->range; uint64_t *pfns = range->pfns; unsigned long i; i = (addr - range->start) >> PAGE_SHIFT; for (; addr < end; addr += PAGE_SIZE, i++) - pfns[i] = range->values[HMM_PFN_ERROR]; + pfns[i] = range->values[value]; return 0; } @@ -403,7 +400,7 @@ again: } return 0; } else if (!pmd_present(pmd)) - return hmm_pfns_bad(start, end, walk); + return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR); if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) { /* @@ -431,7 +428,7 @@ again: * recover. 
*/ if (pmd_bad(pmd)) - return hmm_pfns_bad(start, end, walk); + return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR); ptep = pte_offset_map(pmdp, addr); i = (addr - range->start) >> PAGE_SHIFT; @@ -589,13 +586,47 @@ unlock: #define hmm_vma_walk_hugetlb_entry NULL #endif /* CONFIG_HUGETLB_PAGE */ -static void hmm_pfns_clear(struct hmm_range *range, - uint64_t *pfns, - unsigned long addr, - unsigned long end) +static int hmm_vma_walk_test(unsigned long start, unsigned long end, + struct mm_walk *walk) { - for (; addr < end; addr += PAGE_SIZE, pfns++) - *pfns = range->values[HMM_PFN_NONE]; + struct hmm_vma_walk *hmm_vma_walk = walk->private; + struct hmm_range *range = hmm_vma_walk->range; + struct vm_area_struct *vma = walk->vma; + + /* + * Skip vma ranges that don't have struct page backing them or + * map I/O devices directly. + */ + if (vma->vm_flags & (VM_IO | VM_PFNMAP | VM_MIXEDMAP)) + return -EFAULT; + + /* + * If the vma does not allow read access, then assume that it does not + * allow write access either. HMM does not support architectures + * that allow write without read. + */ + if (!(vma->vm_flags & VM_READ)) { + bool fault, write_fault; + + /* + * Check to see if a fault is requested for any page in the + * range. + */ + hmm_range_need_fault(hmm_vma_walk, range->pfns + + ((start - range->start) >> PAGE_SHIFT), + (end - start) >> PAGE_SHIFT, + 0, &fault, &write_fault); + if (fault || write_fault) + return -EFAULT; + + hmm_pfns_fill(start, end, range, HMM_PFN_NONE); + hmm_vma_walk->last = end; + + /* Skip this vma and continue processing the next vma. */ + return 1; + } + + return 0; } static const struct mm_walk_ops hmm_walk_ops = { @@ -603,6 +634,7 @@ static const struct mm_walk_ops hmm_walk_ops = { .pmd_entry = hmm_vma_walk_pmd, .pte_hole = hmm_vma_walk_hole, .hugetlb_entry = hmm_vma_walk_hugetlb_entry, + .test_walk = hmm_vma_walk_test, }; /** @@ -635,11 +667,12 @@ static const struct mm_walk_ops hmm_walk_ops = { */ long hmm_range_fault(struct hmm_range *range, unsigned int flags) { - const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP; - unsigned long start = range->start, end; - struct hmm_vma_walk hmm_vma_walk; + struct hmm_vma_walk hmm_vma_walk = { + .range = range, + .last = range->start, + .flags = flags, + }; struct mm_struct *mm = range->notifier->mm; - struct vm_area_struct *vma; int ret; lockdep_assert_held(&mm->mmap_sem); @@ -649,53 +682,12 @@ long hmm_range_fault(struct hmm_range *range, unsigned int flags) if (mmu_interval_check_retry(range->notifier, range->notifier_seq)) return -EBUSY; + ret = walk_page_range(mm, hmm_vma_walk.last, range->end, + &hmm_walk_ops, &hmm_vma_walk); + } while (ret == -EBUSY); - vma = find_vma(mm, start); - if (vma == NULL || (vma->vm_flags & device_vma)) - return -EFAULT; - - if (!(vma->vm_flags & VM_READ)) { - /* - * If vma do not allow read access, then assume that it - * does not allow write access, either. HMM does not - * support architecture that allow write without read. - */ - hmm_pfns_clear(range, range->pfns, - range->start, range->end); - return -EPERM; - } - - hmm_vma_walk.pgmap = NULL; - hmm_vma_walk.last = start; - hmm_vma_walk.flags = flags; - hmm_vma_walk.range = range; - end = min(range->end, vma->vm_end); - - walk_page_range(vma->vm_mm, start, end, &hmm_walk_ops, - &hmm_vma_walk); - - do { - ret = walk_page_range(vma->vm_mm, start, end, - &hmm_walk_ops, &hmm_vma_walk); - start = hmm_vma_walk.last; - - /* Keep trying while the range is valid. 
*/ - } while (ret == -EBUSY && - !mmu_interval_check_retry(range->notifier, - range->notifier_seq)); - - if (ret) { - unsigned long i; - - i = (hmm_vma_walk.last - range->start) >> PAGE_SHIFT; - hmm_pfns_clear(range, &range->pfns[i], - hmm_vma_walk.last, range->end); - return ret; - } - start = end; - - } while (start < range->end); - + if (ret) + return ret; return (hmm_vma_walk.last - range->start) >> PAGE_SHIFT; } EXPORT_SYMBOL(hmm_range_fault); -- cgit v1.2.3 From 93f4e735b6d98ee4b7a1252d81e815a983e359f2 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 13 Nov 2019 14:45:28 +0100 Subject: mm/hmm: remove hmm_range_dma_map and hmm_range_dma_unmap These two functions have never been used since they were added. Link: https://lore.kernel.org/r/20191113134528.21187-1-hch@lst.de Signed-off-by: Christoph Hellwig Reviewed-by: John Hubbard Reviewed-by: Jason Gunthorpe Signed-off-by: Jason Gunthorpe --- include/linux/hmm.h | 23 -------- mm/hmm.c | 147 ---------------------------------------------------- 2 files changed, 170 deletions(-) (limited to 'mm') diff --git a/include/linux/hmm.h b/include/linux/hmm.h index 1225b3c87aba..ddf9f7144c43 100644 --- a/include/linux/hmm.h +++ b/include/linux/hmm.h @@ -230,34 +230,11 @@ static inline uint64_t hmm_device_entry_from_pfn(const struct hmm_range *range, * Please see Documentation/vm/hmm.rst for how to use the range API. */ long hmm_range_fault(struct hmm_range *range, unsigned int flags); - -long hmm_range_dma_map(struct hmm_range *range, - struct device *device, - dma_addr_t *daddrs, - unsigned int flags); -long hmm_range_dma_unmap(struct hmm_range *range, - struct device *device, - dma_addr_t *daddrs, - bool dirty); #else static inline long hmm_range_fault(struct hmm_range *range, unsigned int flags) { return -EOPNOTSUPP; } - -static inline long hmm_range_dma_map(struct hmm_range *range, - struct device *device, dma_addr_t *daddrs, - unsigned int flags) -{ - return -EOPNOTSUPP; -} - -static inline long hmm_range_dma_unmap(struct hmm_range *range, - struct device *device, - dma_addr_t *daddrs, bool dirty) -{ - return -EOPNOTSUPP; -} #endif /* diff --git a/mm/hmm.c b/mm/hmm.c index d903b1555de4..d379cb6496ae 100644 --- a/mm/hmm.c +++ b/mm/hmm.c @@ -691,150 +691,3 @@ long hmm_range_fault(struct hmm_range *range, unsigned int flags) return (hmm_vma_walk.last - range->start) >> PAGE_SHIFT; } EXPORT_SYMBOL(hmm_range_fault); - -/** - * hmm_range_dma_map - hmm_range_fault() and dma map page all in one. - * @range: range being faulted - * @device: device to map page to - * @daddrs: array of dma addresses for the mapped pages - * @flags: HMM_FAULT_* - * - * Return: the number of pages mapped on success (including zero), or any - * status return from hmm_range_fault() otherwise. - */ -long hmm_range_dma_map(struct hmm_range *range, struct device *device, - dma_addr_t *daddrs, unsigned int flags) -{ - unsigned long i, npages, mapped; - long ret; - - ret = hmm_range_fault(range, flags); - if (ret <= 0) - return ret ? ret : -EBUSY; - - npages = (range->end - range->start) >> PAGE_SHIFT; - for (i = 0, mapped = 0; i < npages; ++i) { - enum dma_data_direction dir = DMA_TO_DEVICE; - struct page *page; - - /* - * FIXME need to update DMA API to provide invalid DMA address - * value instead of a function to test dma address value. This - * would remove lot of dumb code duplicated accross many arch. - * - * For now setting it to 0 here is good enough as the pfns[] - * value is what is use to check what is valid and what isn't. 
- */ - daddrs[i] = 0; - - page = hmm_device_entry_to_page(range, range->pfns[i]); - if (page == NULL) - continue; - - /* Check if range is being invalidated */ - if (mmu_interval_check_retry(range->notifier, - range->notifier_seq)) { - ret = -EBUSY; - goto unmap; - } - - /* If it is read and write than map bi-directional. */ - if (range->pfns[i] & range->flags[HMM_PFN_WRITE]) - dir = DMA_BIDIRECTIONAL; - - daddrs[i] = dma_map_page(device, page, 0, PAGE_SIZE, dir); - if (dma_mapping_error(device, daddrs[i])) { - ret = -EFAULT; - goto unmap; - } - - mapped++; - } - - return mapped; - -unmap: - for (npages = i, i = 0; (i < npages) && mapped; ++i) { - enum dma_data_direction dir = DMA_TO_DEVICE; - struct page *page; - - page = hmm_device_entry_to_page(range, range->pfns[i]); - if (page == NULL) - continue; - - if (dma_mapping_error(device, daddrs[i])) - continue; - - /* If it is read and write than map bi-directional. */ - if (range->pfns[i] & range->flags[HMM_PFN_WRITE]) - dir = DMA_BIDIRECTIONAL; - - dma_unmap_page(device, daddrs[i], PAGE_SIZE, dir); - mapped--; - } - - return ret; -} -EXPORT_SYMBOL(hmm_range_dma_map); - -/** - * hmm_range_dma_unmap() - unmap range of that was map with hmm_range_dma_map() - * @range: range being unmapped - * @device: device against which dma map was done - * @daddrs: dma address of mapped pages - * @dirty: dirty page if it had the write flag set - * Return: number of page unmapped on success, -EINVAL otherwise - * - * Note that caller MUST abide by mmu notifier or use HMM mirror and abide - * to the sync_cpu_device_pagetables() callback so that it is safe here to - * call set_page_dirty(). Caller must also take appropriate locks to avoid - * concurrent mmu notifier or sync_cpu_device_pagetables() to make progress. - */ -long hmm_range_dma_unmap(struct hmm_range *range, - struct device *device, - dma_addr_t *daddrs, - bool dirty) -{ - unsigned long i, npages; - long cpages = 0; - - /* Sanity check. */ - if (range->end <= range->start) - return -EINVAL; - if (!daddrs) - return -EINVAL; - if (!range->pfns) - return -EINVAL; - - npages = (range->end - range->start) >> PAGE_SHIFT; - for (i = 0; i < npages; ++i) { - enum dma_data_direction dir = DMA_TO_DEVICE; - struct page *page; - - page = hmm_device_entry_to_page(range, range->pfns[i]); - if (page == NULL) - continue; - - /* If it is read and write than map bi-directional. */ - if (range->pfns[i] & range->flags[HMM_PFN_WRITE]) { - dir = DMA_BIDIRECTIONAL; - - /* - * See comments in function description on why it is - * safe here to call set_page_dirty() - */ - if (dirty) - set_page_dirty(page); - } - - /* Unmap and clear pfns/dma address */ - dma_unmap_page(device, daddrs[i], PAGE_SIZE, dir); - range->pfns[i] = range->values[HMM_PFN_NONE]; - /* FIXME see comments in hmm_vma_dma_map() */ - daddrs[i] = 0; - cpages++; - } - - return cpages; -} -EXPORT_SYMBOL(hmm_range_dma_unmap); -- cgit v1.2.3
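
After this series a driver consumes hmm_range_fault() through a mmu_interval_notifier rather than the removed hmm_mirror/hmm_range_register() API. The following is a minimal sketch of that call pattern, pieced together from the hunks above rather than taken from any in-tree driver: example_fault_range() is a made-up name, the pfn flag/value tables and the lock protecting device page-table updates are driver-specific and only hinted at in comments, and fields a real driver may also need to fill (default_flags, pfn_flags_mask, pfn_shift) are omitted.

#include <linux/hmm.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>

/*
 * Hypothetical driver-side retry loop against the API left in place by the
 * patches above. The caller is assumed to hold a reference on @mm and to
 * have registered @notifier over [start, end) with
 * mmu_interval_notifier_insert().
 */
static long example_fault_range(struct mmu_interval_notifier *notifier,
				struct mm_struct *mm,
				unsigned long start, unsigned long end,
				uint64_t *pfns,
				const uint64_t *flags,   /* driver HMM_PFN_* flag table */
				const uint64_t *values)  /* driver HMM_PFN_* value table */
{
	struct hmm_range range = {
		.notifier = notifier,
		.start = start,
		.end = end,
		.pfns = pfns,
		.flags = flags,
		.values = values,
	};
	long ret;

again:
	/* Snapshot the invalidation sequence before walking the CPU page tables. */
	range.notifier_seq = mmu_interval_read_begin(notifier);

	down_read(&mm->mmap_sem);
	ret = hmm_range_fault(&range, 0);
	up_read(&mm->mmap_sem);
	if (ret < 0) {
		if (ret == -EBUSY)
			goto again;	/* an invalidation raced with the walk */
		return ret;
	}

	/*
	 * A real driver re-checks the sequence under the same lock its
	 * invalidate() callback takes before touching device page tables;
	 * that lock is omitted here. If the check fails, pfns[] is stale
	 * and the whole walk must be redone.
	 */
	if (mmu_interval_read_retry(notifier, range.notifier_seq))
		goto again;

	/* pfns[] is now a consistent snapshot; program the device from it. */
	return ret;
}

The -EBUSY handling mirrors the loop inside hmm_range_fault() itself: the function bails out as soon as mmu_interval_check_retry() reports an overlapping invalidation, and it is the caller's job to restart from mmu_interval_read_begin().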
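
Because hmm_range_dma_map() and hmm_range_dma_unmap() are gone, a driver that needs DMA addresses now maps the faulted pages itself. The fragment below restates the core of the removed helper as hypothetical driver code; hmm_device_entry_to_page(), the HMM_PFN_WRITE test and the dma_map_page() usage are lifted from the hunks above, while the unwind path on mapping failure is left out to keep the sketch short.

#include <linux/dma-mapping.h>
#include <linux/hmm.h>

/* Hypothetical follow-up to a successful example_fault_range() call. */
static long example_dma_map(struct hmm_range *range, struct device *device,
			    dma_addr_t *daddrs)
{
	unsigned long npages = (range->end - range->start) >> PAGE_SHIFT;
	unsigned long i, mapped = 0;

	for (i = 0; i < npages; i++) {
		enum dma_data_direction dir = DMA_TO_DEVICE;
		struct page *page;

		/* Holes and pages that were not faulted have no struct page. */
		page = hmm_device_entry_to_page(range, range->pfns[i]);
		if (!page)
			continue;

		/* Map writable entries bidirectionally, read-only ones to-device. */
		if (range->pfns[i] & range->flags[HMM_PFN_WRITE])
			dir = DMA_BIDIRECTIONAL;

		daddrs[i] = dma_map_page(device, page, 0, PAGE_SIZE, dir);
		if (dma_mapping_error(device, daddrs[i]))
			return -EFAULT;	/* a real driver unmaps what it already mapped */

		mapped++;
	}
	return mapped;
}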