author | Kairui Song <kasong@tencent.com> | 2024-11-04 18:52:57 +0100 |
---|---|---|
committer | Andrew Morton <akpm@linux-foundation.org> | 2024-11-12 02:22:26 +0100 |
commit | da0c02516c501b43bd39ad4aca5779c86153d143 (patch) | |
tree | fb6d62186590f2191f808c14432425a055ae10e1 /mm/workingset.c | |
parent | mm/list_lru: split the lock to per-cgroup scope (diff) | |
mm/list_lru: simplify the list_lru walk callback function
Isolation no longer takes the global list_lru node lock; only the per-cgroup
lock is used instead. Since that lock lives inside the list_lru_one being
walked, there is no longer any need to pass the lock to the walk callback
explicitly.
Link: https://lkml.kernel.org/r/20241104175257.60853-7-ryncsn@gmail.com
Signed-off-by: Kairui Song <kasong@tencent.com>
Cc: Chengming Zhou <zhouchengming@bytedance.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Qi Zheng <zhengqi.arch@bytedance.com>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Waiman Long <longman@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'mm/workingset.c')
-rw-r--r-- | mm/workingset.c | 15 |
1 file changed, 7 insertions, 8 deletions
diff --git a/mm/workingset.c b/mm/workingset.c
index c187d4a3fbea..a4705e196545 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -702,8 +702,7 @@ static unsigned long count_shadow_nodes(struct shrinker *shrinker,
 
 static enum lru_status shadow_lru_isolate(struct list_head *item,
 					  struct list_lru_one *lru,
-					  spinlock_t *lru_lock,
-					  void *arg) __must_hold(lru_lock)
+					  void *arg) __must_hold(lru->lock)
 {
 	struct xa_node *node = container_of(item, struct xa_node, private_list);
 	struct address_space *mapping;
@@ -712,20 +711,20 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
 	/*
 	 * Page cache insertions and deletions synchronously maintain
 	 * the shadow node LRU under the i_pages lock and the
-	 * lru_lock. Because the page cache tree is emptied before
-	 * the inode can be destroyed, holding the lru_lock pins any
+	 * &lru->lock. Because the page cache tree is emptied before
+	 * the inode can be destroyed, holding the &lru->lock pins any
 	 * address_space that has nodes on the LRU.
 	 *
 	 * We can then safely transition to the i_pages lock to
 	 * pin only the address_space of the particular node we want
-	 * to reclaim, take the node off-LRU, and drop the lru_lock.
+	 * to reclaim, take the node off-LRU, and drop the &lru->lock.
 	 */
 
 	mapping = container_of(node->array, struct address_space, i_pages);
 
 	/* Coming from the list, invert the lock order */
 	if (!xa_trylock(&mapping->i_pages)) {
-		spin_unlock_irq(lru_lock);
+		spin_unlock_irq(&lru->lock);
 		ret = LRU_RETRY;
 		goto out;
 	}
@@ -734,7 +733,7 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
 	if (mapping->host != NULL) {
 		if (!spin_trylock(&mapping->host->i_lock)) {
 			xa_unlock(&mapping->i_pages);
-			spin_unlock_irq(lru_lock);
+			spin_unlock_irq(&lru->lock);
 			ret = LRU_RETRY;
 			goto out;
 		}
@@ -743,7 +742,7 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
 	list_lru_isolate(lru, item);
 	__dec_node_page_state(virt_to_page(node), WORKINGSET_NODES);
 
-	spin_unlock(lru_lock);
+	spin_unlock(&lru->lock);
 
 	/*
 	 * The nodes should only contain one or more shadow entries,
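To illustrate the resulting callback shape, here is a minimal, hypothetical sketch of an isolate callback written against the new prototype. Nothing below is part of the patch: demo_isolate is an invented name, and the sketch assumes the walk was started with a non-irq walk variant (the workingset code above is walked with interrupts disabled, hence its spin_unlock_irq() calls).

```c
#include <linux/list_lru.h>
#include <linux/spinlock.h>

/*
 * Hypothetical example only -- not part of this patch.  With the new
 * prototype the callback no longer receives a spinlock_t *lru_lock
 * argument; the per-cgroup lock is reached through the list_lru_one
 * that is being walked (lru->lock).
 */
static enum lru_status demo_isolate(struct list_head *item,
				    struct list_lru_one *lru,
				    void *arg) __must_hold(lru->lock)
{
	/* lru->lock is held on entry, so the list can be manipulated. */
	list_lru_isolate(lru, item);

	/*
	 * A callback that needs to sleep or invert lock order drops the
	 * per-cgroup lock directly and asks the walker to restart; the
	 * walker re-takes the lock itself.  (Assumes a non-irq walk,
	 * hence plain spin_unlock().)
	 */
	spin_unlock(&lru->lock);
	return LRU_REMOVED_RETRY;
}
```

This mirrors what the shadow_lru_isolate() hunks above do with spin_unlock_irq(&lru->lock), just without the page-cache-specific lock inversion.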