path: root/mm/zswap.c
author    Yosry Ahmed <yosryahmed@google.com>    2024-06-11 04:45:15 +0200
committer Andrew Morton <akpm@linux-foundation.org>    2024-07-04 04:30:08 +0200
commit    2d4d2b1cfb85cc07f6d5619acb882d8b11e55cf4 (patch)
tree      15a2db9e05347bdf1a51e71f3ebbb0e6812d21c8 /mm/zswap.c
parent    mm: zswap: rename is_zswap_enabled() to zswap_is_enabled() (diff)
mm: zswap: add zswap_never_enabled()
Add zswap_never_enabled() to skip the xarray lookup in zswap_load() if
zswap was never enabled on the system. It is implemented using static
branches for efficiency, as enabling zswap should be a rare event. This
could shave some cycles off zswap_load() when CONFIG_ZSWAP is used but
zswap is never enabled.

However, the real motivation behind this patch is two-fold:

- Incoming large folio swapin work will need to fall back to order-0
  folios if zswap was ever enabled, because any part of the folio could
  be in zswap, until proper handling of large folios with zswap is
  added.

- A warning and recovery attempt will be added in a following change in
  case the above is not done correctly: zswap will fail the read if the
  folio is large and zswap was ever enabled.

Expose zswap_never_enabled() in the header so that the upcoming swapin
work can use it.

[yosryahmed@google.com: expose zswap_never_enabled() in the header]
Link: https://lkml.kernel.org/r/Zmjf0Dr8s9xSW41X@google.com
Link: https://lkml.kernel.org/r/20240611024516.1375191-2-yosryahmed@google.com
Signed-off-by: Yosry Ahmed <yosryahmed@google.com>
Reviewed-by: Nhat Pham <nphamcs@gmail.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Chengming Zhou <chengming.zhou@linux.dev>
Cc: Chris Li <chrisl@kernel.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
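For background, a static branch ("static key") compiles the enabled/disabled
test down to a patchable jump instruction, so the check in the fast path costs
roughly a no-op until the key is flipped at runtime. Below is a minimal,
self-contained sketch of the pattern this patch applies; the "feature_*" names
are illustrative only, and the real patch uses DEFINE_STATIC_KEY_MAYBE() so
that the default polarity follows CONFIG_ZSWAP_DEFAULT_ON:

	#include <linux/jump_label.h>

	/* Starts disabled; enabling it live-patches every branch site. */
	static DEFINE_STATIC_KEY_FALSE(feature_ever_enabled);

	/* Fast path: effectively free until the key is enabled. */
	static bool feature_never_enabled(void)
	{
		return !static_branch_unlikely(&feature_ever_enabled);
	}

	/* Slow path, run once when the feature is first turned on. */
	static void feature_mark_enabled(void)
	{
		static_branch_enable(&feature_ever_enabled);
	}

Note that the key is only ever enabled, never disabled, which matches the
"was zswap ever enabled" semantics: once set, the early return in
zswap_load() is permanently skipped.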
Diffstat (limited to 'mm/zswap.c')
-rw-r--r--	mm/zswap.c	10
1 file changed, 10 insertions(+), 0 deletions(-)
diff --git a/mm/zswap.c b/mm/zswap.c
index a8c8dd8cfe6f..9d4e54282b5f 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -83,6 +83,7 @@ static bool zswap_pool_reached_full;
 static int zswap_setup(void);
 
 /* Enable/disable zswap */
+static DEFINE_STATIC_KEY_MAYBE(CONFIG_ZSWAP_DEFAULT_ON, zswap_ever_enabled);
 static bool zswap_enabled = IS_ENABLED(CONFIG_ZSWAP_DEFAULT_ON);
 static int zswap_enabled_param_set(const char *,
 				   const struct kernel_param *);
@@ -136,6 +137,11 @@ bool zswap_is_enabled(void)
 	return zswap_enabled;
 }
 
+bool zswap_never_enabled(void)
+{
+	return !static_branch_maybe(CONFIG_ZSWAP_DEFAULT_ON, &zswap_ever_enabled);
+}
+
 /*********************************
 * data structures
 **********************************/
@@ -1557,6 +1563,9 @@ bool zswap_load(struct folio *folio)
 
 	VM_WARN_ON_ONCE(!folio_test_locked(folio));
 
+	if (zswap_never_enabled())
+		return false;
+
 	/*
 	 * When reading into the swapcache, invalidate our entry. The
 	 * swapcache can be the authoritative owner of the page and
@@ -1735,6 +1744,7 @@ static int zswap_setup(void)
 			   zpool_get_type(pool->zpools[0]));
 		list_add(&pool->list, &zswap_pools);
 		zswap_has_pool = true;
+		static_branch_enable(&zswap_ever_enabled);
 	} else {
 		pr_err("pool creation failed\n");
 		zswap_enabled = false;
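As a usage illustration only (this caller is hypothetical and not part of the
patch), the planned large folio swapin work could consult the new helper
roughly as follows, clamping to order-0 whenever part of a folio might still
live in zswap:

	#include <linux/zswap.h>

	/*
	 * Hypothetical helper: only allow a large swapin folio when zswap
	 * has provably never been enabled, since otherwise any subpage of
	 * the folio could reside in zswap.
	 */
	static int swapin_folio_order(int desired_order)
	{
		if (zswap_never_enabled())
			return desired_order;
		return 0;
	}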