author    Kent Overstreet <kent.overstreet@linux.dev>  2024-09-05 02:49:37 +0200
committer Kent Overstreet <kent.overstreet@linux.dev>  2024-09-21 17:39:48 +0200
commit    7a51608d0125469664e2daf8e060d6d783924c98
tree      4fa41c4ed7b94e602ad97f8ed73da7e4b56f6893  /fs/bcachefs/sysfs.c
parent    bcachefs: split up btree cache counters for live, freeable
bcachefs: Rework btree node pinning
In backpointers fsck, we do a sequential scan of one btree, and check references to another: extents <-> backpointers.

Checking references generates random lookups, so we want to pin that btree in memory (or only a range, if it doesn't fit in RAM).

Previously, this was done with a simple check in the shrinker - "if btree node is in range being pinned, don't free it" - but this generated OOMs, as our shrinker wasn't well behaved if there was less memory available than expected.

Instead, we now have two different shrinkers and LRU lists; the second shrinker is for pinned nodes, with seeks set much higher than normal - so they can still be freed if necessary, but we'll prefer not to.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
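For context, a minimal sketch of the two-list layout this implies; the struct split and the field names here are assumptions reconstructed from the accessors visible in the diff below (bc->live[0].list, bc->live[0].shrink, bc->freeable), not the verbatim btree_types.h definitions:

/*
 * Sketch, assuming a layout consistent with this patch: each live
 * list carries its own shrinker, so the pinned list (live[1]) can
 * advertise a much higher seeks value and only gets reclaimed under
 * real memory pressure.
 */
struct btree_cache_list {
	unsigned		idx;		/* 0 = normal LRU, 1 = pinned */
	struct shrinker		*shrink;
	struct list_head	list;
	size_t			nr;
};

struct btree_cache {
	struct mutex		lock;
	struct list_head	freeable;	/* evicted nodes awaiting free */
	struct btree_cache_list	live[2];	/* [0] unpinned, [1] pinned */
	/* ... */
};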
Diffstat (limited to 'fs/bcachefs/sysfs.c')
-rw-r--r--  fs/bcachefs/sysfs.c | 15 ++++++++++-----
1 file changed, 10 insertions(+), 5 deletions(-)
diff --git a/fs/bcachefs/sysfs.c b/fs/bcachefs/sysfs.c
index 6791540d6a4a..03e59f86f360 100644
--- a/fs/bcachefs/sysfs.c
+++ b/fs/bcachefs/sysfs.c
@@ -244,14 +244,18 @@ static struct attribute sysfs_state_rw = {
 
 static size_t bch2_btree_cache_size(struct bch_fs *c)
 {
+	struct btree_cache *bc = &c->btree_cache;
 	size_t ret = 0;
 	struct btree *b;
 
-	mutex_lock(&c->btree_cache.lock);
-	list_for_each_entry(b, &c->btree_cache.live, list)
+	mutex_lock(&bc->lock);
+	list_for_each_entry(b, &bc->live[0].list, list)
 		ret += btree_buf_bytes(b);
-
-	mutex_unlock(&c->btree_cache.lock);
+	list_for_each_entry(b, &bc->live[1].list, list)
+		ret += btree_buf_bytes(b);
+	list_for_each_entry(b, &bc->freeable, list)
+		ret += btree_buf_bytes(b);
+	mutex_unlock(&bc->lock);
 	return ret;
 }
 
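The second hunk retargets the sysfs shrink trigger at the live[0] (unpinned) shrinker. For completeness, registering the pinned list's shrinker (done in btree_cache.c, outside this diff) would look roughly like the sketch below, using the kernel's shrinker_alloc()/shrinker_register() API; the name string and the exact seeks value are illustrative assumptions - the commit message only says seeks is "set much higher than normal":

/*
 * Sketch: a dedicated shrinker for the pinned list. A larger ->seeks
 * tells vmscan these objects are expensive to recreate, so they are
 * reclaimed last - still freeable under pressure, but preferentially
 * kept in cache.
 */
struct shrinker *shrink = shrinker_alloc(0, "%s-btree_cache_pinned", c->name);
if (!shrink)
	return -ENOMEM;

bc->live[1].shrink	= shrink;
shrink->count_objects	= bch2_btree_cache_count;	/* existing callbacks */
shrink->scan_objects	= bch2_btree_cache_scan;
shrink->seeks		= 8 * DEFAULT_SEEKS;		/* illustrative; DEFAULT_SEEKS is 2 */
shrink->private_data	= &bc->live[1];
shrinker_register(shrink);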
@@ -444,11 +448,12 @@ STORE(bch2_fs)
 		return -EROFS;
 
 	if (attr == &sysfs_trigger_btree_cache_shrink) {
+		struct btree_cache *bc = &c->btree_cache;
 		struct shrink_control sc;
 
 		sc.gfp_mask = GFP_KERNEL;
 		sc.nr_to_scan = strtoul_or_return(buf);
-		c->btree_cache.shrink->scan_objects(c->btree_cache.shrink, &sc);
+		bc->live[0].shrink->scan_objects(bc->live[0].shrink, &sc);
 	}
 
 	if (attr == &sysfs_trigger_btree_key_cache_shrink) {