author    Kent Overstreet <kent.overstreet@gmail.com>  2020-12-15 03:59:33 +0100
committer Kent Overstreet <kent.overstreet@linux.dev>  2023-10-22 23:08:50 +0200
commit    35a067b42dcfd884fb132128ae94f240c6511fea (patch)
tree      39ec0fe89492d3322900902ae40f3dacd54d8291 /fs/bcachefs/move.c
parent    bcachefs: Don't use BTREE_INSERT_USE_RESERVE so much (diff)
bcachefs: Change when we allow overwrites
Originally, we'd check for -ENOSPC when getting a disk reservation whenever the new extent took up more space on disk than the old extent.

Erasure coding screwed this up, because with erasure coding writes are initially replicated, and then in the background the extra replicas are dropped when the stripe is created. This means that with erasure coding enabled, writes will always take up more space on disk than the data they're overwriting - but, according to POSIX, overwrites aren't supposed to return ENOSPC.

So, in this patch we fudge things: if the new extent has more replicas than the _effective_ replicas of the old extent, or if the old extent is compressed and the new one isn't, we check for ENOSPC when getting the disk reservation - otherwise, we don't.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
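To illustrate the rule described above, here is a minimal, hedged sketch (not the actual bcachefs code) of the overwrite decision expressed as a standalone predicate. The inputs new_replicas, old_effective_replicas, old_compressed and new_compressed are hypothetical stand-ins for what bch2_sum_sector_overwrites() derives from the old and new extents in the patch below.

#include <stdbool.h>

/*
 * Hypothetical predicate mirroring the commit message: only check for -ENOSPC
 * when the overwrite can genuinely consume more space than the data it replaces.
 */
static bool overwrite_should_check_enospc(unsigned new_replicas,
					  unsigned old_effective_replicas,
					  bool old_compressed,
					  bool new_compressed)
{
	/* More replicas than the old extent effectively had: net new space */
	if (new_replicas > old_effective_replicas)
		return true;

	/* Old extent compressed, new one not: on-disk footprint can grow */
	if (old_compressed && !new_compressed)
		return true;

	/*
	 * Otherwise it's a plain overwrite; per POSIX it must not fail with
	 * -ENOSPC, so the reservation is taken without the ENOSPC check.
	 */
	return false;
}

In the diff below, this decision comes back as should_check_enospc from bch2_sum_sector_overwrites(), and when it is false the disk reservation is taken with BCH_DISK_RESERVATION_NOFAIL.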
Diffstat
-rw-r--r--  fs/bcachefs/move.c | 35
1 file changed, 16 insertions(+), 19 deletions(-)
diff --git a/fs/bcachefs/move.c b/fs/bcachefs/move.c
index 50b7363fe84b..7f0990617b29 100644
--- a/fs/bcachefs/move.c
+++ b/fs/bcachefs/move.c
@@ -76,17 +76,15 @@ static int bch2_migrate_index_update(struct bch_write_op *op)
const union bch_extent_entry *entry;
struct extent_ptr_decoded p;
bool did_work = false;
- int nr;
+ bool extending = false, should_check_enospc;
+ s64 i_sectors_delta = 0, disk_sectors_delta = 0;
bch2_trans_reset(&trans, 0);
k = bch2_btree_iter_peek_slot(iter);
ret = bkey_err(k);
- if (ret) {
- if (ret == -EINTR)
- continue;
- break;
- }
+ if (ret)
+ goto err;
new = bkey_i_to_extent(bch2_keylist_front(keys));
@@ -143,23 +141,21 @@ static int bch2_migrate_index_update(struct bch_write_op *op)
op->opts.background_target,
op->opts.data_replicas);
- /*
- * If we're not fully overwriting @k, and it's compressed, we
- * need a reservation for all the pointers in @insert
- */
- nr = bch2_bkey_nr_ptrs_allocated(bkey_i_to_s_c(insert)) -
- m->nr_ptrs_reserved;
+ ret = bch2_sum_sector_overwrites(&trans, iter, insert,
+ &extending,
+ &should_check_enospc,
+ &i_sectors_delta,
+ &disk_sectors_delta);
+ if (ret)
+ goto err;
- if (insert->k.size < k.k->size &&
- bch2_bkey_sectors_compressed(k) &&
- nr > 0) {
+ if (disk_sectors_delta > (s64) op->res.sectors) {
ret = bch2_disk_reservation_add(c, &op->res,
- keylist_sectors(keys) * nr, 0);
+ disk_sectors_delta - op->res.sectors,
+ !should_check_enospc
+ ? BCH_DISK_RESERVATION_NOFAIL : 0);
if (ret)
goto out;
-
- m->nr_ptrs_reserved += nr;
- goto next;
}
bch2_trans_update(&trans, iter, insert, 0);
@@ -168,6 +164,7 @@ static int bch2_migrate_index_update(struct bch_write_op *op)
op_journal_seq(op),
BTREE_INSERT_NOFAIL|
m->data_opts.btree_insert_flags);
+err:
if (!ret)
atomic_long_inc(&c->extent_migrate_done);
if (ret == -EINTR)