author		Kent Overstreet <kent.overstreet@linux.dev>	2023-03-05 05:05:55 +0100
committer	Kent Overstreet <kent.overstreet@linux.dev>	2023-10-22 23:09:55 +0200
commit		ac2ccddc2689d5889fd1520383738b60dbafc1d0 (patch)
tree		c090c0f7749633a7d03ba3790fc2f9ec1b396318 /fs/bcachefs/bset.c
parent		bcachefs: BKEY_PADDED_ONSTACK() (diff)
bcachefs: Drop some anonymous structs, unions
Rust bindgen doesn't cope well with anonymous structs and unions. This
patch drops the fancy anonymous structs & unions in bkey_i that let us
use the same helpers for bkey_i and bkey_packed; since bkey_packed is an
internal type that's never exposed to outside code, it's only a minor
inconvenience.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
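
For context, the pattern being dropped looks roughly like the sketch below. This is a hypothetical illustration with stand-in names (sketch_bkey, sketch_bkey_i), not the actual bcachefs definitions: the idea is that an anonymous struct inside an anonymous union exposes the size field ("u64s") at a common offset, so one helper could step over either a bkey_i or a bkey_packed. bindgen has to emit generated types (__bindgen_anon_1, ...) for such anonymous members, which makes the resulting Rust bindings awkward to use.

	/*
	 * Hypothetical sketch, not the real bcachefs definitions.  The
	 * anonymous union/struct lets ->u64s be read directly on the
	 * containing type while the same bytes are also visible as the
	 * full key header "k".
	 */
	#include <stdint.h>

	struct sketch_bkey {			/* stand-in for struct bkey */
		uint8_t		u64s;		/* key + value size, in u64s */
		uint8_t		type;
	};

	struct sketch_bkey_i {			/* stand-in for struct bkey_i */
		union {
			struct {
				uint8_t	u64s;	/* aliases k.u64s below */
			};
			struct {
				struct sketch_bkey	k;
				uint64_t		v;	/* value would follow */
			};
		};
	};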
Diffstat (limited to 'fs/bcachefs/bset.c')
 fs/bcachefs/bset.c | 20 ++++++++++----------
1 file changed, 10 insertions, 10 deletions
diff --git a/fs/bcachefs/bset.c b/fs/bcachefs/bset.c
index 50a1c9d8ebab..a7cb12ea2a04 100644
--- a/fs/bcachefs/bset.c
+++ b/fs/bcachefs/bset.c
@@ -66,7 +66,7 @@ void bch2_dump_bset(struct bch_fs *c, struct btree *b,
 	for (_k = i->start;
 	     _k < vstruct_last(i);
 	     _k = _n) {
-		_n = bkey_next(_k);
+		_n = bkey_p_next(_k);
 
 		k = bkey_disassemble(b, _k, &uk);
@@ -539,7 +539,7 @@ start:
 			       rw_aux_tree(b, t)[j - 1].offset);
 		}
 
-		k = bkey_next(k);
+		k = bkey_p_next(k);
 		BUG_ON(k >= btree_bkey_last(b, t));
 	}
 }
@@ -730,7 +730,7 @@ retry:
 	/* First we figure out where the first key in each cacheline is */
 	eytzinger1_for_each(j, t->size - 1) {
 		while (bkey_to_cacheline(b, t, k) < cacheline)
-			prev = k, k = bkey_next(k);
+			prev = k, k = bkey_p_next(k);
 
 		if (k >= btree_bkey_last(b, t)) {
 			/* XXX: this path sucks */
@@ -747,7 +747,7 @@ retry:
 	}
 
 	while (k != btree_bkey_last(b, t))
-		prev = k, k = bkey_next(k);
+		prev = k, k = bkey_p_next(k);
 
 	if (!bkey_pack_pos(bkey_to_packed(&min_key), b->data->min_key, b)) {
 		bkey_init(&min_key.k);
@@ -885,7 +885,7 @@ struct bkey_packed *bch2_bkey_prev_filter(struct btree *b,
 	struct bkey_packed *p, *i, *ret = NULL, *orig_k = k;
 
 	while ((p = __bkey_prev(b, t, k)) && !ret) {
-		for (i = p; i != k; i = bkey_next(i))
+		for (i = p; i != k; i = bkey_p_next(i))
 			if (i->type >= min_key_type)
 				ret = i;
@@ -896,10 +896,10 @@ struct bkey_packed *bch2_bkey_prev_filter(struct btree *b,
 		BUG_ON(ret >= orig_k);
 
 		for (i = ret
-		     ? bkey_next(ret)
+		     ? bkey_p_next(ret)
 		     : btree_bkey_first(b, t);
 		     i != orig_k;
-		     i = bkey_next(i))
+		     i = bkey_p_next(i))
 			BUG_ON(i->type >= min_key_type);
 	}
@@ -971,7 +971,7 @@ static void bch2_bset_fix_lookup_table(struct btree *b,
 		struct bkey_packed *k = start;
 
 		while (1) {
-			k = bkey_next(k);
+			k = bkey_p_next(k);
 			if (k == end)
 				break;
@@ -1205,12 +1205,12 @@ struct bkey_packed *bch2_bset_search_linear(struct btree *b,
 		while (m != btree_bkey_last(b, t) &&
 		       bkey_iter_cmp_p_or_unp(b, m,
 					lossy_packed_search, search) < 0)
-			m = bkey_next(m);
+			m = bkey_p_next(m);
 
 	if (!packed_search)
 		while (m != btree_bkey_last(b, t) &&
 		       bkey_iter_pos_cmp(b, m, search) < 0)
-			m = bkey_next(m);
+			m = bkey_p_next(m);
 
 	if (bch2_expensive_debug_checks) {
 		struct bkey_packed *prev = bch2_bkey_prev_all(b, t, m);
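
Every hunk above is the same mechanical substitution: iteration over packed keys in a bset now uses bkey_p_next() instead of the previously shared bkey_next(). Roughly, the split looks like the sketch below, using hypothetical stand-in types and names (sketch_*) rather than the real helpers in fs/bcachefs/bkey.h: keys are laid out back to back in the node, and the first field of either layout is the total size in u64s, so "advance to the next key" is just pointer arithmetic on u64s.

	/*
	 * Sketch of the now-separate helpers, not copied from bkey.h.
	 * Without the anonymous union, bkey_packed reads ->u64s directly
	 * while bkey_i reaches it through its embedded key header ->k.
	 */
	#include <stdint.h>

	struct sketch_bkey_packed { uint8_t u64s; };
	struct sketch_bkey        { uint8_t u64s; };
	struct sketch_bkey_i      { struct sketch_bkey k; };

	static inline struct sketch_bkey_packed *
	sketch_bkey_p_next(const struct sketch_bkey_packed *k)
	{
		/* step over this key: size is in units of u64s */
		return (void *) ((uint64_t *) k + k->u64s);
	}

	static inline struct sketch_bkey_i *
	sketch_bkey_next(struct sketch_bkey_i *k)
	{
		return (void *) ((uint64_t *) k + k->k.u64s);
	}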