Diffstat (limited to 'refs')
-rw-r--r--  refs/debug.c             |   3
-rw-r--r--  refs/files-backend.c     |  32
-rw-r--r--  refs/iterator.c          |   2
-rw-r--r--  refs/packed-backend.c    |   1
-rw-r--r--  refs/refs-internal.h     |   5
-rw-r--r--  refs/reftable-backend.c  | 438
6 files changed, 316 insertions(+), 165 deletions(-)
diff --git a/refs/debug.c b/refs/debug.c
index a893ae0c90..fbc4df08b4 100644
--- a/refs/debug.c
+++ b/refs/debug.c
@@ -83,9 +83,8 @@ static void print_update(int i, const char *refname,
static void print_transaction(struct ref_transaction *transaction)
{
- int i;
trace_printf_key(&trace_refs, "transaction {\n");
- for (i = 0; i < transaction->nr; i++) {
+ for (size_t i = 0; i < transaction->nr; i++) {
struct ref_update *u = transaction->updates[i];
print_update(i, u->refname, &u->old_oid, &u->new_oid, u->flags,
u->type, u->msg);
diff --git a/refs/files-backend.c b/refs/files-backend.c
index 8953d1c6d3..5cfb8b7ca8 100644
--- a/refs/files-backend.c
+++ b/refs/files-backend.c
@@ -1,4 +1,5 @@
#define USE_THE_REPOSITORY_VARIABLE
+#define DISABLE_SIGN_COMPARE_WARNINGS
#include "../git-compat-util.h"
#include "../abspath.h"
@@ -599,10 +600,9 @@ static int files_read_symbolic_ref(struct ref_store *ref_store, const char *refn
unsigned int type;
ret = read_ref_internal(ref_store, refname, &oid, referent, &type, &failure_errno, 1);
- if (ret)
- return ret;
-
- return !(type & REF_ISSYMREF);
+ if (!ret && !(type & REF_ISSYMREF))
+ return NOT_A_SYMREF;
+ return ret;
}
int parse_loose_ref_contents(const struct git_hash_algo *algop,
@@ -2519,14 +2519,18 @@ static int split_symref_update(struct ref_update *update,
static int check_old_oid(struct ref_update *update, struct object_id *oid,
struct strbuf *err)
{
+ int ret = TRANSACTION_GENERIC_ERROR;
+
if (!(update->flags & REF_HAVE_OLD) ||
oideq(oid, &update->old_oid))
return 0;
- if (is_null_oid(&update->old_oid))
+ if (is_null_oid(&update->old_oid)) {
strbuf_addf(err, "cannot lock ref '%s': "
"reference already exists",
ref_update_original_update_refname(update));
+ ret = TRANSACTION_CREATE_EXISTS;
+ }
else if (is_null_oid(oid))
strbuf_addf(err, "cannot lock ref '%s': "
"reference is missing but expected %s",
@@ -2539,7 +2543,7 @@ static int check_old_oid(struct ref_update *update, struct object_id *oid,
oid_to_hex(oid),
oid_to_hex(&update->old_oid));
- return -1;
+ return ret;
}
struct files_transaction_backend_data {
@@ -2638,9 +2642,11 @@ static int lock_ref_for_update(struct files_ref_store *refs,
ret = TRANSACTION_GENERIC_ERROR;
goto out;
}
- } else if (check_old_oid(update, &lock->old_oid, err)) {
- ret = TRANSACTION_GENERIC_ERROR;
- goto out;
+ } else {
+ ret = check_old_oid(update, &lock->old_oid, err);
+ if (ret) {
+ goto out;
+ }
}
} else {
/*
@@ -2671,9 +2677,11 @@ static int lock_ref_for_update(struct files_ref_store *refs,
update->old_target);
ret = TRANSACTION_GENERIC_ERROR;
goto out;
- } else if (check_old_oid(update, &lock->old_oid, err)) {
- ret = TRANSACTION_GENERIC_ERROR;
- goto out;
+ } else {
+ ret = check_old_oid(update, &lock->old_oid, err);
+ if (ret) {
+ goto out;
+ }
}
/*
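The check_old_oid() hunks above change the function's return contract: rather than collapsing every old-OID mismatch to -1, it now reports TRANSACTION_CREATE_EXISTS when a creation finds the reference already existing, and TRANSACTION_GENERIC_ERROR otherwise, and lock_ref_for_update() forwards that value instead of overwriting it. A minimal sketch of the resulting calling pattern (not part of the patch; assumes Git's internal refs headers):

        int ret = check_old_oid(update, &lock->old_oid, err);
        if (ret) {
                /*
                 * ret is TRANSACTION_CREATE_EXISTS when the update tried
                 * to create a reference that already exists, and
                 * TRANSACTION_GENERIC_ERROR for any other mismatch, so
                 * callers can propagate it unchanged.
                 */
                goto out;
        }
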
diff --git a/refs/iterator.c b/refs/iterator.c
index 8e999d81fc..d25e568bf0 100644
--- a/refs/iterator.c
+++ b/refs/iterator.c
@@ -3,6 +3,8 @@
* documentation about the design and use of reference iterators.
*/
+#define DISABLE_SIGN_COMPARE_WARNINGS
+
#include "git-compat-util.h"
#include "refs.h"
#include "refs/refs-internal.h"
diff --git a/refs/packed-backend.c b/refs/packed-backend.c
index 3406f1e71d..a7b6f74b6e 100644
--- a/refs/packed-backend.c
+++ b/refs/packed-backend.c
@@ -1,4 +1,5 @@
#define USE_THE_REPOSITORY_VARIABLE
+#define DISABLE_SIGN_COMPARE_WARNINGS
#include "../git-compat-util.h"
#include "../config.h"
diff --git a/refs/refs-internal.h b/refs/refs-internal.h
index 79b287c5ec..16550862d3 100644
--- a/refs/refs-internal.h
+++ b/refs/refs-internal.h
@@ -683,6 +683,11 @@ struct ref_storage_be {
ref_iterator_begin_fn *iterator_begin;
read_raw_ref_fn *read_raw_ref;
+
+ /*
+ * Please refer to `refs_read_symbolic_ref()` for the expected
+ * behaviour.
+ */
read_symbolic_ref_fn *read_symbolic_ref;
reflog_iterator_begin_fn *reflog_iterator_begin;
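The new comment in refs-internal.h ties backends to the contract of refs_read_symbolic_ref(), which the files and reftable hunks in this diff converge on: 0 on success with the target written to `referent`, NOT_A_SYMREF when the reference exists but is not symbolic, and a negative value on error. A hypothetical caller, sketched under that contract:

        struct strbuf referent = STRBUF_INIT;
        int ret = refs_read_symbolic_ref(ref_store, "HEAD", &referent);

        if (!ret)
                printf("HEAD -> %s\n", referent.buf); /* e.g. refs/heads/main */
        else if (ret == NOT_A_SYMREF)
                printf("HEAD is not a symbolic ref\n"); /* detached HEAD */
        else
                printf("reading HEAD failed\n"); /* ret < 0 */

        strbuf_release(&referent);
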
diff --git a/refs/reftable-backend.c b/refs/reftable-backend.c
index bec5962deb..00d95a9a2f 100644
--- a/refs/reftable-backend.c
+++ b/refs/reftable-backend.c
@@ -15,6 +15,7 @@
#include "../object.h"
#include "../path.h"
#include "../refs.h"
+#include "../reftable/reftable-basics.h"
#include "../reftable/reftable-stack.h"
#include "../reftable/reftable-record.h"
#include "../reftable/reftable-error.h"
@@ -23,6 +24,7 @@
#include "../setup.h"
#include "../strmap.h"
#include "../trace2.h"
+#include "../write-or-die.h"
#include "parse.h"
#include "refs-internal.h"
@@ -32,24 +34,111 @@
*/
#define REF_UPDATE_VIA_HEAD (1 << 8)
+struct reftable_backend {
+ struct reftable_stack *stack;
+ struct reftable_iterator it;
+};
+
+static void reftable_backend_on_reload(void *payload)
+{
+ struct reftable_backend *be = payload;
+ reftable_iterator_destroy(&be->it);
+}
+
+static int reftable_backend_init(struct reftable_backend *be,
+ const char *path,
+ const struct reftable_write_options *_opts)
+{
+ struct reftable_write_options opts = *_opts;
+ opts.on_reload = reftable_backend_on_reload;
+ opts.on_reload_payload = be;
+ return reftable_new_stack(&be->stack, path, &opts);
+}
+
+static void reftable_backend_release(struct reftable_backend *be)
+{
+ reftable_stack_destroy(be->stack);
+ be->stack = NULL;
+ reftable_iterator_destroy(&be->it);
+}
+
+static int reftable_backend_read_ref(struct reftable_backend *be,
+ const char *refname,
+ struct object_id *oid,
+ struct strbuf *referent,
+ unsigned int *type)
+{
+ struct reftable_ref_record ref = {0};
+ int ret;
+
+ if (!be->it.ops) {
+ ret = reftable_stack_init_ref_iterator(be->stack, &be->it);
+ if (ret)
+ goto done;
+ }
+
+ ret = reftable_iterator_seek_ref(&be->it, refname);
+ if (ret)
+ goto done;
+
+ ret = reftable_iterator_next_ref(&be->it, &ref);
+ if (ret)
+ goto done;
+
+ if (strcmp(ref.refname, refname)) {
+ ret = 1;
+ goto done;
+ }
+
+ if (ref.value_type == REFTABLE_REF_SYMREF) {
+ strbuf_reset(referent);
+ strbuf_addstr(referent, ref.value.symref);
+ *type |= REF_ISSYMREF;
+ } else if (reftable_ref_record_val1(&ref)) {
+ unsigned int hash_id;
+
+ switch (reftable_stack_hash_id(be->stack)) {
+ case REFTABLE_HASH_SHA1:
+ hash_id = GIT_HASH_SHA1;
+ break;
+ case REFTABLE_HASH_SHA256:
+ hash_id = GIT_HASH_SHA256;
+ break;
+ default:
+ BUG("unhandled hash ID %d", reftable_stack_hash_id(be->stack));
+ }
+
+ oidread(oid, reftable_ref_record_val1(&ref),
+ &hash_algos[hash_id]);
+ } else {
+ /* We got a tombstone, which should not happen. */
+ BUG("unhandled reference value type %d", ref.value_type);
+ }
+
+done:
+ assert(ret != REFTABLE_API_ERROR);
+ reftable_ref_record_release(&ref);
+ return ret;
+}
+
struct reftable_ref_store {
struct ref_store base;
/*
- * The main stack refers to the common dir and thus contains common
+ * The main backend refers to the common dir and thus contains common
* refs as well as refs of the main repository.
*/
- struct reftable_stack *main_stack;
+ struct reftable_backend main_backend;
/*
- * The worktree stack refers to the gitdir in case the refdb is opened
+ * The worktree backend refers to the gitdir in case the refdb is opened
* via a worktree. It thus contains the per-worktree refs.
*/
- struct reftable_stack *worktree_stack;
+ struct reftable_backend worktree_backend;
/*
- * Map of worktree stacks by their respective worktree names. The map
+ * Map of worktree backends by their respective worktree names. The map
* is populated lazily when we try to resolve `worktrees/$worktree` refs.
*/
- struct strmap worktree_stacks;
+ struct strmap worktree_backends;
struct reftable_write_options write_options;
unsigned int store_flags;
@@ -95,21 +184,25 @@ static struct reftable_ref_store *reftable_be_downcast(struct ref_store *ref_sto
* like `worktrees/$worktree/refs/heads/foo` as worktree stacks will store
* those references in their normalized form.
*/
-static struct reftable_stack *stack_for(struct reftable_ref_store *store,
- const char *refname,
- const char **rewritten_ref)
+static int backend_for(struct reftable_backend **out,
+ struct reftable_ref_store *store,
+ const char *refname,
+ const char **rewritten_ref,
+ int reload)
{
+ struct reftable_backend *be;
const char *wtname;
int wtname_len;
- if (!refname)
- return store->main_stack;
+ if (!refname) {
+ be = &store->main_backend;
+ goto out;
+ }
switch (parse_worktree_ref(refname, &wtname, &wtname_len, rewritten_ref)) {
case REF_WORKTREE_OTHER: {
static struct strbuf wtname_buf = STRBUF_INIT;
struct strbuf wt_dir = STRBUF_INIT;
- struct reftable_stack *stack;
/*
* We're using a static buffer here so that we don't need to
@@ -123,40 +216,55 @@ static struct reftable_stack *stack_for(struct reftable_ref_store *store,
/*
* There is an edge case here: when the worktree references the
* current worktree, then we set up the stack once via
- * `worktree_stacks` and once via `worktree_stack`. This is
+ * `worktree_backends` and once via `worktree_backend`. This is
* wasteful, but in the reading case it shouldn't matter. And
* in the writing case we would notice that the stack is locked
* already and error out when trying to write a reference via
* both stacks.
*/
- stack = strmap_get(&store->worktree_stacks, wtname_buf.buf);
- if (!stack) {
+ be = strmap_get(&store->worktree_backends, wtname_buf.buf);
+ if (!be) {
strbuf_addf(&wt_dir, "%s/worktrees/%s/reftable",
store->base.repo->commondir, wtname_buf.buf);
- store->err = reftable_new_stack(&stack, wt_dir.buf,
- &store->write_options);
+ CALLOC_ARRAY(be, 1);
+ store->err = reftable_backend_init(be, wt_dir.buf,
+ &store->write_options);
assert(store->err != REFTABLE_API_ERROR);
- strmap_put(&store->worktree_stacks, wtname_buf.buf, stack);
+
+ strmap_put(&store->worktree_backends, wtname_buf.buf, be);
}
strbuf_release(&wt_dir);
- return stack;
+ goto out;
}
case REF_WORKTREE_CURRENT:
/*
* If there is no worktree stack then we're currently in the
* main worktree. We thus return the main stack in that case.
*/
- if (!store->worktree_stack)
- return store->main_stack;
- return store->worktree_stack;
+ if (!store->worktree_backend.stack)
+ be = &store->main_backend;
+ else
+ be = &store->worktree_backend;
+ goto out;
case REF_WORKTREE_MAIN:
case REF_WORKTREE_SHARED:
- return store->main_stack;
+ be = &store->main_backend;
+ goto out;
default:
BUG("unhandled worktree reference type");
}
+
+out:
+ if (reload) {
+ int ret = reftable_stack_reload(be->stack);
+ if (ret)
+ return ret;
+ }
+ *out = be;
+
+ return 0;
}
static int should_write_log(struct reftable_ref_store *refs, const char *refname)
@@ -205,38 +313,6 @@ static void fill_reftable_log_record(struct reftable_log_record *log, const stru
log->value.update.tz_offset = sign * atoi(tz_begin);
}
-static int read_ref_without_reload(struct reftable_ref_store *refs,
- struct reftable_stack *stack,
- const char *refname,
- struct object_id *oid,
- struct strbuf *referent,
- unsigned int *type)
-{
- struct reftable_ref_record ref = {0};
- int ret;
-
- ret = reftable_stack_read_ref(stack, refname, &ref);
- if (ret)
- goto done;
-
- if (ref.value_type == REFTABLE_REF_SYMREF) {
- strbuf_reset(referent);
- strbuf_addstr(referent, ref.value.symref);
- *type |= REF_ISSYMREF;
- } else if (reftable_ref_record_val1(&ref)) {
- oidread(oid, reftable_ref_record_val1(&ref),
- refs->base.repo->hash_algo);
- } else {
- /* We got a tombstone, which should not happen. */
- BUG("unhandled reference value type %d", ref.value_type);
- }
-
-done:
- assert(ret != REFTABLE_API_ERROR);
- reftable_ref_record_release(&ref);
- return ret;
-}
-
static int reftable_be_config(const char *var, const char *value,
const struct config_context *ctx,
void *_opts)
@@ -272,6 +348,11 @@ static int reftable_be_config(const char *var, const char *value,
return 0;
}
+static int reftable_be_fsync(int fd)
+{
+ return fsync_component(FSYNC_COMPONENT_REFERENCE, fd);
+}
+
static struct ref_store *reftable_be_init(struct repository *repo,
const char *gitdir,
unsigned int store_flags)
@@ -285,15 +366,25 @@ static struct ref_store *reftable_be_init(struct repository *repo,
umask(mask);
base_ref_store_init(&refs->base, repo, gitdir, &refs_be_reftable);
- strmap_init(&refs->worktree_stacks);
+ strmap_init(&refs->worktree_backends);
refs->store_flags = store_flags;
refs->log_all_ref_updates = repo_settings_get_log_all_ref_updates(repo);
- refs->write_options.hash_id = repo->hash_algo->format_id;
+ switch (repo->hash_algo->format_id) {
+ case GIT_SHA1_FORMAT_ID:
+ refs->write_options.hash_id = REFTABLE_HASH_SHA1;
+ break;
+ case GIT_SHA256_FORMAT_ID:
+ refs->write_options.hash_id = REFTABLE_HASH_SHA256;
+ break;
+ default:
+ BUG("unknown hash algorithm %d", repo->hash_algo->format_id);
+ }
refs->write_options.default_permissions = calc_shared_perm(0666 & ~mask);
refs->write_options.disable_auto_compact =
!git_env_bool("GIT_TEST_REFTABLE_AUTOCOMPACTION", 1);
refs->write_options.lock_timeout_ms = 100;
+ refs->write_options.fsync = reftable_be_fsync;
git_config(reftable_be_config, &refs->write_options);
@@ -320,8 +411,8 @@ static struct ref_store *reftable_be_init(struct repository *repo,
strbuf_realpath(&path, gitdir, 0);
}
strbuf_addstr(&path, "/reftable");
- refs->err = reftable_new_stack(&refs->main_stack, path.buf,
- &refs->write_options);
+ refs->err = reftable_backend_init(&refs->main_backend, path.buf,
+ &refs->write_options);
if (refs->err)
goto done;
@@ -337,8 +428,8 @@ static struct ref_store *reftable_be_init(struct repository *repo,
strbuf_reset(&path);
strbuf_addf(&path, "%s/reftable", gitdir);
- refs->err = reftable_new_stack(&refs->worktree_stack, path.buf,
- &refs->write_options);
+ refs->err = reftable_backend_init(&refs->worktree_backend, path.buf,
+ &refs->write_options);
if (refs->err)
goto done;
}
@@ -357,19 +448,17 @@ static void reftable_be_release(struct ref_store *ref_store)
struct strmap_entry *entry;
struct hashmap_iter iter;
- if (refs->main_stack) {
- reftable_stack_destroy(refs->main_stack);
- refs->main_stack = NULL;
- }
+ if (refs->main_backend.stack)
+ reftable_backend_release(&refs->main_backend);
+ if (refs->worktree_backend.stack)
+ reftable_backend_release(&refs->worktree_backend);
- if (refs->worktree_stack) {
- reftable_stack_destroy(refs->worktree_stack);
- refs->worktree_stack = NULL;
+ strmap_for_each_entry(&refs->worktree_backends, &iter, entry) {
+ struct reftable_backend *be = entry->value;
+ reftable_backend_release(be);
+ free(be);
}
-
- strmap_for_each_entry(&refs->worktree_stacks, &iter, entry)
- reftable_stack_destroy(entry->value);
- strmap_clear(&refs->worktree_stacks, 0);
+ strmap_clear(&refs->worktree_backends, 0);
}
static int reftable_be_create_on_disk(struct ref_store *ref_store,
@@ -764,7 +853,7 @@ static struct ref_iterator *reftable_be_iterator_begin(struct ref_store *ref_sto
required_flags |= REF_STORE_ODB;
refs = reftable_be_downcast(ref_store, required_flags, "ref_iterator_begin");
- main_iter = ref_iterator_for_stack(refs, refs->main_stack, prefix,
+ main_iter = ref_iterator_for_stack(refs, refs->main_backend.stack, prefix,
exclude_patterns, flags);
/*
@@ -772,14 +861,14 @@ static struct ref_iterator *reftable_be_iterator_begin(struct ref_store *ref_sto
* right now. If we aren't, then we return the common reftable
* iterator, only.
*/
- if (!refs->worktree_stack)
+ if (!refs->worktree_backend.stack)
return &main_iter->base;
/*
* Otherwise we merge both the common and the per-worktree refs into a
* single iterator.
*/
- worktree_iter = ref_iterator_for_stack(refs, refs->worktree_stack, prefix,
+ worktree_iter = ref_iterator_for_stack(refs, refs->worktree_backend.stack, prefix,
exclude_patterns, flags);
return merge_ref_iterator_begin(&worktree_iter->base, &main_iter->base,
ref_iterator_select, NULL);
@@ -794,17 +883,17 @@ static int reftable_be_read_raw_ref(struct ref_store *ref_store,
{
struct reftable_ref_store *refs =
reftable_be_downcast(ref_store, REF_STORE_READ, "read_raw_ref");
- struct reftable_stack *stack = stack_for(refs, refname, &refname);
+ struct reftable_backend *be;
int ret;
if (refs->err < 0)
return refs->err;
- ret = reftable_stack_reload(stack);
+ ret = backend_for(&be, refs, refname, &refname, 1);
if (ret)
return ret;
- ret = read_ref_without_reload(refs, stack, refname, oid, referent, type);
+ ret = reftable_backend_read_ref(be, refname, oid, referent, type);
if (ret < 0)
return ret;
if (ret > 0) {
@@ -821,21 +910,22 @@ static int reftable_be_read_symbolic_ref(struct ref_store *ref_store,
{
struct reftable_ref_store *refs =
reftable_be_downcast(ref_store, REF_STORE_READ, "read_symbolic_ref");
- struct reftable_stack *stack = stack_for(refs, refname, &refname);
- struct reftable_ref_record ref = {0};
+ struct reftable_backend *be;
+ struct object_id oid;
+ unsigned int type = 0;
int ret;
- ret = reftable_stack_reload(stack);
+ ret = backend_for(&be, refs, refname, &refname, 1);
if (ret)
return ret;
- ret = reftable_stack_read_ref(stack, refname, &ref);
- if (ret == 0 && ref.value_type == REFTABLE_REF_SYMREF)
- strbuf_addstr(referent, ref.value.symref);
- else
+ ret = reftable_backend_read_ref(be, refname, &oid, referent, &type);
+ if (ret)
ret = -1;
-
- reftable_ref_record_release(&ref);
+ else if (type == REF_ISSYMREF)
+ ; /* happy */
+ else
+ ret = NOT_A_SYMREF;
return ret;
}
@@ -846,7 +936,7 @@ struct reftable_transaction_update {
struct write_transaction_table_arg {
struct reftable_ref_store *refs;
- struct reftable_stack *stack;
+ struct reftable_backend *be;
struct reftable_addition *addition;
struct reftable_transaction_update *updates;
size_t updates_nr;
@@ -881,27 +971,37 @@ static int prepare_transaction_update(struct write_transaction_table_arg **out,
struct ref_update *update,
struct strbuf *err)
{
- struct reftable_stack *stack = stack_for(refs, update->refname, NULL);
struct write_transaction_table_arg *arg = NULL;
+ struct reftable_backend *be;
size_t i;
int ret;
/*
+ * This function gets called in a loop, and we don't want to repeatedly
+ * reload the stack for every single ref update. Instead, we manually
+ * reload further down in the case where we haven't yet prepared the
+ * specific `reftable_backend`.
+ */
+ ret = backend_for(&be, refs, update->refname, NULL, 0);
+ if (ret)
+ return ret;
+
+ /*
* Search for a preexisting stack update. If there is one then we add
* the update to it, otherwise we set up a new stack update.
*/
for (i = 0; !arg && i < tx_data->args_nr; i++)
- if (tx_data->args[i].stack == stack)
+ if (tx_data->args[i].be == be)
arg = &tx_data->args[i];
if (!arg) {
struct reftable_addition *addition;
- ret = reftable_stack_reload(stack);
+ ret = reftable_stack_reload(be->stack);
if (ret)
return ret;
- ret = reftable_stack_new_addition(&addition, stack,
+ ret = reftable_stack_new_addition(&addition, be->stack,
REFTABLE_STACK_NEW_ADDITION_RELOAD);
if (ret) {
if (ret == REFTABLE_LOCK_ERROR)
@@ -913,7 +1013,7 @@ static int prepare_transaction_update(struct write_transaction_table_arg **out,
tx_data->args_alloc);
arg = &tx_data->args[tx_data->args_nr++];
arg->refs = refs;
- arg->stack = stack;
+ arg->be = be;
arg->addition = addition;
arg->updates = NULL;
arg->updates_nr = 0;
@@ -968,6 +1068,7 @@ static int reftable_be_transaction_prepare(struct ref_store *ref_store,
struct strbuf referent = STRBUF_INIT, head_referent = STRBUF_INIT;
struct string_list affected_refnames = STRING_LIST_INIT_NODUP;
struct reftable_transaction_data *tx_data = NULL;
+ struct reftable_backend *be;
struct object_id head_oid;
unsigned int head_type = 0;
size_t i;
@@ -1015,8 +1116,23 @@ static int reftable_be_transaction_prepare(struct ref_store *ref_store,
goto done;
}
- ret = read_ref_without_reload(refs, stack_for(refs, "HEAD", NULL), "HEAD",
- &head_oid, &head_referent, &head_type);
+ /*
+ * TODO: it's dubious whether we should reload the stack that "HEAD"
+ * belongs to or not. In theory, it may happen that we only modify
+ * stacks which are _not_ part of the "HEAD" stack. In that case we
+ * wouldn't have prepared any transaction for its stack and would not
+ * have reloaded it, which may mean that it is stale.
+ *
+ * On the other hand, reloading that stack without locking it feels
+ * wrong, too, as the value of "HEAD" could be modified concurrently at
+ * any point in time.
+ */
+ ret = backend_for(&be, refs, "HEAD", NULL, 0);
+ if (ret)
+ goto done;
+
+ ret = reftable_backend_read_ref(be, "HEAD", &head_oid,
+ &head_referent, &head_type);
if (ret < 0)
goto done;
ret = 0;
@@ -1024,10 +1140,18 @@ static int reftable_be_transaction_prepare(struct ref_store *ref_store,
for (i = 0; i < transaction->nr; i++) {
struct ref_update *u = transaction->updates[i];
struct object_id current_oid = {0};
- struct reftable_stack *stack;
const char *rewritten_ref;
- stack = stack_for(refs, u->refname, &rewritten_ref);
+ /*
+ * There is no need to reload the respective backends here as
+ * we have already reloaded them when preparing the transaction
+ * update. And given that the stacks have been locked there
+ * shouldn't have been any concurrent modifications of the
+ * stack.
+ */
+ ret = backend_for(&be, refs, u->refname, &rewritten_ref, 0);
+ if (ret)
+ goto done;
/* Verify that the new object ID is valid. */
if ((u->flags & REF_HAVE_NEW) && !is_null_oid(&u->new_oid) &&
@@ -1084,8 +1208,8 @@ static int reftable_be_transaction_prepare(struct ref_store *ref_store,
string_list_insert(&affected_refnames, new_update->refname);
}
- ret = read_ref_without_reload(refs, stack, rewritten_ref,
- &current_oid, &referent, &u->type);
+ ret = reftable_backend_read_ref(be, rewritten_ref,
+ &current_oid, &referent, &u->type);
if (ret < 0)
goto done;
if (ret > 0 && !ref_update_expects_existing_old_ref(u)) {
@@ -1211,10 +1335,13 @@ static int reftable_be_transaction_prepare(struct ref_store *ref_store,
goto done;
}
} else if ((u->flags & REF_HAVE_OLD) && !oideq(&current_oid, &u->old_oid)) {
- if (is_null_oid(&u->old_oid))
+ ret = TRANSACTION_NAME_CONFLICT;
+ if (is_null_oid(&u->old_oid)) {
strbuf_addf(err, _("cannot lock ref '%s': "
"reference already exists"),
ref_update_original_update_refname(u));
+ ret = TRANSACTION_CREATE_EXISTS;
+ }
else if (is_null_oid(&current_oid))
strbuf_addf(err, _("cannot lock ref '%s': "
"reference is missing but expected %s"),
@@ -1226,7 +1353,6 @@ static int reftable_be_transaction_prepare(struct ref_store *ref_store,
ref_update_original_update_refname(u),
oid_to_hex(&current_oid),
oid_to_hex(&u->old_oid));
- ret = -1;
goto done;
}
@@ -1298,7 +1424,7 @@ static int transaction_update_cmp(const void *a, const void *b)
static int write_transaction_table(struct reftable_writer *writer, void *cb_data)
{
struct write_transaction_table_arg *arg = cb_data;
- uint64_t ts = reftable_stack_next_update_index(arg->stack);
+ uint64_t ts = reftable_stack_next_update_index(arg->be->stack);
struct reftable_log_record *logs = NULL;
struct ident_split committer_ident = {0};
size_t logs_nr = 0, logs_alloc = 0, i;
@@ -1335,7 +1461,7 @@ static int write_transaction_table(struct reftable_writer *writer, void *cb_data
struct reftable_log_record log = {0};
struct reftable_iterator it = {0};
- ret = reftable_stack_init_log_iterator(arg->stack, &it);
+ ret = reftable_stack_init_log_iterator(arg->be->stack, &it);
if (ret < 0)
goto done;
@@ -1540,9 +1666,9 @@ static int reftable_be_pack_refs(struct ref_store *ref_store,
if (refs->err)
return refs->err;
- stack = refs->worktree_stack;
+ stack = refs->worktree_backend.stack;
if (!stack)
- stack = refs->main_stack;
+ stack = refs->main_backend.stack;
if (opts->flags & PACK_REFS_AUTO)
ret = reftable_stack_auto_compact(stack);
@@ -1573,7 +1699,7 @@ struct write_create_symref_arg {
struct write_copy_arg {
struct reftable_ref_store *refs;
- struct reftable_stack *stack;
+ struct reftable_backend *be;
const char *oldname;
const char *newname;
const char *logmsg;
@@ -1598,7 +1724,7 @@ static int write_copy_table(struct reftable_writer *writer, void *cb_data)
if (split_ident_line(&committer_ident, committer_info, strlen(committer_info)))
BUG("failed splitting committer info");
- if (reftable_stack_read_ref(arg->stack, arg->oldname, &old_ref)) {
+ if (reftable_stack_read_ref(arg->be->stack, arg->oldname, &old_ref)) {
ret = error(_("refname %s not found"), arg->oldname);
goto done;
}
@@ -1637,7 +1763,7 @@ static int write_copy_table(struct reftable_writer *writer, void *cb_data)
* the old branch and the creation of the new branch, and we cannot do
* two changes to a reflog in a single update.
*/
- deletion_ts = creation_ts = reftable_stack_next_update_index(arg->stack);
+ deletion_ts = creation_ts = reftable_stack_next_update_index(arg->be->stack);
if (arg->delete_old)
creation_ts++;
reftable_writer_set_limits(writer, deletion_ts, creation_ts);
@@ -1680,8 +1806,8 @@ static int write_copy_table(struct reftable_writer *writer, void *cb_data)
memcpy(logs[logs_nr].value.update.old_hash, old_ref.value.val1, GIT_MAX_RAWSZ);
logs_nr++;
- ret = read_ref_without_reload(arg->refs, arg->stack, "HEAD", &head_oid,
- &head_referent, &head_type);
+ ret = reftable_backend_read_ref(arg->be, "HEAD", &head_oid,
+ &head_referent, &head_type);
if (ret < 0)
goto done;
append_head_reflog = (head_type & REF_ISSYMREF) && !strcmp(head_referent.buf, arg->oldname);
@@ -1724,7 +1850,7 @@ static int write_copy_table(struct reftable_writer *writer, void *cb_data)
* copy over all log entries from the old reflog. Last but not least,
* when renaming we also have to delete all the old reflog entries.
*/
- ret = reftable_stack_init_log_iterator(arg->stack, &it);
+ ret = reftable_stack_init_log_iterator(arg->be->stack, &it);
if (ret < 0)
goto done;
@@ -1797,10 +1923,8 @@ static int reftable_be_rename_ref(struct ref_store *ref_store,
{
struct reftable_ref_store *refs =
reftable_be_downcast(ref_store, REF_STORE_WRITE, "rename_ref");
- struct reftable_stack *stack = stack_for(refs, newrefname, &newrefname);
struct write_copy_arg arg = {
.refs = refs,
- .stack = stack,
.oldname = oldrefname,
.newname = newrefname,
.logmsg = logmsg,
@@ -1812,10 +1936,10 @@ static int reftable_be_rename_ref(struct ref_store *ref_store,
if (ret < 0)
goto done;
- ret = reftable_stack_reload(stack);
+ ret = backend_for(&arg.be, refs, newrefname, &newrefname, 1);
if (ret)
goto done;
- ret = reftable_stack_add(stack, &write_copy_table, &arg);
+ ret = reftable_stack_add(arg.be->stack, &write_copy_table, &arg);
done:
assert(ret != REFTABLE_API_ERROR);
@@ -1829,10 +1953,8 @@ static int reftable_be_copy_ref(struct ref_store *ref_store,
{
struct reftable_ref_store *refs =
reftable_be_downcast(ref_store, REF_STORE_WRITE, "copy_ref");
- struct reftable_stack *stack = stack_for(refs, newrefname, &newrefname);
struct write_copy_arg arg = {
.refs = refs,
- .stack = stack,
.oldname = oldrefname,
.newname = newrefname,
.logmsg = logmsg,
@@ -1843,10 +1965,10 @@ static int reftable_be_copy_ref(struct ref_store *ref_store,
if (ret < 0)
goto done;
- ret = reftable_stack_reload(stack);
+ ret = backend_for(&arg.be, refs, newrefname, &newrefname, 1);
if (ret)
goto done;
- ret = reftable_stack_add(stack, &write_copy_table, &arg);
+ ret = reftable_stack_add(arg.be->stack, &write_copy_table, &arg);
done:
assert(ret != REFTABLE_API_ERROR);
@@ -1967,11 +2089,11 @@ static struct ref_iterator *reftable_be_reflog_iterator_begin(struct ref_store *
reftable_be_downcast(ref_store, REF_STORE_READ, "reflog_iterator_begin");
struct reftable_reflog_iterator *main_iter, *worktree_iter;
- main_iter = reflog_iterator_for_stack(refs, refs->main_stack);
- if (!refs->worktree_stack)
+ main_iter = reflog_iterator_for_stack(refs, refs->main_backend.stack);
+ if (!refs->worktree_backend.stack)
return &main_iter->base;
- worktree_iter = reflog_iterator_for_stack(refs, refs->worktree_stack);
+ worktree_iter = reflog_iterator_for_stack(refs, refs->worktree_backend.stack);
return merge_ref_iterator_begin(&worktree_iter->base, &main_iter->base,
ref_iterator_select, NULL);
@@ -2010,15 +2132,23 @@ static int reftable_be_for_each_reflog_ent_reverse(struct ref_store *ref_store,
{
struct reftable_ref_store *refs =
reftable_be_downcast(ref_store, REF_STORE_READ, "for_each_reflog_ent_reverse");
- struct reftable_stack *stack = stack_for(refs, refname, &refname);
struct reftable_log_record log = {0};
struct reftable_iterator it = {0};
+ struct reftable_backend *be;
int ret;
if (refs->err < 0)
return refs->err;
- ret = reftable_stack_init_log_iterator(stack, &it);
+ /*
+ * TODO: we should adapt this callsite to reload the stack. There is no
+ * obvious reason why we shouldn't.
+ */
+ ret = backend_for(&be, refs, refname, &refname, 0);
+ if (ret)
+ goto done;
+
+ ret = reftable_stack_init_log_iterator(be->stack, &it);
if (ret < 0)
goto done;
@@ -2050,16 +2180,24 @@ static int reftable_be_for_each_reflog_ent(struct ref_store *ref_store,
{
struct reftable_ref_store *refs =
reftable_be_downcast(ref_store, REF_STORE_READ, "for_each_reflog_ent");
- struct reftable_stack *stack = stack_for(refs, refname, &refname);
struct reftable_log_record *logs = NULL;
struct reftable_iterator it = {0};
+ struct reftable_backend *be;
size_t logs_alloc = 0, logs_nr = 0, i;
int ret;
if (refs->err < 0)
return refs->err;
- ret = reftable_stack_init_log_iterator(stack, &it);
+ /*
+ * TODO: we should adapt this callsite to reload the stack. There is no
+ * obvious reason why we shouldn't.
+ */
+ ret = backend_for(&be, refs, refname, &refname, 0);
+ if (ret)
+ goto done;
+
+ ret = reftable_stack_init_log_iterator(be->stack, &it);
if (ret < 0)
goto done;
@@ -2099,20 +2237,20 @@ static int reftable_be_reflog_exists(struct ref_store *ref_store,
{
struct reftable_ref_store *refs =
reftable_be_downcast(ref_store, REF_STORE_READ, "reflog_exists");
- struct reftable_stack *stack = stack_for(refs, refname, &refname);
struct reftable_log_record log = {0};
struct reftable_iterator it = {0};
+ struct reftable_backend *be;
int ret;
ret = refs->err;
if (ret < 0)
goto done;
- ret = reftable_stack_reload(stack);
+ ret = backend_for(&be, refs, refname, &refname, 1);
if (ret < 0)
goto done;
- ret = reftable_stack_init_log_iterator(stack, &it);
+ ret = reftable_stack_init_log_iterator(be->stack, &it);
if (ret < 0)
goto done;
@@ -2184,10 +2322,9 @@ static int reftable_be_create_reflog(struct ref_store *ref_store,
{
struct reftable_ref_store *refs =
reftable_be_downcast(ref_store, REF_STORE_WRITE, "create_reflog");
- struct reftable_stack *stack = stack_for(refs, refname, &refname);
+ struct reftable_backend *be;
struct write_reflog_existence_arg arg = {
.refs = refs,
- .stack = stack,
.refname = refname,
};
int ret;
@@ -2196,11 +2333,12 @@ static int reftable_be_create_reflog(struct ref_store *ref_store,
if (ret < 0)
goto done;
- ret = reftable_stack_reload(stack);
+ ret = backend_for(&be, refs, refname, &refname, 1);
if (ret)
goto done;
+ arg.stack = be->stack;
- ret = reftable_stack_add(stack, &write_reflog_existence_table, &arg);
+ ret = reftable_stack_add(be->stack, &write_reflog_existence_table, &arg);
done:
return ret;
@@ -2258,17 +2396,18 @@ static int reftable_be_delete_reflog(struct ref_store *ref_store,
{
struct reftable_ref_store *refs =
reftable_be_downcast(ref_store, REF_STORE_WRITE, "delete_reflog");
- struct reftable_stack *stack = stack_for(refs, refname, &refname);
+ struct reftable_backend *be;
struct write_reflog_delete_arg arg = {
- .stack = stack,
.refname = refname,
};
int ret;
- ret = reftable_stack_reload(stack);
+ ret = backend_for(&be, refs, refname, &refname, 1);
if (ret)
return ret;
- ret = reftable_stack_add(stack, &write_reflog_delete_table, &arg);
+ arg.stack = be->stack;
+
+ ret = reftable_stack_add(be->stack, &write_reflog_delete_table, &arg);
assert(ret != REFTABLE_API_ERROR);
return ret;
@@ -2367,26 +2506,27 @@ static int reftable_be_reflog_expire(struct ref_store *ref_store,
*/
struct reftable_ref_store *refs =
reftable_be_downcast(ref_store, REF_STORE_WRITE, "reflog_expire");
- struct reftable_stack *stack = stack_for(refs, refname, &refname);
struct reftable_log_record *logs = NULL;
struct reftable_log_record *rewritten = NULL;
- struct reftable_ref_record ref_record = {0};
struct reftable_iterator it = {0};
struct reftable_addition *add = NULL;
struct reflog_expiry_arg arg = {0};
+ struct reftable_backend *be;
struct object_id oid = {0};
+ struct strbuf referent = STRBUF_INIT;
uint8_t *last_hash = NULL;
size_t logs_nr = 0, logs_alloc = 0, i;
+ unsigned int type = 0;
int ret;
if (refs->err < 0)
return refs->err;
- ret = reftable_stack_reload(stack);
+ ret = backend_for(&be, refs, refname, &refname, 1);
if (ret < 0)
goto done;
- ret = reftable_stack_init_log_iterator(stack, &it);
+ ret = reftable_stack_init_log_iterator(be->stack, &it);
if (ret < 0)
goto done;
@@ -2394,16 +2534,13 @@ static int reftable_be_reflog_expire(struct ref_store *ref_store,
if (ret < 0)
goto done;
- ret = reftable_stack_new_addition(&add, stack, 0);
+ ret = reftable_stack_new_addition(&add, be->stack, 0);
if (ret < 0)
goto done;
- ret = reftable_stack_read_ref(stack, refname, &ref_record);
+ ret = reftable_backend_read_ref(be, refname, &oid, &referent, &type);
if (ret < 0)
goto done;
- if (reftable_ref_record_val1(&ref_record))
- oidread(&oid, reftable_ref_record_val1(&ref_record),
- ref_store->repo->hash_algo);
prepare_fn(refname, &oid, policy_cb_data);
while (1) {
@@ -2470,15 +2607,14 @@ static int reftable_be_reflog_expire(struct ref_store *ref_store,
}
}
- if (flags & EXPIRE_REFLOGS_UPDATE_REF && last_hash &&
- reftable_ref_record_val1(&ref_record))
+ if (flags & EXPIRE_REFLOGS_UPDATE_REF && last_hash && !is_null_oid(&oid))
oidread(&arg.update_oid, last_hash, ref_store->repo->hash_algo);
arg.refs = refs;
arg.records = rewritten;
arg.len = logs_nr;
- arg.stack = stack,
- arg.refname = refname,
+ arg.stack = be->stack;
+ arg.refname = refname;
ret = reftable_addition_add(add, &write_reflog_expiry_table, &arg);
if (ret < 0)
@@ -2496,11 +2632,11 @@ done:
cleanup_fn(policy_cb_data);
assert(ret != REFTABLE_API_ERROR);
- reftable_ref_record_release(&ref_record);
reftable_iterator_destroy(&it);
reftable_addition_destroy(add);
for (i = 0; i < logs_nr; i++)
reftable_log_record_release(&logs[i]);
+ strbuf_release(&referent);
free(logs);
free(rewritten);
return ret;
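
Taken together, the reftable-backend.c changes wrap each reftable_stack in a reftable_backend that caches a ref iterator: reftable_backend_read_ref() re-seeks that cached iterator instead of constructing a fresh one per lookup, and the on_reload callback releases it whenever the stack reloads so a stale iterator can never be reused. A condensed sketch of the resulting read path (identifiers as defined in the hunks above; error handling trimmed):

        struct reftable_backend *be;
        struct strbuf referent = STRBUF_INIT;
        struct object_id oid;
        unsigned int type = 0;
        int ret;

        /*
         * reload=1 on entry points that must observe concurrent writes;
         * reload=0 inside transaction loops, where backend_for() has
         * already reloaded the stack and the stack is locked.
         */
        ret = backend_for(&be, refs, "refs/heads/main", NULL, 1);
        if (!ret)
                ret = reftable_backend_read_ref(be, "refs/heads/main",
                                                &oid, &referent, &type);

        strbuf_release(&referent);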