author     David Lamparter <equinox@diac24.net>  2019-06-25 20:33:02 +0200
committer  David Lamparter <equinox@diac24.net>  2019-07-31 03:33:41 +0200
commit     30ef834ab3b50c09e103a82742b67ae4b0bac9f5 (patch)
tree       f6ec88f064e058d78dadabe10bcb87aa58dc9157 /lib/seqlock.h
parent     lib/seqlock: add timed-wait operation (diff)
lib/seqlock: add a few more comments
Signed-off-by: David Lamparter <equinox@opensourcerouting.org>
Diffstat (limited to 'lib/seqlock.h')
-rw-r--r--  lib/seqlock.h  28
1 file changed, 26 insertions(+), 2 deletions(-)
diff --git a/lib/seqlock.h b/lib/seqlock.h
index 3cce9ccf4..b551e3ffc 100644
--- a/lib/seqlock.h
+++ b/lib/seqlock.h
@@ -61,12 +61,22 @@ typedef _Atomic uint32_t seqlock_ctr_t;
typedef uint32_t seqlock_val_t;
#define seqlock_assert_valid(val) assert((val) & SEQLOCK_HELD)
+/* NB: SEQLOCK_WAITERS is only allowed if SEQLOCK_HELD is also set; can't
+ * have waiters on an unheld seqlock
+ */
#define SEQLOCK_HELD (1U << 0)
#define SEQLOCK_WAITERS (1U << 1)
#define SEQLOCK_VAL(n) ((n) & ~SEQLOCK_WAITERS)
#define SEQLOCK_STARTVAL 1U
#define SEQLOCK_INCR 4U
+/* TODO: originally, this was using "atomic_fetch_add", which is the reason
+ * bit 0 is used to indicate held state. With SEQLOCK_WAITERS added, there's
+ * no fetch_add anymore (cmpxchg loop instead), so we don't need to use bit 0
+ * for this anymore & can just special-case the value 0 for it and skip it in
+ * counting.
+ */
+
struct seqlock {
/* always used */
seqlock_ctr_t pos;
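
As a quick illustration of the bit layout described in the comments above (a sketch, not part of the patch itself; the value 7 is just an example):

	#include <stdbool.h>
	#include <stdint.h>
	#include "lib/seqlock.h"

	/* decompose a raw counter value: 7 = position 5, held, with waiters */
	void example_decompose(void)
	{
		uint32_t raw = 7;
		bool held    = raw & SEQLOCK_HELD;	/* bit 0 set: lock is held */
		bool waiters = raw & SEQLOCK_WAITERS;	/* bit 1 set: threads blocked,
							 * only valid while held */
		uint32_t pos = SEQLOCK_VAL(raw);	/* == 5; waiter bit masked out */
	}
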
@@ -80,10 +90,16 @@ struct seqlock {
extern void seqlock_init(struct seqlock *sqlo);
-/* while (sqlo <= val) - wait until seqlock->pos > val, or seqlock unheld */
+/* basically: "while (sqlo <= val) wait();"
+ * returns when sqlo > val || !seqlock_held(sqlo)
+ */
extern void seqlock_wait(struct seqlock *sqlo, seqlock_val_t val);
+
+/* same, but time-limited (limit is an absolute CLOCK_MONOTONIC value) */
extern bool seqlock_timedwait(struct seqlock *sqlo, seqlock_val_t val,
const struct timespec *abs_monotime_limit);
+
+/* one-shot test, returns true if seqlock_wait would return immediately */
extern bool seqlock_check(struct seqlock *sqlo, seqlock_val_t val);
static inline bool seqlock_held(struct seqlock *sqlo)
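
Putting the wait primitives together, a hypothetical reader could look like this. This is assumed caller code for illustration, not from the header; it presumes some other thread holds the seqlock and bumps it as it makes progress, and it assumes the bool return of seqlock_timedwait() is false when the time limit expired (not shown in this diff):

	#include <stdbool.h>
	#include <time.h>
	#include "lib/seqlock.h"

	/* returns true once the writer has passed "target" (or released the
	 * lock), false if the ~5 second limit expired first */
	bool wait_for_writer(struct seqlock *writer, seqlock_val_t target)
	{
		struct timespec limit;

		if (seqlock_check(writer, target))
			return true;		/* would not block: done already */

		clock_gettime(CLOCK_MONOTONIC, &limit);
		limit.tv_sec += 5;		/* absolute CLOCK_MONOTONIC deadline */

		return seqlock_timedwait(writer, target, &limit);
	}
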
@@ -93,12 +109,20 @@ static inline bool seqlock_held(struct seqlock *sqlo)
/* sqlo - get seqlock position -- for the "counter" seqlock */
extern seqlock_val_t seqlock_cur(struct seqlock *sqlo);
-/* sqlo++ - note: like x++, returns previous value, before bumping */
+
+/* ++sqlo (but atomic & wakes waiters) - returns value that we bumped to.
+ *
+ * guarantees:
+ * - each seqlock_bump call bumps the position by exactly one SEQLOCK_INCR.
+ * There are no skipped/missed or multiple increments.
+ * - each return value is only returned from one seqlock_bump() call
+ */
extern seqlock_val_t seqlock_bump(struct seqlock *sqlo);
/* sqlo = val - can be used on held seqlock. */
extern void seqlock_acquire_val(struct seqlock *sqlo, seqlock_val_t val);
+
/* sqlo = ref - standard pattern: acquire relative to other seqlock */
static inline void seqlock_acquire(struct seqlock *sqlo, struct seqlock *ref)
{
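
For completeness, the writer side of the same hypothetical pairing (a sketch; the seqlock_release() counterpart and the work loop are assumptions, not shown in this diff):

	#include "lib/seqlock.h"

	void writer_loop(struct seqlock *writer)
	{
		seqlock_init(writer);
		seqlock_acquire_val(writer, SEQLOCK_STARTVAL);	/* held, pos = 1 */

		for (int i = 0; i < 10; i++) {
			/* ... do one unit of work ... */
			seqlock_bump(writer);	/* pos += SEQLOCK_INCR, wakes any
						 * threads blocked in seqlock_wait() */
		}
		seqlock_release(writer);	/* assumed counterpart, not in this
						 * hunk; unheld state releases all
						 * remaining waiters */
	}
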