Diffstat (limited to 'drivers/md/dm-bufio.c')
-rw-r--r--  drivers/md/dm-bufio.c | 1938
1 file changed, 1342 insertions(+), 596 deletions(-)
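The patch's core change is to replace dm-bufio's LRU lists with a clock (second-chance) scheme and to split the buffer index across several rb-trees, each guarded by its own rw_semaphore. For orientation before the diff itself, here is a minimal user-space sketch of the second-chance idea. It is not part of the patch; the fixed-size slot array, the names and the access trace are all illustrative (the patch keeps entries on a circular list instead).

#include <stdbool.h>
#include <stdio.h>

#define NSLOTS 4

struct slot {
	int block;		/* -1 == empty slot */
	bool referenced;	/* set on hit, cleared by the sweeping hand */
};

static struct slot slots[NSLOTS];
static unsigned int hand;	/* the clock cursor */

/* Pick a slot to reuse, giving referenced slots a second chance. */
static unsigned int clock_evict(void)
{
	for (;;) {
		if (slots[hand].referenced) {
			slots[hand].referenced = false;	/* second chance */
			hand = (hand + 1) % NSLOTS;
		} else {
			unsigned int victim = hand;

			hand = (hand + 1) % NSLOTS;
			return victim;
		}
	}
}

static void access_block(int block)
{
	for (unsigned int i = 0; i < NSLOTS; i++) {
		if (slots[i].block == block) {
			slots[i].referenced = true;	/* a 'hit' */
			return;
		}
	}

	unsigned int v = clock_evict();

	if (slots[v].block != -1)
		printf("evict %d\n", slots[v].block);
	slots[v].block = block;
	slots[v].referenced = false;	/* start at 0, as lru_insert() insists */
}

int main(void)
{
	int trace[] = { 1, 2, 3, 1, 4, 5, 1 };

	for (unsigned int i = 0; i < NSLOTS; i++)
		slots[i].block = -1;
	for (unsigned int i = 0; i < sizeof(trace) / sizeof(*trace); i++)
		access_block(trace[i]);
	return 0;	/* prints "evict 2": block 1 was referenced and survives */
}

The patch expresses the same idea as a circular list of struct lru_entry with an atomic reference bit: insertion and referencing stay O(1), and the sweeping cursor approximates least-recently-used without reordering the list on every hit, which is what made the old LRU lists a lock-contention hot spot.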
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index cf077f9b30c3..8a448185662c 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c @@ -21,6 +21,8 @@ #include <linux/stacktrace.h> #include <linux/jump_label.h> +#include "dm.h" + #define DM_MSG_PREFIX "bufio" /* @@ -66,57 +68,241 @@ #define LIST_DIRTY 1 #define LIST_SIZE 2 +/*--------------------------------------------------------------*/ + /* - * Linking of buffers: - * All buffers are linked to buffer_tree with their node field. - * - * Clean buffers that are not being written (B_WRITING not set) - * are linked to lru[LIST_CLEAN] with their lru_list field. - * - * Dirty and clean buffers that are being written are linked to - * lru[LIST_DIRTY] with their lru_list field. When the write - * finishes, the buffer cannot be relinked immediately (because we - * are in an interrupt context and relinking requires process - * context), so some clean-not-writing buffers can be held on - * dirty_lru too. They are later added to lru in the process - * context. + * Rather than use an LRU list, we use a clock algorithm where entries + * are held in a circular list. When an entry is 'hit' a reference bit + * is set. The least recently used entry is approximated by running a + * cursor around the list selecting unreferenced entries. Referenced + * entries have their reference bit cleared as the cursor passes them. */ -struct dm_bufio_client { - struct mutex lock; - spinlock_t spinlock; - bool no_sleep; +struct lru_entry { + struct list_head list; + atomic_t referenced; +}; - struct list_head lru[LIST_SIZE]; - unsigned long n_buffers[LIST_SIZE]; +struct lru_iter { + struct lru *lru; + struct list_head list; + struct lru_entry *stop; + struct lru_entry *e; +}; - struct block_device *bdev; - unsigned int block_size; - s8 sectors_per_block_bits; - void (*alloc_callback)(struct dm_buffer *buf); - void (*write_callback)(struct dm_buffer *buf); - struct kmem_cache *slab_buffer; - struct kmem_cache *slab_cache; - struct dm_io_client *dm_io; +struct lru { + struct list_head *cursor; + unsigned long count; - struct list_head reserved_buffers; - unsigned int need_reserved_buffers; + struct list_head iterators; +}; - unsigned int minimum_buffers; +/*--------------*/ - struct rb_root buffer_tree; - wait_queue_head_t free_buffer_wait; +static void lru_init(struct lru *lru) +{ + lru->cursor = NULL; + lru->count = 0; + INIT_LIST_HEAD(&lru->iterators); +} - sector_t start; +static void lru_destroy(struct lru *lru) +{ + WARN_ON_ONCE(lru->cursor); + WARN_ON_ONCE(!list_empty(&lru->iterators)); +} - int async_write_error; +/* + * Insert a new entry into the lru. + */ +static void lru_insert(struct lru *lru, struct lru_entry *le) +{ + /* + * Don't be tempted to set to 1, makes the lru aspect + * perform poorly. + */ + atomic_set(&le->referenced, 0); - struct list_head client_list; + if (lru->cursor) { + list_add_tail(&le->list, lru->cursor); + } else { + INIT_LIST_HEAD(&le->list); + lru->cursor = &le->list; + } + lru->count++; +} - struct shrinker shrinker; - struct work_struct shrink_work; - atomic_long_t need_shrink; +/*--------------*/ + +/* + * Convert a list_head pointer to an lru_entry pointer. + */ +static inline struct lru_entry *to_le(struct list_head *l) +{ + return container_of(l, struct lru_entry, list); +} + +/* + * Initialize an lru_iter and add it to the list of cursors in the lru. + */ +static void lru_iter_begin(struct lru *lru, struct lru_iter *it) +{ + it->lru = lru; + it->stop = lru->cursor ? 
to_le(lru->cursor->prev) : NULL; + it->e = lru->cursor ? to_le(lru->cursor) : NULL; + list_add(&it->list, &lru->iterators); +} + +/* + * Remove an lru_iter from the list of cursors in the lru. + */ +static inline void lru_iter_end(struct lru_iter *it) +{ + list_del(&it->list); +} + +/* Predicate function type to be used with lru_iter_next */ +typedef bool (*iter_predicate)(struct lru_entry *le, void *context); + +/* + * Advance the cursor to the next entry that passes the + * predicate, and return that entry. Returns NULL if the + * iteration is complete. + */ +static struct lru_entry *lru_iter_next(struct lru_iter *it, + iter_predicate pred, void *context) +{ + struct lru_entry *e; + + while (it->e) { + e = it->e; + + /* advance the cursor */ + if (it->e == it->stop) + it->e = NULL; + else + it->e = to_le(it->e->list.next); + + if (pred(e, context)) + return e; + } + + return NULL; +} + +/* + * Invalidate a specific lru_entry and update all cursors in + * the lru accordingly. + */ +static void lru_iter_invalidate(struct lru *lru, struct lru_entry *e) +{ + struct lru_iter *it; + + list_for_each_entry(it, &lru->iterators, list) { + /* Move c->e forwards if necc. */ + if (it->e == e) { + it->e = to_le(it->e->list.next); + if (it->e == e) + it->e = NULL; + } + + /* Move it->stop backwards if necc. */ + if (it->stop == e) { + it->stop = to_le(it->stop->list.prev); + if (it->stop == e) + it->stop = NULL; + } + } +} + +/*--------------*/ + +/* + * Remove a specific entry from the lru. + */ +static void lru_remove(struct lru *lru, struct lru_entry *le) +{ + lru_iter_invalidate(lru, le); + if (lru->count == 1) { + lru->cursor = NULL; + } else { + if (lru->cursor == &le->list) + lru->cursor = lru->cursor->next; + list_del(&le->list); + } + lru->count--; +} + +/* + * Mark as referenced. + */ +static inline void lru_reference(struct lru_entry *le) +{ + atomic_set(&le->referenced, 1); +} + +/*--------------*/ + +/* + * Remove the least recently used entry (approx), that passes the predicate. + * Returns NULL on failure. + */ +enum evict_result { + ER_EVICT, + ER_DONT_EVICT, + ER_STOP, /* stop looking for something to evict */ }; +typedef enum evict_result (*le_predicate)(struct lru_entry *le, void *context); + +static struct lru_entry *lru_evict(struct lru *lru, le_predicate pred, void *context) +{ + unsigned long tested = 0; + struct list_head *h = lru->cursor; + struct lru_entry *le; + + if (!h) + return NULL; + /* + * In the worst case we have to loop around twice. Once to clear + * the reference flags, and then again to discover the predicate + * fails for all entries. + */ + while (tested < lru->count) { + le = container_of(h, struct lru_entry, list); + + if (atomic_read(&le->referenced)) { + atomic_set(&le->referenced, 0); + } else { + tested++; + switch (pred(le, context)) { + case ER_EVICT: + /* + * Adjust the cursor, so we start the next + * search from here. + */ + lru->cursor = le->list.next; + lru_remove(lru, le); + return le; + + case ER_DONT_EVICT: + break; + + case ER_STOP: + lru->cursor = le->list.next; + return NULL; + } + } + + h = h->next; + + cond_resched(); + } + + return NULL; +} + +/*--------------------------------------------------------------*/ + /* * Buffer state bits. 
*/ @@ -137,26 +323,37 @@ enum data_mode { }; struct dm_buffer { + /* protected by the locks in dm_buffer_cache */ struct rb_node node; - struct list_head lru_list; - struct list_head global_list; + + /* immutable, so don't need protecting */ sector_t block; void *data; unsigned char data_mode; /* DATA_MODE_* */ + + /* + * These two fields are used in isolation, so do not need + * a surrounding lock. + */ + atomic_t hold_count; + unsigned long last_accessed; + + /* + * Everything else is protected by the mutex in + * dm_bufio_client + */ + unsigned long state; + struct lru_entry lru; unsigned char list_mode; /* LIST_* */ blk_status_t read_error; blk_status_t write_error; - unsigned int accessed; - unsigned int hold_count; - unsigned long state; - unsigned long last_accessed; unsigned int dirty_start; unsigned int dirty_end; unsigned int write_start; unsigned int write_end; - struct dm_bufio_client *c; struct list_head write_list; - void (*end_io)(struct dm_buffer *buf, blk_status_t stat); + struct dm_bufio_client *c; + void (*end_io)(struct dm_buffer *b, blk_status_t bs); #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING #define MAX_STACK 10 unsigned int stack_len; @@ -164,126 +361,507 @@ struct dm_buffer { #endif }; -static DEFINE_STATIC_KEY_FALSE(no_sleep_enabled); +/*--------------------------------------------------------------*/ -/*----------------------------------------------------------------*/ +/* + * The buffer cache manages buffers, particularly: + * - inc/dec of holder count + * - setting the last_accessed field + * - maintains clean/dirty state along with lru + * - selecting buffers that match predicates + * + * It does *not* handle: + * - allocation/freeing of buffers. + * - IO + * - Eviction or cache sizing. + * + * cache_get() and cache_put() are threadsafe, you do not need to + * protect these calls with a surrounding mutex. All the other + * methods are not threadsafe; they do use locking primitives, but + * only enough to ensure get/put are threadsafe. + */ -#define dm_bufio_in_request() (!!current->bio_list) +struct buffer_tree { + struct rw_semaphore lock; + struct rb_root root; +} ____cacheline_aligned_in_smp; -static void dm_bufio_lock(struct dm_bufio_client *c) +struct dm_buffer_cache { + struct lru lru[LIST_SIZE]; + /* + * We spread entries across multiple trees to reduce contention + * on the locks. 
+ */ + unsigned int num_locks; + struct buffer_tree trees[]; +}; + +static inline unsigned int cache_index(sector_t block, unsigned int num_locks) { - if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep) - spin_lock_bh(&c->spinlock); - else - mutex_lock_nested(&c->lock, dm_bufio_in_request()); + return dm_hash_locks_index(block, num_locks); } -static int dm_bufio_trylock(struct dm_bufio_client *c) +static inline void cache_read_lock(struct dm_buffer_cache *bc, sector_t block) { - if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep) - return spin_trylock_bh(&c->spinlock); + down_read(&bc->trees[cache_index(block, bc->num_locks)].lock); +} + +static inline void cache_read_unlock(struct dm_buffer_cache *bc, sector_t block) +{ + up_read(&bc->trees[cache_index(block, bc->num_locks)].lock); +} + +static inline void cache_write_lock(struct dm_buffer_cache *bc, sector_t block) +{ + down_write(&bc->trees[cache_index(block, bc->num_locks)].lock); +} + +static inline void cache_write_unlock(struct dm_buffer_cache *bc, sector_t block) +{ + up_write(&bc->trees[cache_index(block, bc->num_locks)].lock); +} + +/* + * Sometimes we want to repeatedly get and drop locks as part of an iteration. + * This struct helps avoid redundant drop and gets of the same lock. + */ +struct lock_history { + struct dm_buffer_cache *cache; + bool write; + unsigned int previous; + unsigned int no_previous; +}; + +static void lh_init(struct lock_history *lh, struct dm_buffer_cache *cache, bool write) +{ + lh->cache = cache; + lh->write = write; + lh->no_previous = cache->num_locks; + lh->previous = lh->no_previous; +} + +static void __lh_lock(struct lock_history *lh, unsigned int index) +{ + if (lh->write) + down_write(&lh->cache->trees[index].lock); else - return mutex_trylock(&c->lock); + down_read(&lh->cache->trees[index].lock); } -static void dm_bufio_unlock(struct dm_bufio_client *c) +static void __lh_unlock(struct lock_history *lh, unsigned int index) { - if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep) - spin_unlock_bh(&c->spinlock); + if (lh->write) + up_write(&lh->cache->trees[index].lock); else - mutex_unlock(&c->lock); + up_read(&lh->cache->trees[index].lock); } -/*----------------------------------------------------------------*/ +/* + * Make sure you call this since it will unlock the final lock. + */ +static void lh_exit(struct lock_history *lh) +{ + if (lh->previous != lh->no_previous) { + __lh_unlock(lh, lh->previous); + lh->previous = lh->no_previous; + } +} /* - * Default cache size: available memory divided by the ratio. + * Named 'next' because there is no corresponding + * 'up/unlock' call since it's done automatically. 
*/ -static unsigned long dm_bufio_default_cache_size; +static void lh_next(struct lock_history *lh, sector_t b) +{ + unsigned int index = cache_index(b, lh->no_previous); /* no_previous is num_locks */ + + if (lh->previous != lh->no_previous) { + if (lh->previous != index) { + __lh_unlock(lh, lh->previous); + __lh_lock(lh, index); + lh->previous = index; + } + } else { + __lh_lock(lh, index); + lh->previous = index; + } +} + +static inline struct dm_buffer *le_to_buffer(struct lru_entry *le) +{ + return container_of(le, struct dm_buffer, lru); +} + +static struct dm_buffer *list_to_buffer(struct list_head *l) +{ + struct lru_entry *le = list_entry(l, struct lru_entry, list); + + if (!le) + return NULL; + + return le_to_buffer(le); +} + +static void cache_init(struct dm_buffer_cache *bc, unsigned int num_locks) +{ + unsigned int i; + + bc->num_locks = num_locks; + + for (i = 0; i < bc->num_locks; i++) { + init_rwsem(&bc->trees[i].lock); + bc->trees[i].root = RB_ROOT; + } + + lru_init(&bc->lru[LIST_CLEAN]); + lru_init(&bc->lru[LIST_DIRTY]); +} + +static void cache_destroy(struct dm_buffer_cache *bc) +{ + unsigned int i; + + for (i = 0; i < bc->num_locks; i++) + WARN_ON_ONCE(!RB_EMPTY_ROOT(&bc->trees[i].root)); + + lru_destroy(&bc->lru[LIST_CLEAN]); + lru_destroy(&bc->lru[LIST_DIRTY]); +} + +/*--------------*/ /* - * Total cache size set by the user. + * not threadsafe, or racey depending how you look at it */ -static unsigned long dm_bufio_cache_size; +static inline unsigned long cache_count(struct dm_buffer_cache *bc, int list_mode) +{ + return bc->lru[list_mode].count; +} + +static inline unsigned long cache_total(struct dm_buffer_cache *bc) +{ + return cache_count(bc, LIST_CLEAN) + cache_count(bc, LIST_DIRTY); +} + +/*--------------*/ /* - * A copy of dm_bufio_cache_size because dm_bufio_cache_size can change - * at any time. If it disagrees, the user has changed cache size. + * Gets a specific buffer, indexed by block. + * If the buffer is found then its holder count will be incremented and + * lru_reference will be called. + * + * threadsafe */ -static unsigned long dm_bufio_cache_size_latch; +static struct dm_buffer *__cache_get(const struct rb_root *root, sector_t block) +{ + struct rb_node *n = root->rb_node; + struct dm_buffer *b; -static DEFINE_SPINLOCK(global_spinlock); + while (n) { + b = container_of(n, struct dm_buffer, node); + + if (b->block == block) + return b; + + n = block < b->block ? n->rb_left : n->rb_right; + } + + return NULL; +} + +static void __cache_inc_buffer(struct dm_buffer *b) +{ + atomic_inc(&b->hold_count); + WRITE_ONCE(b->last_accessed, jiffies); +} + +static struct dm_buffer *cache_get(struct dm_buffer_cache *bc, sector_t block) +{ + struct dm_buffer *b; + + cache_read_lock(bc, block); + b = __cache_get(&bc->trees[cache_index(block, bc->num_locks)].root, block); + if (b) { + lru_reference(&b->lru); + __cache_inc_buffer(b); + } + cache_read_unlock(bc, block); -static LIST_HEAD(global_queue); + return b; +} -static unsigned long global_num; +/*--------------*/ /* - * Buffers are freed after this timeout + * Returns true if the hold count hits zero. 
+ * threadsafe */ -static unsigned int dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS; -static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES; +static bool cache_put(struct dm_buffer_cache *bc, struct dm_buffer *b) +{ + bool r; -static unsigned long dm_bufio_peak_allocated; -static unsigned long dm_bufio_allocated_kmem_cache; -static unsigned long dm_bufio_allocated_get_free_pages; -static unsigned long dm_bufio_allocated_vmalloc; -static unsigned long dm_bufio_current_allocated; + cache_read_lock(bc, b->block); + BUG_ON(!atomic_read(&b->hold_count)); + r = atomic_dec_and_test(&b->hold_count); + cache_read_unlock(bc, b->block); -/*----------------------------------------------------------------*/ + return r; +} + +/*--------------*/ + +typedef enum evict_result (*b_predicate)(struct dm_buffer *, void *); /* - * The current number of clients. + * Evicts a buffer based on a predicate. The oldest buffer that + * matches the predicate will be selected. In addition to the + * predicate the hold_count of the selected buffer will be zero. */ -static int dm_bufio_client_count; +struct evict_wrapper { + struct lock_history *lh; + b_predicate pred; + void *context; +}; /* - * The list of all clients. + * Wraps the buffer predicate turning it into an lru predicate. Adds + * extra test for hold_count. */ -static LIST_HEAD(dm_bufio_all_clients); +static enum evict_result __evict_pred(struct lru_entry *le, void *context) +{ + struct evict_wrapper *w = context; + struct dm_buffer *b = le_to_buffer(le); + + lh_next(w->lh, b->block); + + if (atomic_read(&b->hold_count)) + return ER_DONT_EVICT; + + return w->pred(b, w->context); +} + +static struct dm_buffer *__cache_evict(struct dm_buffer_cache *bc, int list_mode, + b_predicate pred, void *context, + struct lock_history *lh) +{ + struct evict_wrapper w = {.lh = lh, .pred = pred, .context = context}; + struct lru_entry *le; + struct dm_buffer *b; + + le = lru_evict(&bc->lru[list_mode], __evict_pred, &w); + if (!le) + return NULL; + + b = le_to_buffer(le); + /* __evict_pred will have locked the appropriate tree. */ + rb_erase(&b->node, &bc->trees[cache_index(b->block, bc->num_locks)].root); + + return b; +} + +static struct dm_buffer *cache_evict(struct dm_buffer_cache *bc, int list_mode, + b_predicate pred, void *context) +{ + struct dm_buffer *b; + struct lock_history lh; + + lh_init(&lh, bc, true); + b = __cache_evict(bc, list_mode, pred, context, &lh); + lh_exit(&lh); + + return b; +} + +/*--------------*/ /* - * This mutex protects dm_bufio_cache_size_latch and dm_bufio_client_count + * Mark a buffer as clean or dirty. Not threadsafe. */ -static DEFINE_MUTEX(dm_bufio_clients_lock); +static void cache_mark(struct dm_buffer_cache *bc, struct dm_buffer *b, int list_mode) +{ + cache_write_lock(bc, b->block); + if (list_mode != b->list_mode) { + lru_remove(&bc->lru[b->list_mode], &b->lru); + b->list_mode = list_mode; + lru_insert(&bc->lru[b->list_mode], &b->lru); + } + cache_write_unlock(bc, b->block); +} -static struct workqueue_struct *dm_bufio_wq; -static struct delayed_work dm_bufio_cleanup_old_work; -static struct work_struct dm_bufio_replacement_work; +/*--------------*/ +/* + * Runs through the lru associated with 'old_mode', if the predicate matches then + * it moves them to 'new_mode'. Not threadsafe. 
+ */ +static void __cache_mark_many(struct dm_buffer_cache *bc, int old_mode, int new_mode, + b_predicate pred, void *context, struct lock_history *lh) +{ + struct lru_entry *le; + struct dm_buffer *b; + struct evict_wrapper w = {.lh = lh, .pred = pred, .context = context}; -#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING -static void buffer_record_stack(struct dm_buffer *b) + while (true) { + le = lru_evict(&bc->lru[old_mode], __evict_pred, &w); + if (!le) + break; + + b = le_to_buffer(le); + b->list_mode = new_mode; + lru_insert(&bc->lru[b->list_mode], &b->lru); + } +} + +static void cache_mark_many(struct dm_buffer_cache *bc, int old_mode, int new_mode, + b_predicate pred, void *context) { - b->stack_len = stack_trace_save(b->stack_entries, MAX_STACK, 2); + struct lock_history lh; + + lh_init(&lh, bc, true); + __cache_mark_many(bc, old_mode, new_mode, pred, context, &lh); + lh_exit(&lh); } -#endif + +/*--------------*/ + +/* + * Iterates through all clean or dirty entries calling a function for each + * entry. The callback may terminate the iteration early. Not threadsafe. + */ /* - *---------------------------------------------------------------- - * A red/black tree acts as an index for all the buffers. - *---------------------------------------------------------------- + * Iterator functions should return one of these actions to indicate + * how the iteration should proceed. */ -static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block) +enum it_action { + IT_NEXT, + IT_COMPLETE, +}; + +typedef enum it_action (*iter_fn)(struct dm_buffer *b, void *context); + +static void __cache_iterate(struct dm_buffer_cache *bc, int list_mode, + iter_fn fn, void *context, struct lock_history *lh) { - struct rb_node *n = c->buffer_tree.rb_node; - struct dm_buffer *b; + struct lru *lru = &bc->lru[list_mode]; + struct lru_entry *le, *first; - while (n) { - b = container_of(n, struct dm_buffer, node); + if (!lru->cursor) + return; - if (b->block == block) - return b; + first = le = to_le(lru->cursor); + do { + struct dm_buffer *b = le_to_buffer(le); - n = block < b->block ? n->rb_left : n->rb_right; + lh_next(lh, b->block); + + switch (fn(b, context)) { + case IT_NEXT: + break; + + case IT_COMPLETE: + return; + } + cond_resched(); + + le = to_le(le->list.next); + } while (le != first); +} + +static void cache_iterate(struct dm_buffer_cache *bc, int list_mode, + iter_fn fn, void *context) +{ + struct lock_history lh; + + lh_init(&lh, bc, false); + __cache_iterate(bc, list_mode, fn, context, &lh); + lh_exit(&lh); +} + +/*--------------*/ + +/* + * Passes ownership of the buffer to the cache. Returns false if the + * buffer was already present (in which case ownership does not pass). + * eg, a race with another thread. + * + * Holder count should be 1 on insertion. + * + * Not threadsafe. + */ +static bool __cache_insert(struct rb_root *root, struct dm_buffer *b) +{ + struct rb_node **new = &root->rb_node, *parent = NULL; + struct dm_buffer *found; + + while (*new) { + found = container_of(*new, struct dm_buffer, node); + + if (found->block == b->block) + return false; + + parent = *new; + new = b->block < found->block ? 
+ &found->node.rb_left : &found->node.rb_right; } - return NULL; + rb_link_node(&b->node, parent, new); + rb_insert_color(&b->node, root); + + return true; +} + +static bool cache_insert(struct dm_buffer_cache *bc, struct dm_buffer *b) +{ + bool r; + + if (WARN_ON_ONCE(b->list_mode >= LIST_SIZE)) + return false; + + cache_write_lock(bc, b->block); + BUG_ON(atomic_read(&b->hold_count) != 1); + r = __cache_insert(&bc->trees[cache_index(b->block, bc->num_locks)].root, b); + if (r) + lru_insert(&bc->lru[b->list_mode], &b->lru); + cache_write_unlock(bc, b->block); + + return r; } -static struct dm_buffer *__find_next(struct dm_bufio_client *c, sector_t block) +/*--------------*/ + +/* + * Removes buffer from cache, ownership of the buffer passes back to the caller. + * Fails if the hold_count is not one (ie. the caller holds the only reference). + * + * Not threadsafe. + */ +static bool cache_remove(struct dm_buffer_cache *bc, struct dm_buffer *b) { - struct rb_node *n = c->buffer_tree.rb_node; + bool r; + + cache_write_lock(bc, b->block); + + if (atomic_read(&b->hold_count) != 1) { + r = false; + } else { + r = true; + rb_erase(&b->node, &bc->trees[cache_index(b->block, bc->num_locks)].root); + lru_remove(&bc->lru[b->list_mode], &b->lru); + } + + cache_write_unlock(bc, b->block); + + return r; +} + +/*--------------*/ + +typedef void (*b_release)(struct dm_buffer *); + +static struct dm_buffer *__find_next(struct rb_root *root, sector_t block) +{ + struct rb_node *n = root->rb_node; struct dm_buffer *b; struct dm_buffer *best = NULL; @@ -304,35 +882,188 @@ static struct dm_buffer *__find_next(struct dm_bufio_client *c, sector_t block) return best; } -static void __insert(struct dm_bufio_client *c, struct dm_buffer *b) +static void __remove_range(struct dm_buffer_cache *bc, + struct rb_root *root, + sector_t begin, sector_t end, + b_predicate pred, b_release release) { - struct rb_node **new = &c->buffer_tree.rb_node, *parent = NULL; - struct dm_buffer *found; + struct dm_buffer *b; - while (*new) { - found = container_of(*new, struct dm_buffer, node); + while (true) { + cond_resched(); - if (found->block == b->block) { - BUG_ON(found != b); - return; + b = __find_next(root, begin); + if (!b || (b->block >= end)) + break; + + begin = b->block + 1; + + if (atomic_read(&b->hold_count)) + continue; + + if (pred(b, NULL) == ER_EVICT) { + rb_erase(&b->node, root); + lru_remove(&bc->lru[b->list_mode], &b->lru); + release(b); } + } +} - parent = *new; - new = b->block < found->block ? - &found->node.rb_left : &found->node.rb_right; +static void cache_remove_range(struct dm_buffer_cache *bc, + sector_t begin, sector_t end, + b_predicate pred, b_release release) +{ + unsigned int i; + + for (i = 0; i < bc->num_locks; i++) { + down_write(&bc->trees[i].lock); + __remove_range(bc, &bc->trees[i].root, begin, end, pred, release); + up_write(&bc->trees[i].lock); } +} - rb_link_node(&b->node, parent, new); - rb_insert_color(&b->node, &c->buffer_tree); +/*----------------------------------------------------------------*/ + +/* + * Linking of buffers: + * All buffers are linked to buffer_cache with their node field. + * + * Clean buffers that are not being written (B_WRITING not set) + * are linked to lru[LIST_CLEAN] with their lru_list field. + * + * Dirty and clean buffers that are being written are linked to + * lru[LIST_DIRTY] with their lru_list field. 
When the write + * finishes, the buffer cannot be relinked immediately (because we + * are in an interrupt context and relinking requires process + * context), so some clean-not-writing buffers can be held on + * dirty_lru too. They are later added to lru in the process + * context. + */ +struct dm_bufio_client { + struct block_device *bdev; + unsigned int block_size; + s8 sectors_per_block_bits; + + bool no_sleep; + struct mutex lock; + spinlock_t spinlock; + + int async_write_error; + + void (*alloc_callback)(struct dm_buffer *buf); + void (*write_callback)(struct dm_buffer *buf); + struct kmem_cache *slab_buffer; + struct kmem_cache *slab_cache; + struct dm_io_client *dm_io; + + struct list_head reserved_buffers; + unsigned int need_reserved_buffers; + + unsigned int minimum_buffers; + + sector_t start; + + struct shrinker shrinker; + struct work_struct shrink_work; + atomic_long_t need_shrink; + + wait_queue_head_t free_buffer_wait; + + struct list_head client_list; + + /* + * Used by global_cleanup to sort the clients list. + */ + unsigned long oldest_buffer; + + struct dm_buffer_cache cache; /* must be last member */ +}; + +static DEFINE_STATIC_KEY_FALSE(no_sleep_enabled); + +/*----------------------------------------------------------------*/ + +#define dm_bufio_in_request() (!!current->bio_list) + +static void dm_bufio_lock(struct dm_bufio_client *c) +{ + if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep) + spin_lock_bh(&c->spinlock); + else + mutex_lock_nested(&c->lock, dm_bufio_in_request()); } -static void __remove(struct dm_bufio_client *c, struct dm_buffer *b) +static void dm_bufio_unlock(struct dm_bufio_client *c) { - rb_erase(&b->node, &c->buffer_tree); + if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep) + spin_unlock_bh(&c->spinlock); + else + mutex_unlock(&c->lock); } /*----------------------------------------------------------------*/ +/* + * Default cache size: available memory divided by the ratio. + */ +static unsigned long dm_bufio_default_cache_size; + +/* + * Total cache size set by the user. + */ +static unsigned long dm_bufio_cache_size; + +/* + * A copy of dm_bufio_cache_size because dm_bufio_cache_size can change + * at any time. If it disagrees, the user has changed cache size. + */ +static unsigned long dm_bufio_cache_size_latch; + +static DEFINE_SPINLOCK(global_spinlock); + +/* + * Buffers are freed after this timeout + */ +static unsigned int dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS; +static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES; + +static unsigned long dm_bufio_peak_allocated; +static unsigned long dm_bufio_allocated_kmem_cache; +static unsigned long dm_bufio_allocated_get_free_pages; +static unsigned long dm_bufio_allocated_vmalloc; +static unsigned long dm_bufio_current_allocated; + +/*----------------------------------------------------------------*/ + +/* + * The current number of clients. + */ +static int dm_bufio_client_count; + +/* + * The list of all clients. 
+ */ +static LIST_HEAD(dm_bufio_all_clients); + +/* + * This mutex protects dm_bufio_cache_size_latch and dm_bufio_client_count + */ +static DEFINE_MUTEX(dm_bufio_clients_lock); + +static struct workqueue_struct *dm_bufio_wq; +static struct delayed_work dm_bufio_cleanup_old_work; +static struct work_struct dm_bufio_replacement_work; + + +#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING +static void buffer_record_stack(struct dm_buffer *b) +{ + b->stack_len = stack_trace_save(b->stack_entries, MAX_STACK, 2); +} +#endif + +/*----------------------------------------------------------------*/ + static void adjust_total_allocated(struct dm_buffer *b, bool unlink) { unsigned char data_mode; @@ -358,16 +1089,9 @@ static void adjust_total_allocated(struct dm_buffer *b, bool unlink) if (dm_bufio_current_allocated > dm_bufio_peak_allocated) dm_bufio_peak_allocated = dm_bufio_current_allocated; - b->accessed = 1; - if (!unlink) { - list_add(&b->global_list, &global_queue); - global_num++; if (dm_bufio_current_allocated > dm_bufio_cache_size) queue_work(dm_bufio_wq, &dm_bufio_replacement_work); - } else { - list_del(&b->global_list); - global_num--; } spin_unlock(&global_spinlock); @@ -378,8 +1102,10 @@ static void adjust_total_allocated(struct dm_buffer *b, bool unlink) */ static void __cache_size_refresh(void) { - BUG_ON(!mutex_is_locked(&dm_bufio_clients_lock)); - BUG_ON(dm_bufio_client_count < 0); + if (WARN_ON(!mutex_is_locked(&dm_bufio_clients_lock))) + return; + if (WARN_ON(dm_bufio_client_count < 0)) + return; dm_bufio_cache_size_latch = READ_ONCE(dm_bufio_cache_size); @@ -495,6 +1221,7 @@ static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask) kmem_cache_free(c->slab_buffer, b); return NULL; } + adjust_total_allocated(b, false); #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING b->stack_len = 0; @@ -509,62 +1236,12 @@ static void free_buffer(struct dm_buffer *b) { struct dm_bufio_client *c = b->c; + adjust_total_allocated(b, true); free_buffer_data(c, b->data, b->data_mode); kmem_cache_free(c->slab_buffer, b); } /* - * Link buffer to the buffer tree and clean or dirty queue. - */ -static void __link_buffer(struct dm_buffer *b, sector_t block, int dirty) -{ - struct dm_bufio_client *c = b->c; - - c->n_buffers[dirty]++; - b->block = block; - b->list_mode = dirty; - list_add(&b->lru_list, &c->lru[dirty]); - __insert(b->c, b); - b->last_accessed = jiffies; - - adjust_total_allocated(b, false); -} - -/* - * Unlink buffer from the buffer tree and dirty or clean queue. - */ -static void __unlink_buffer(struct dm_buffer *b) -{ - struct dm_bufio_client *c = b->c; - - BUG_ON(!c->n_buffers[b->list_mode]); - - c->n_buffers[b->list_mode]--; - __remove(b->c, b); - list_del(&b->lru_list); - - adjust_total_allocated(b, true); -} - -/* - * Place the buffer to the head of dirty or clean LRU queue. - */ -static void __relink_lru(struct dm_buffer *b, int dirty) -{ - struct dm_bufio_client *c = b->c; - - b->accessed = 1; - - BUG_ON(!c->n_buffers[b->list_mode]); - - c->n_buffers[b->list_mode]--; - c->n_buffers[dirty]++; - b->list_mode = dirty; - list_move(&b->lru_list, &c->lru[dirty]); - b->last_accessed = jiffies; -} - -/* *-------------------------------------------------------------------------- * Submit I/O on the buffer. 
* @@ -639,19 +1316,14 @@ static void use_bio(struct dm_buffer *b, enum req_op op, sector_t sector, { struct bio *bio; char *ptr; - unsigned int vec_size, len; + unsigned int len; - vec_size = b->c->block_size >> PAGE_SHIFT; - if (unlikely(b->c->sectors_per_block_bits < PAGE_SHIFT - SECTOR_SHIFT)) - vec_size += 2; - - bio = bio_kmalloc(vec_size, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOWARN); + bio = bio_kmalloc(1, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOWARN); if (!bio) { -dmio: use_dmio(b, op, sector, n_sectors, offset); return; } - bio_init(bio, b->c->bdev, bio->bi_inline_vecs, vec_size, op); + bio_init(bio, b->c->bdev, bio->bi_inline_vecs, 1, op); bio->bi_iter.bi_sector = sector; bio->bi_end_io = bio_complete; bio->bi_private = b; @@ -659,18 +1331,7 @@ dmio: ptr = (char *)b->data + offset; len = n_sectors << SECTOR_SHIFT; - do { - unsigned int this_step = min((unsigned int)(PAGE_SIZE - offset_in_page(ptr)), len); - - if (!bio_add_page(bio, virt_to_page(ptr), this_step, - offset_in_page(ptr))) { - bio_put(bio); - goto dmio; - } - - len -= this_step; - ptr += this_step; - } while (len > 0); + __bio_add_page(bio, virt_to_page(ptr), len, offset_in_page(ptr)); submit_bio(bio); } @@ -803,7 +1464,7 @@ static void __flush_write_list(struct list_head *write_list) */ static void __make_buffer_clean(struct dm_buffer *b) { - BUG_ON(b->hold_count); + BUG_ON(atomic_read(&b->hold_count)); /* smp_load_acquire() pairs with read_endio()'s smp_mb__before_atomic() */ if (!smp_load_acquire(&b->state)) /* fast case */ @@ -814,6 +1475,36 @@ static void __make_buffer_clean(struct dm_buffer *b) wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE); } +static enum evict_result is_clean(struct dm_buffer *b, void *context) +{ + struct dm_bufio_client *c = context; + + /* These should never happen */ + if (WARN_ON_ONCE(test_bit(B_WRITING, &b->state))) + return ER_DONT_EVICT; + if (WARN_ON_ONCE(test_bit(B_DIRTY, &b->state))) + return ER_DONT_EVICT; + if (WARN_ON_ONCE(b->list_mode != LIST_CLEAN)) + return ER_DONT_EVICT; + + if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep && + unlikely(test_bit(B_READING, &b->state))) + return ER_DONT_EVICT; + + return ER_EVICT; +} + +static enum evict_result is_dirty(struct dm_buffer *b, void *context) +{ + /* These should never happen */ + if (WARN_ON_ONCE(test_bit(B_READING, &b->state))) + return ER_DONT_EVICT; + if (WARN_ON_ONCE(b->list_mode != LIST_DIRTY)) + return ER_DONT_EVICT; + + return ER_EVICT; +} + /* * Find some buffer that is not held by anybody, clean it, unlink it and * return it. 
@@ -822,34 +1513,20 @@ static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c) { struct dm_buffer *b; - list_for_each_entry_reverse(b, &c->lru[LIST_CLEAN], lru_list) { - BUG_ON(test_bit(B_WRITING, &b->state)); - BUG_ON(test_bit(B_DIRTY, &b->state)); - - if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep && - unlikely(test_bit_acquire(B_READING, &b->state))) - continue; - - if (!b->hold_count) { - __make_buffer_clean(b); - __unlink_buffer(b); - return b; - } - cond_resched(); + b = cache_evict(&c->cache, LIST_CLEAN, is_clean, c); + if (b) { + /* this also waits for pending reads */ + __make_buffer_clean(b); + return b; } if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep) return NULL; - list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) { - BUG_ON(test_bit(B_READING, &b->state)); - - if (!b->hold_count) { - __make_buffer_clean(b); - __unlink_buffer(b); - return b; - } - cond_resched(); + b = cache_evict(&c->cache, LIST_DIRTY, is_dirty, NULL); + if (b) { + __make_buffer_clean(b); + return b; } return NULL; @@ -870,7 +1547,12 @@ static void __wait_for_free_buffer(struct dm_bufio_client *c) set_current_state(TASK_UNINTERRUPTIBLE); dm_bufio_unlock(c); - io_schedule(); + /* + * It's possible to miss a wake up event since we don't always + * hold c->lock when wake_up is called. So we have a timeout here, + * just in case. + */ + io_schedule_timeout(5 * HZ); remove_wait_queue(&c->free_buffer_wait, &wait); @@ -928,9 +1610,8 @@ static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client } if (!list_empty(&c->reserved_buffers)) { - b = list_entry(c->reserved_buffers.next, - struct dm_buffer, lru_list); - list_del(&b->lru_list); + b = list_to_buffer(c->reserved_buffers.next); + list_del(&b->lru.list); c->need_reserved_buffers++; return b; @@ -964,36 +1645,61 @@ static void __free_buffer_wake(struct dm_buffer *b) { struct dm_bufio_client *c = b->c; + b->block = -1; if (!c->need_reserved_buffers) free_buffer(b); else { - list_add(&b->lru_list, &c->reserved_buffers); + list_add(&b->lru.list, &c->reserved_buffers); c->need_reserved_buffers--; } - wake_up(&c->free_buffer_wait); + /* + * We hold the bufio lock here, so no one can add entries to the + * wait queue anyway. 
+ */ + if (unlikely(waitqueue_active(&c->free_buffer_wait))) + wake_up(&c->free_buffer_wait); } -static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait, - struct list_head *write_list) +static enum evict_result cleaned(struct dm_buffer *b, void *context) { - struct dm_buffer *b, *tmp; + if (WARN_ON_ONCE(test_bit(B_READING, &b->state))) + return ER_DONT_EVICT; /* should never happen */ - list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) { - BUG_ON(test_bit(B_READING, &b->state)); + if (test_bit(B_DIRTY, &b->state) || test_bit(B_WRITING, &b->state)) + return ER_DONT_EVICT; + else + return ER_EVICT; +} - if (!test_bit(B_DIRTY, &b->state) && - !test_bit(B_WRITING, &b->state)) { - __relink_lru(b, LIST_CLEAN); - continue; - } +static void __move_clean_buffers(struct dm_bufio_client *c) +{ + cache_mark_many(&c->cache, LIST_DIRTY, LIST_CLEAN, cleaned, NULL); +} - if (no_wait && test_bit(B_WRITING, &b->state)) - return; +struct write_context { + int no_wait; + struct list_head *write_list; +}; - __write_dirty_buffer(b, write_list); - cond_resched(); - } +static enum it_action write_one(struct dm_buffer *b, void *context) +{ + struct write_context *wc = context; + + if (wc->no_wait && test_bit(B_WRITING, &b->state)) + return IT_COMPLETE; + + __write_dirty_buffer(b, wc->write_list); + return IT_NEXT; +} + +static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait, + struct list_head *write_list) +{ + struct write_context wc = {.no_wait = no_wait, .write_list = write_list}; + + __move_clean_buffers(c); + cache_iterate(&c->cache, LIST_DIRTY, write_one, &wc); } /* @@ -1004,7 +1710,8 @@ static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait, static void __check_watermark(struct dm_bufio_client *c, struct list_head *write_list) { - if (c->n_buffers[LIST_DIRTY] > c->n_buffers[LIST_CLEAN] * DM_BUFIO_WRITEBACK_RATIO) + if (cache_count(&c->cache, LIST_DIRTY) > + cache_count(&c->cache, LIST_CLEAN) * DM_BUFIO_WRITEBACK_RATIO) __write_dirty_buffers_async(c, 1, write_list); } @@ -1014,6 +1721,21 @@ static void __check_watermark(struct dm_bufio_client *c, *-------------------------------------------------------------- */ +static void cache_put_and_wake(struct dm_bufio_client *c, struct dm_buffer *b) +{ + /* + * Relying on waitqueue_active() is racey, but we sleep + * with schedule_timeout anyway. + */ + if (cache_put(&c->cache, b) && + unlikely(waitqueue_active(&c->free_buffer_wait))) + wake_up(&c->free_buffer_wait); +} + +/* + * This assumes you have already checked the cache to see if the buffer + * is already present (it will recheck after dropping the lock for allocation). + */ static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block, enum new_flag nf, int *need_submit, struct list_head *write_list) @@ -1022,11 +1744,8 @@ static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block, *need_submit = 0; - b = __find(c, block); - if (b) - goto found_buffer; - - if (nf == NF_GET) + /* This can't be called with NF_GET */ + if (WARN_ON_ONCE(nf == NF_GET)) return NULL; new_b = __alloc_buffer_wait(c, nf); @@ -1037,7 +1756,7 @@ static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block, * We've had a period where the mutex was unlocked, so need to * recheck the buffer tree. 
*/ - b = __find(c, block); + b = cache_get(&c->cache, block); if (b) { __free_buffer_wake(new_b); goto found_buffer; @@ -1046,24 +1765,35 @@ static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block, __check_watermark(c, write_list); b = new_b; - b->hold_count = 1; + atomic_set(&b->hold_count, 1); + WRITE_ONCE(b->last_accessed, jiffies); + b->block = block; b->read_error = 0; b->write_error = 0; - __link_buffer(b, block, LIST_CLEAN); + b->list_mode = LIST_CLEAN; - if (nf == NF_FRESH) { + if (nf == NF_FRESH) b->state = 0; - return b; + else { + b->state = 1 << B_READING; + *need_submit = 1; } - b->state = 1 << B_READING; - *need_submit = 1; + /* + * We mustn't insert into the cache until the B_READING state + * is set. Otherwise another thread could get it and use + * it before it had been read. + */ + cache_insert(&c->cache, b); return b; found_buffer: - if (nf == NF_PREFETCH) + if (nf == NF_PREFETCH) { + cache_put_and_wake(c, b); return NULL; + } + /* * Note: it is essential that we don't wait for the buffer to be * read if dm_bufio_get function is used. Both dm_bufio_get and @@ -1071,12 +1801,11 @@ found_buffer: * If the user called both dm_bufio_prefetch and dm_bufio_get on * the same buffer, it would deadlock if we waited. */ - if (nf == NF_GET && unlikely(test_bit_acquire(B_READING, &b->state))) + if (nf == NF_GET && unlikely(test_bit_acquire(B_READING, &b->state))) { + cache_put_and_wake(c, b); return NULL; + } - b->hold_count++; - __relink_lru(b, test_bit(B_DIRTY, &b->state) || - test_bit(B_WRITING, &b->state)); return b; } @@ -1106,18 +1835,50 @@ static void read_endio(struct dm_buffer *b, blk_status_t status) static void *new_read(struct dm_bufio_client *c, sector_t block, enum new_flag nf, struct dm_buffer **bp) { - int need_submit; + int need_submit = 0; struct dm_buffer *b; LIST_HEAD(write_list); - dm_bufio_lock(c); - b = __bufio_new(c, block, nf, &need_submit, &write_list); + *bp = NULL; + + /* + * Fast path, hopefully the block is already in the cache. No need + * to get the client lock for this. + */ + b = cache_get(&c->cache, block); + if (b) { + if (nf == NF_PREFETCH) { + cache_put_and_wake(c, b); + return NULL; + } + + /* + * Note: it is essential that we don't wait for the buffer to be + * read if dm_bufio_get function is used. Both dm_bufio_get and + * dm_bufio_prefetch can be used in the driver request routine. + * If the user called both dm_bufio_prefetch and dm_bufio_get on + * the same buffer, it would deadlock if we waited. 
+ */ + if (nf == NF_GET && unlikely(test_bit_acquire(B_READING, &b->state))) { + cache_put_and_wake(c, b); + return NULL; + } + } + + if (!b) { + if (nf == NF_GET) + return NULL; + + dm_bufio_lock(c); + b = __bufio_new(c, block, nf, &need_submit, &write_list); + dm_bufio_unlock(c); + } + #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING - if (b && b->hold_count == 1) + if (b && (atomic_read(&b->hold_count) == 1)) buffer_record_stack(b); #endif - dm_bufio_unlock(c); __flush_write_list(&write_list); @@ -1152,7 +1913,8 @@ EXPORT_SYMBOL_GPL(dm_bufio_get); void *dm_bufio_read(struct dm_bufio_client *c, sector_t block, struct dm_buffer **bp) { - BUG_ON(dm_bufio_in_request()); + if (WARN_ON_ONCE(dm_bufio_in_request())) + return ERR_PTR(-EINVAL); return new_read(c, block, NF_READ, bp); } @@ -1161,7 +1923,8 @@ EXPORT_SYMBOL_GPL(dm_bufio_read); void *dm_bufio_new(struct dm_bufio_client *c, sector_t block, struct dm_buffer **bp) { - BUG_ON(dm_bufio_in_request()); + if (WARN_ON_ONCE(dm_bufio_in_request())) + return ERR_PTR(-EINVAL); return new_read(c, block, NF_FRESH, bp); } @@ -1174,15 +1937,23 @@ void dm_bufio_prefetch(struct dm_bufio_client *c, LIST_HEAD(write_list); - BUG_ON(dm_bufio_in_request()); + if (WARN_ON_ONCE(dm_bufio_in_request())) + return; /* should never happen */ blk_start_plug(&plug); - dm_bufio_lock(c); for (; n_blocks--; block++) { int need_submit; struct dm_buffer *b; + b = cache_get(&c->cache, block); + if (b) { + /* already in cache */ + cache_put_and_wake(c, b); + continue; + } + + dm_bufio_lock(c); b = __bufio_new(c, block, NF_PREFETCH, &need_submit, &write_list); if (unlikely(!list_empty(&write_list))) { @@ -1205,10 +1976,9 @@ void dm_bufio_prefetch(struct dm_bufio_client *c, goto flush_plug; dm_bufio_lock(c); } + dm_bufio_unlock(c); } - dm_bufio_unlock(c); - flush_plug: blk_finish_plug(&plug); } @@ -1218,29 +1988,28 @@ void dm_bufio_release(struct dm_buffer *b) { struct dm_bufio_client *c = b->c; - dm_bufio_lock(c); - - BUG_ON(!b->hold_count); - - b->hold_count--; - if (!b->hold_count) { - wake_up(&c->free_buffer_wait); + /* + * If there were errors on the buffer, and the buffer is not + * to be written, free the buffer. There is no point in caching + * invalid buffer. + */ + if ((b->read_error || b->write_error) && + !test_bit_acquire(B_READING, &b->state) && + !test_bit(B_WRITING, &b->state) && + !test_bit(B_DIRTY, &b->state)) { + dm_bufio_lock(c); - /* - * If there were errors on the buffer, and the buffer is not - * to be written, free the buffer. There is no point in caching - * invalid buffer. 
- */ - if ((b->read_error || b->write_error) && - !test_bit_acquire(B_READING, &b->state) && - !test_bit(B_WRITING, &b->state) && - !test_bit(B_DIRTY, &b->state)) { - __unlink_buffer(b); + /* cache remove can fail if there are other holders */ + if (cache_remove(&c->cache, b)) { __free_buffer_wake(b); + dm_bufio_unlock(c); + return; } + + dm_bufio_unlock(c); } - dm_bufio_unlock(c); + cache_put_and_wake(c, b); } EXPORT_SYMBOL_GPL(dm_bufio_release); @@ -1259,7 +2028,7 @@ void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b, if (!test_and_set_bit(B_DIRTY, &b->state)) { b->dirty_start = start; b->dirty_end = end; - __relink_lru(b, LIST_DIRTY); + cache_mark(&c->cache, b, LIST_DIRTY); } else { if (start < b->dirty_start) b->dirty_start = start; @@ -1281,7 +2050,8 @@ void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c) { LIST_HEAD(write_list); - BUG_ON(dm_bufio_in_request()); + if (WARN_ON_ONCE(dm_bufio_in_request())) + return; /* should never happen */ dm_bufio_lock(c); __write_dirty_buffers_async(c, 0, &write_list); @@ -1297,11 +2067,19 @@ EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async); * * Finally, we flush hardware disk cache. */ +static bool is_writing(struct lru_entry *e, void *context) +{ + struct dm_buffer *b = le_to_buffer(e); + + return test_bit(B_WRITING, &b->state); +} + int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c) { int a, f; - unsigned long buffers_processed = 0; - struct dm_buffer *b, *tmp; + unsigned long nr_buffers; + struct lru_entry *e; + struct lru_iter it; LIST_HEAD(write_list); @@ -1311,52 +2089,32 @@ int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c) __flush_write_list(&write_list); dm_bufio_lock(c); -again: - list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) { - int dropped_lock = 0; - - if (buffers_processed < c->n_buffers[LIST_DIRTY]) - buffers_processed++; + nr_buffers = cache_count(&c->cache, LIST_DIRTY); + lru_iter_begin(&c->cache.lru[LIST_DIRTY], &it); + while ((e = lru_iter_next(&it, is_writing, c))) { + struct dm_buffer *b = le_to_buffer(e); + __cache_inc_buffer(b); BUG_ON(test_bit(B_READING, &b->state)); - if (test_bit(B_WRITING, &b->state)) { - if (buffers_processed < c->n_buffers[LIST_DIRTY]) { - dropped_lock = 1; - b->hold_count++; - dm_bufio_unlock(c); - wait_on_bit_io(&b->state, B_WRITING, - TASK_UNINTERRUPTIBLE); - dm_bufio_lock(c); - b->hold_count--; - } else - wait_on_bit_io(&b->state, B_WRITING, - TASK_UNINTERRUPTIBLE); + if (nr_buffers) { + nr_buffers--; + dm_bufio_unlock(c); + wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE); + dm_bufio_lock(c); + } else { + wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE); } - if (!test_bit(B_DIRTY, &b->state) && - !test_bit(B_WRITING, &b->state)) - __relink_lru(b, LIST_CLEAN); + if (!test_bit(B_DIRTY, &b->state) && !test_bit(B_WRITING, &b->state)) + cache_mark(&c->cache, b, LIST_CLEAN); - cond_resched(); + cache_put_and_wake(c, b); - /* - * If we dropped the lock, the list is no longer consistent, - * so we must restart the search. - * - * In the most common case, the buffer just processed is - * relinked to the clean list, so we won't loop scanning the - * same buffer again and again. - * - * This may livelock if there is another thread simultaneously - * dirtying buffers, so we count the number of buffers walked - * and if it exceeds the total number of buffers, it means that - * someone is doing some writes simultaneously with us. In - * this case, stop, dropping the lock. 
- */ - if (dropped_lock) - goto again; + cond_resched(); } + lru_iter_end(&it); + wake_up(&c->free_buffer_wait); dm_bufio_unlock(c); @@ -1386,7 +2144,8 @@ int dm_bufio_issue_flush(struct dm_bufio_client *c) .count = 0, }; - BUG_ON(dm_bufio_in_request()); + if (WARN_ON_ONCE(dm_bufio_in_request())) + return -EINVAL; return dm_io(&io_req, 1, &io_reg, NULL); } @@ -1409,95 +2168,30 @@ int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t c .count = block_to_sector(c, count), }; - BUG_ON(dm_bufio_in_request()); + if (WARN_ON_ONCE(dm_bufio_in_request())) + return -EINVAL; /* discards are optional */ return dm_io(&io_req, 1, &io_reg, NULL); } EXPORT_SYMBOL_GPL(dm_bufio_issue_discard); -/* - * We first delete any other buffer that may be at that new location. - * - * Then, we write the buffer to the original location if it was dirty. - * - * Then, if we are the only one who is holding the buffer, relink the buffer - * in the buffer tree for the new location. - * - * If there was someone else holding the buffer, we write it to the new - * location but not relink it, because that other user needs to have the buffer - * at the same place. - */ -void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block) +static bool forget_buffer(struct dm_bufio_client *c, sector_t block) { - struct dm_bufio_client *c = b->c; - struct dm_buffer *new; - - BUG_ON(dm_bufio_in_request()); - - dm_bufio_lock(c); + struct dm_buffer *b; -retry: - new = __find(c, new_block); - if (new) { - if (new->hold_count) { - __wait_for_free_buffer(c); - goto retry; + b = cache_get(&c->cache, block); + if (b) { + if (likely(!smp_load_acquire(&b->state))) { + if (cache_remove(&c->cache, b)) + __free_buffer_wake(b); + else + cache_put_and_wake(c, b); + } else { + cache_put_and_wake(c, b); } - - /* - * FIXME: Is there any point waiting for a write that's going - * to be overwritten in a bit? - */ - __make_buffer_clean(new); - __unlink_buffer(new); - __free_buffer_wake(new); } - BUG_ON(!b->hold_count); - BUG_ON(test_bit(B_READING, &b->state)); - - __write_dirty_buffer(b, NULL); - if (b->hold_count == 1) { - wait_on_bit_io(&b->state, B_WRITING, - TASK_UNINTERRUPTIBLE); - set_bit(B_DIRTY, &b->state); - b->dirty_start = 0; - b->dirty_end = c->block_size; - __unlink_buffer(b); - __link_buffer(b, new_block, LIST_DIRTY); - } else { - sector_t old_block; - - wait_on_bit_lock_io(&b->state, B_WRITING, - TASK_UNINTERRUPTIBLE); - /* - * Relink buffer to "new_block" so that write_callback - * sees "new_block" as a block number. - * After the write, link the buffer back to old_block. - * All this must be done in bufio lock, so that block number - * change isn't visible to other threads. - */ - old_block = b->block; - __unlink_buffer(b); - __link_buffer(b, new_block, b->list_mode); - submit_io(b, REQ_OP_WRITE, write_endio); - wait_on_bit_io(&b->state, B_WRITING, - TASK_UNINTERRUPTIBLE); - __unlink_buffer(b); - __link_buffer(b, old_block, b->list_mode); - } - - dm_bufio_unlock(c); - dm_bufio_release(b); -} -EXPORT_SYMBOL_GPL(dm_bufio_release_move); - -static void forget_buffer_locked(struct dm_buffer *b) -{ - if (likely(!b->hold_count) && likely(!smp_load_acquire(&b->state))) { - __unlink_buffer(b); - __free_buffer_wake(b); - } + return b ? 
true : false; } /* @@ -1508,38 +2202,22 @@ static void forget_buffer_locked(struct dm_buffer *b) */ void dm_bufio_forget(struct dm_bufio_client *c, sector_t block) { - struct dm_buffer *b; - dm_bufio_lock(c); - - b = __find(c, block); - if (b) - forget_buffer_locked(b); - + forget_buffer(c, block); dm_bufio_unlock(c); } EXPORT_SYMBOL_GPL(dm_bufio_forget); -void dm_bufio_forget_buffers(struct dm_bufio_client *c, sector_t block, sector_t n_blocks) +static enum evict_result idle(struct dm_buffer *b, void *context) { - struct dm_buffer *b; - sector_t end_block = block + n_blocks; - - while (block < end_block) { - dm_bufio_lock(c); - - b = __find_next(c, block); - if (b) { - block = b->block + 1; - forget_buffer_locked(b); - } - - dm_bufio_unlock(c); - - if (!b) - break; - } + return b->state ? ER_DONT_EVICT : ER_EVICT; +} +void dm_bufio_forget_buffers(struct dm_bufio_client *c, sector_t block, sector_t n_blocks) +{ + dm_bufio_lock(c); + cache_remove_range(&c->cache, block, block + n_blocks, idle, __free_buffer_wake); + dm_bufio_unlock(c); } EXPORT_SYMBOL_GPL(dm_bufio_forget_buffers); @@ -1601,13 +2279,29 @@ struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b) } EXPORT_SYMBOL_GPL(dm_bufio_get_client); +static enum it_action warn_leak(struct dm_buffer *b, void *context) +{ + bool *warned = context; + + WARN_ON(!(*warned)); + *warned = true; + DMERR("leaked buffer %llx, hold count %u, list %d", + (unsigned long long)b->block, atomic_read(&b->hold_count), b->list_mode); +#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING + stack_trace_print(b->stack_entries, b->stack_len, 1); + /* mark unclaimed to avoid WARN_ON at end of drop_buffers() */ + atomic_set(&b->hold_count, 0); +#endif + return IT_NEXT; +} + static void drop_buffers(struct dm_bufio_client *c) { - struct dm_buffer *b; int i; - bool warned = false; + struct dm_buffer *b; - BUG_ON(dm_bufio_in_request()); + if (WARN_ON(dm_bufio_in_request())) + return; /* should never happen */ /* * An optimization so that the buffers are not written one-by-one. @@ -1619,18 +2313,11 @@ static void drop_buffers(struct dm_bufio_client *c) while ((b = __get_unclaimed_buffer(c))) __free_buffer_wake(b); - for (i = 0; i < LIST_SIZE; i++) - list_for_each_entry(b, &c->lru[i], lru_list) { - WARN_ON(!warned); - warned = true; - DMERR("leaked buffer %llx, hold count %u, list %d", - (unsigned long long)b->block, b->hold_count, i); -#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING - stack_trace_print(b->stack_entries, b->stack_len, 1); - /* mark unclaimed to avoid BUG_ON below */ - b->hold_count = 0; -#endif - } + for (i = 0; i < LIST_SIZE; i++) { + bool warned = false; + + cache_iterate(&c->cache, i, warn_leak, &warned); + } #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING while ((b = __get_unclaimed_buffer(c))) @@ -1638,39 +2325,11 @@ static void drop_buffers(struct dm_bufio_client *c) #endif for (i = 0; i < LIST_SIZE; i++) - BUG_ON(!list_empty(&c->lru[i])); + WARN_ON(cache_count(&c->cache, i)); dm_bufio_unlock(c); } -/* - * We may not be able to evict this buffer if IO pending or the client - * is still using it. Caller is expected to know buffer is too old. - * - * And if GFP_NOFS is used, we must not do any I/O because we hold - * dm_bufio_clients_lock and we would risk deadlock if the I/O gets - * rerouted to different bufio client. 
- */ -static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp) -{ - if (!(gfp & __GFP_FS) || - (static_branch_unlikely(&no_sleep_enabled) && b->c->no_sleep)) { - if (test_bit_acquire(B_READING, &b->state) || - test_bit(B_WRITING, &b->state) || - test_bit(B_DIRTY, &b->state)) - return false; - } - - if (b->hold_count) - return false; - - __make_buffer_clean(b); - __unlink_buffer(b); - __free_buffer_wake(b); - - return true; -} - static unsigned long get_retain_buffers(struct dm_bufio_client *c) { unsigned long retain_bytes = READ_ONCE(dm_bufio_retain_bytes); @@ -1686,22 +2345,28 @@ static unsigned long get_retain_buffers(struct dm_bufio_client *c) static void __scan(struct dm_bufio_client *c) { int l; - struct dm_buffer *b, *tmp; + struct dm_buffer *b; unsigned long freed = 0; - unsigned long count = c->n_buffers[LIST_CLEAN] + - c->n_buffers[LIST_DIRTY]; unsigned long retain_target = get_retain_buffers(c); + unsigned long count = cache_total(&c->cache); for (l = 0; l < LIST_SIZE; l++) { - list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) { + while (true) { if (count - freed <= retain_target) atomic_long_set(&c->need_shrink, 0); if (!atomic_long_read(&c->need_shrink)) - return; - if (__try_evict_buffer(b, GFP_KERNEL)) { - atomic_long_dec(&c->need_shrink); - freed++; - } + break; + + b = cache_evict(&c->cache, l, + l == LIST_CLEAN ? is_clean : is_dirty, c); + if (!b) + break; + + __make_buffer_clean(b); + __free_buffer_wake(b); + + atomic_long_dec(&c->need_shrink); + freed++; cond_resched(); } } @@ -1730,8 +2395,7 @@ static unsigned long dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink static unsigned long dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc) { struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker); - unsigned long count = READ_ONCE(c->n_buffers[LIST_CLEAN]) + - READ_ONCE(c->n_buffers[LIST_DIRTY]); + unsigned long count = cache_total(&c->cache); unsigned long retain_target = get_retain_buffers(c); unsigned long queued_for_cleanup = atomic_long_read(&c->need_shrink); @@ -1758,8 +2422,8 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign unsigned int flags) { int r; + unsigned int num_locks; struct dm_bufio_client *c; - unsigned int i; char slab_name[27]; if (!block_size || block_size & ((1 << SECTOR_SHIFT) - 1)) { @@ -1768,12 +2432,13 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign goto bad_client; } - c = kzalloc(sizeof(*c), GFP_KERNEL); + num_locks = dm_num_hash_locks(); + c = kzalloc(sizeof(*c) + (num_locks * sizeof(struct buffer_tree)), GFP_KERNEL); if (!c) { r = -ENOMEM; goto bad_client; } - c->buffer_tree = RB_ROOT; + cache_init(&c->cache, num_locks); c->bdev = bdev; c->block_size = block_size; @@ -1790,11 +2455,6 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign static_branch_inc(&no_sleep_enabled); } - for (i = 0; i < LIST_SIZE; i++) { - INIT_LIST_HEAD(&c->lru[i]); - c->n_buffers[i] = 0; - } - mutex_init(&c->lock); spin_lock_init(&c->spinlock); INIT_LIST_HEAD(&c->reserved_buffers); @@ -1866,9 +2526,9 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign bad: while (!list_empty(&c->reserved_buffers)) { - struct dm_buffer *b = list_entry(c->reserved_buffers.next, - struct dm_buffer, lru_list); - list_del(&b->lru_list); + struct dm_buffer *b = list_to_buffer(c->reserved_buffers.next); + + list_del(&b->lru.list); free_buffer(b); } kmem_cache_destroy(c->slab_cache); 
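/*
 * Sketch, not part of the patch: the create path above makes a single
 * allocation sized for the client plus its flexible array of per-tree
 * locks, and blocks are spread across trees by dm_hash_locks_index()
 * from dm core. A user-space rendering of the same pattern follows.
 * The mask hash is a stand-in that assumes a power-of-two lock count,
 * and every name below is illustrative.
 */
#include <stdlib.h>

struct tree {
	/* stands in for the rw_semaphore + rb_root pair */
	int lock;
	void *root;
};

struct client {
	unsigned int num_locks;
	struct tree trees[];	/* flexible array member: must be last */
};

static struct client *client_create(unsigned int num_locks)
{
	/* one allocation covers the struct and its trailing array */
	struct client *c = calloc(1, sizeof(*c) + num_locks * sizeof(struct tree));

	if (c)
		c->num_locks = num_locks;
	return c;
}

/* Map a block to a tree; a power-of-two num_locks makes this a mask. */
static unsigned int cache_index(unsigned long long block, unsigned int num_locks)
{
	return (unsigned int)(block & (num_locks - 1));
}

/*
 * Usage in the same spirit as the patch's cache_read_lock():
 *	struct client *c = client_create(64);
 *	unsigned int idx = cache_index(block, c->num_locks);
 *	... take c->trees[idx].lock before touching c->trees[idx].root ...
 */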
@@ -1905,23 +2565,23 @@ void dm_bufio_client_destroy(struct dm_bufio_client *c)
 
 	mutex_unlock(&dm_bufio_clients_lock);
 
-	BUG_ON(!RB_EMPTY_ROOT(&c->buffer_tree));
-	BUG_ON(c->need_reserved_buffers);
+	WARN_ON(c->need_reserved_buffers);
 
 	while (!list_empty(&c->reserved_buffers)) {
-		struct dm_buffer *b = list_entry(c->reserved_buffers.next,
-						 struct dm_buffer, lru_list);
-		list_del(&b->lru_list);
+		struct dm_buffer *b = list_to_buffer(c->reserved_buffers.next);
+
+		list_del(&b->lru.list);
 		free_buffer(b);
 	}
 
 	for (i = 0; i < LIST_SIZE; i++)
-		if (c->n_buffers[i])
-			DMERR("leaked buffer count %d: %ld", i, c->n_buffers[i]);
+		if (cache_count(&c->cache, i))
+			DMERR("leaked buffer count %d: %lu", i, cache_count(&c->cache, i));
 
 	for (i = 0; i < LIST_SIZE; i++)
-		BUG_ON(c->n_buffers[i]);
+		WARN_ON(cache_count(&c->cache, i));
 
+	cache_destroy(&c->cache);
 	kmem_cache_destroy(c->slab_cache);
 	kmem_cache_destroy(c->slab_buffer);
 	dm_io_client_destroy(c->dm_io);
@@ -1938,6 +2598,8 @@ void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start)
 }
 EXPORT_SYMBOL_GPL(dm_bufio_set_sector_offset);
 
+/*--------------------------------------------------------------*/
+
 static unsigned int get_max_age_hz(void)
 {
 	unsigned int max_age = READ_ONCE(dm_bufio_max_age);
@@ -1950,13 +2612,74 @@ static unsigned int get_max_age_hz(void)
 
 static bool older_than(struct dm_buffer *b, unsigned long age_hz)
 {
-	return time_after_eq(jiffies, b->last_accessed + age_hz);
+	return time_after_eq(jiffies, READ_ONCE(b->last_accessed) + age_hz);
+}
+
+struct evict_params {
+	gfp_t gfp;
+	unsigned long age_hz;
+
+	/*
+	 * This gets updated with the oldest last_accessed (ie. least
+	 * recently used) of the evicted buffers.  It will not be reinitialised
+	 * by __evict_many(), so you can use it across multiple invocations.
+	 */
+	unsigned long last_accessed;
+};
+
+/*
+ * We may not be able to evict this buffer if IO is pending or the client
+ * is still using it.
+ *
+ * And if GFP_NOFS is used, we must not do any I/O because we hold
+ * dm_bufio_clients_lock and we would risk deadlock if the I/O gets
+ * rerouted to a different bufio client.
+ */
+static enum evict_result select_for_evict(struct dm_buffer *b, void *context)
+{
+	struct evict_params *params = context;
+
+	if (!(params->gfp & __GFP_FS) ||
+	    (static_branch_unlikely(&no_sleep_enabled) && b->c->no_sleep)) {
+		if (test_bit_acquire(B_READING, &b->state) ||
+		    test_bit(B_WRITING, &b->state) ||
+		    test_bit(B_DIRTY, &b->state))
+			return ER_DONT_EVICT;
+	}
+
+	return older_than(b, params->age_hz) ? ER_EVICT : ER_STOP;
+}
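select_for_evict() above is a predicate handed to cache_evict(): candidates are offered oldest-first, ER_DONT_EVICT skips a buffer that is busy, and ER_STOP may end the walk early because every buffer after the first too-young one is younger still. A self-contained model of that callback protocol, under the assumption of an array ordered oldest-first; struct buf, too_old() and evict_one() are invented for the sketch.

#include <stdio.h>

enum evict_result { ER_EVICT, ER_DONT_EVICT, ER_STOP };

struct buf {
	unsigned long last_accessed;
	int busy;			/* models B_READING/B_WRITING/B_DIRTY */
};

typedef enum evict_result (*pred_fn)(struct buf *b, void *context);

static enum evict_result too_old(struct buf *b, void *context)
{
	unsigned long *cutoff = context;

	if (b->busy)
		return ER_DONT_EVICT;	/* skip it, keep scanning */
	return b->last_accessed <= *cutoff ? ER_EVICT : ER_STOP;
}

/* walk oldest to newest; return the index of the first accepted entry */
static int evict_one(struct buf *bufs, int n, pred_fn pred, void *context)
{
	int i;

	for (i = 0; i < n; i++) {
		switch (pred(&bufs[i], context)) {
		case ER_EVICT:
			return i;
		case ER_DONT_EVICT:
			continue;	/* busy: try the next-oldest */
		case ER_STOP:
			return -1;	/* everything further on is newer */
		}
	}
	return -1;
}

int main(void)
{
	struct buf bufs[] = { { 10, 1 }, { 20, 0 }, { 90, 0 } }; /* oldest first */
	unsigned long cutoff = 50;

	printf("evicted index: %d\n", evict_one(bufs, 3, too_old, &cutoff));
	return 0;
}

Here index 0 is old but busy, so index 1 gets evicted; had the scan reached index 2, ER_STOP would have ended it without touching the rest of the list.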
 
-static void __evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz)
+static unsigned long __evict_many(struct dm_bufio_client *c,
+				  struct evict_params *params,
+				  int list_mode, unsigned long max_count)
 {
-	struct dm_buffer *b, *tmp;
-	unsigned long retain_target = get_retain_buffers(c);
+	unsigned long count;
+	unsigned long last_accessed;
+	struct dm_buffer *b;
+
+	for (count = 0; count < max_count; count++) {
+		b = cache_evict(&c->cache, list_mode, select_for_evict, params);
+		if (!b)
+			break;
+
+		last_accessed = READ_ONCE(b->last_accessed);
+		if (time_after_eq(params->last_accessed, last_accessed))
+			params->last_accessed = last_accessed;
+
+		__make_buffer_clean(b);
+		__free_buffer_wake(b);
+
+		cond_resched();
+	}
+
+	return count;
+}
+
+static void evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz)
+{
+	struct evict_params params = {.gfp = 0, .age_hz = age_hz, .last_accessed = 0};
+	unsigned long retain = get_retain_buffers(c);
 	unsigned long count;
 	LIST_HEAD(write_list);
 
@@ -1969,112 +2692,135 @@ static void __evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz)
 		dm_bufio_lock(c);
 	}
 
-	count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
-	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_CLEAN], lru_list) {
-		if (count <= retain_target)
-			break;
-
-		if (!older_than(b, age_hz))
-			break;
-
-		if (__try_evict_buffer(b, 0))
-			count--;
-
-		cond_resched();
-	}
+	count = cache_total(&c->cache);
+	if (count > retain)
+		__evict_many(c, &params, LIST_CLEAN, count - retain);
 
 	dm_bufio_unlock(c);
 }
 
-static void do_global_cleanup(struct work_struct *w)
+static void cleanup_old_buffers(void)
 {
-	struct dm_bufio_client *locked_client = NULL;
-	struct dm_bufio_client *current_client;
-	struct dm_buffer *b;
-	unsigned int spinlock_hold_count;
-	unsigned long threshold = dm_bufio_cache_size -
-		dm_bufio_cache_size / DM_BUFIO_LOW_WATERMARK_RATIO;
-	unsigned long loops = global_num * 2;
+	unsigned long max_age_hz = get_max_age_hz();
+	struct dm_bufio_client *c;
 
 	mutex_lock(&dm_bufio_clients_lock);
 
-	while (1) {
-		cond_resched();
+	__cache_size_refresh();
 
-		spin_lock(&global_spinlock);
-		if (unlikely(dm_bufio_current_allocated <= threshold))
-			break;
+	list_for_each_entry(c, &dm_bufio_all_clients, client_list)
+		evict_old_buffers(c, max_age_hz);
 
-		spinlock_hold_count = 0;
-get_next:
-		if (!loops--)
-			break;
-		if (unlikely(list_empty(&global_queue)))
-			break;
-		b = list_entry(global_queue.prev, struct dm_buffer, global_list);
-
-		if (b->accessed) {
-			b->accessed = 0;
-			list_move(&b->global_list, &global_queue);
-			if (likely(++spinlock_hold_count < 16))
-				goto get_next;
-			spin_unlock(&global_spinlock);
-			continue;
-		}
+	mutex_unlock(&dm_bufio_clients_lock);
+}
 
-		current_client = b->c;
-		if (unlikely(current_client != locked_client)) {
-			if (locked_client)
-				dm_bufio_unlock(locked_client);
+static void work_fn(struct work_struct *w)
+{
+	cleanup_old_buffers();
 
-			if (!dm_bufio_trylock(current_client)) {
-				spin_unlock(&global_spinlock);
-				dm_bufio_lock(current_client);
-				locked_client = current_client;
-				continue;
-			}
+	queue_delayed_work(dm_bufio_wq, &dm_bufio_cleanup_old_work,
+			   DM_BUFIO_WORK_TIMER_SECS * HZ);
+}
 
-			locked_client = current_client;
-		}
+
+/*--------------------------------------------------------------*/
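older_than() and the last_accessed bookkeeping in __evict_many() both compare timestamps with time_after_eq(), which stays correct when the jiffies counter wraps because it tests the sign of the difference rather than raw magnitude. A standalone illustration of the idiom; my_time_after_eq() mirrors what the kernel macro boils down to for unsigned long timestamps.

#include <stdio.h>

/* same idea as the kernel's time_after_eq(a, b): true if a is at or after b */
static int my_time_after_eq(unsigned long a, unsigned long b)
{
	return (long)(a - b) >= 0;
}

int main(void)
{
	unsigned long before_wrap = (unsigned long)-100;	/* near ULONG_MAX */
	unsigned long after_wrap = 5;				/* counter wrapped */

	/* a naive comparison is fooled by the wrap; the signed form is not */
	printf("naive >=     : %d\n", after_wrap >= before_wrap);	/* 0 */
	printf("time_after_eq: %d\n",
	       my_time_after_eq(after_wrap, before_wrap));		/* 1 */
	return 0;
}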
 
-		spin_unlock(&global_spinlock);
+
+/*
+ * Global cleanup tries to evict the oldest buffers from across _all_
+ * the clients.  It does this by repeatedly evicting a few buffers from
+ * the client that holds the oldest buffer.  It's approximate, but hopefully
+ * good enough.
+ */
+static struct dm_bufio_client *__pop_client(void)
+{
+	struct list_head *h;
 
-		if (unlikely(!__try_evict_buffer(b, GFP_KERNEL))) {
-			spin_lock(&global_spinlock);
-			list_move(&b->global_list, &global_queue);
-			spin_unlock(&global_spinlock);
-		}
+	if (list_empty(&dm_bufio_all_clients))
+		return NULL;
+
+	h = dm_bufio_all_clients.next;
+	list_del(h);
+	return container_of(h, struct dm_bufio_client, client_list);
+}
+
+/*
+ * Inserts the client in the global client list based on its
+ * 'oldest_buffer' field.
+ */
+static void __insert_client(struct dm_bufio_client *new_client)
+{
+	struct dm_bufio_client *c;
+	struct list_head *h = dm_bufio_all_clients.next;
+
+	while (h != &dm_bufio_all_clients) {
+		c = container_of(h, struct dm_bufio_client, client_list);
+		if (time_after_eq(c->oldest_buffer, new_client->oldest_buffer))
+			break;
+		h = h->next;
 	}
 
-	spin_unlock(&global_spinlock);
+	list_add_tail(&new_client->client_list, h);
+}
 
-	if (locked_client)
-		dm_bufio_unlock(locked_client);
+static unsigned long __evict_a_few(unsigned long nr_buffers)
+{
+	unsigned long count;
+	struct dm_bufio_client *c;
+	struct evict_params params = {
+		.gfp = GFP_KERNEL,
+		.age_hz = 0,
+		/* set to jiffies in case there are no buffers in this client */
+		.last_accessed = jiffies
+	};
 
-	mutex_unlock(&dm_bufio_clients_lock);
+	c = __pop_client();
+	if (!c)
+		return 0;
+
+	dm_bufio_lock(c);
+	count = __evict_many(c, &params, LIST_CLEAN, nr_buffers);
+	dm_bufio_unlock(c);
+
+	if (count)
+		c->oldest_buffer = params.last_accessed;
+	__insert_client(c);
+
+	return count;
 }
 
-static void cleanup_old_buffers(void)
+static void check_watermarks(void)
 {
-	unsigned long max_age_hz = get_max_age_hz();
+	LIST_HEAD(write_list);
 	struct dm_bufio_client *c;
 
 	mutex_lock(&dm_bufio_clients_lock);
+	list_for_each_entry(c, &dm_bufio_all_clients, client_list) {
+		dm_bufio_lock(c);
+		__check_watermark(c, &write_list);
+		dm_bufio_unlock(c);
+	}
+	mutex_unlock(&dm_bufio_clients_lock);
 
-	__cache_size_refresh();
+	__flush_write_list(&write_list);
+}
 
-	list_for_each_entry(c, &dm_bufio_all_clients, client_list)
-		__evict_old_buffers(c, max_age_hz);
+static void evict_old(void)
+{
+	unsigned long threshold = dm_bufio_cache_size -
+		dm_bufio_cache_size / DM_BUFIO_LOW_WATERMARK_RATIO;
 
+	mutex_lock(&dm_bufio_clients_lock);
+	while (dm_bufio_current_allocated > threshold) {
+		if (!__evict_a_few(64))
+			break;
+		cond_resched();
+	}
 	mutex_unlock(&dm_bufio_clients_lock);
 }
 
-static void work_fn(struct work_struct *w)
+static void do_global_cleanup(struct work_struct *w)
 {
-	cleanup_old_buffers();
-
-	queue_delayed_work(dm_bufio_wq, &dm_bufio_cleanup_old_work,
-			   DM_BUFIO_WORK_TIMER_SECS * HZ);
+	check_watermarks();
+	evict_old();
 }
 
 /*
@@ -2159,7 +2905,7 @@ static void __exit dm_bufio_exit(void)
 			bug = 1;
 		}
 
-	BUG_ON(bug);
+	WARN_ON(bug); /* leaks are not worth crashing the system */
 }
 
 module_init(dm_bufio_init)
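The new global shrinker keeps clients sorted by their oldest_buffer estimate: __pop_client() takes the stalest one, __evict_a_few() frees a batch and refreshes the estimate from evict_params.last_accessed, and __insert_client() files the client back in order; evict_old() repeats this until dm_bufio_current_allocated drops below dm_bufio_cache_size - dm_bufio_cache_size / DM_BUFIO_LOW_WATERMARK_RATIO. The toy below models that rotation in userspace under simplifying assumptions: a linear minimum scan replaces the sorted list, plain counters replace real buffers, and every name in it is invented.

#include <stdio.h>

#define NCLIENTS 3

struct client {
	const char *name;
	unsigned long oldest_buffer;	/* age estimate, as in the patch */
	unsigned long n_buffers;
};

/* evict up to nr buffers and age the estimate, like __evict_a_few() */
static unsigned long evict_a_few(struct client *c, unsigned long nr)
{
	unsigned long count = c->n_buffers < nr ? c->n_buffers : nr;

	c->n_buffers -= count;
	c->oldest_buffer += 10;		/* the surviving buffers are newer */
	return count;
}

/* linear stand-in for the sorted list walked by __pop_client() */
static struct client *oldest_client(struct client *cs, int n)
{
	struct client *best = &cs[0];
	int i;

	for (i = 1; i < n; i++)
		if (cs[i].oldest_buffer < best->oldest_buffer)
			best = &cs[i];
	return best;
}

int main(void)
{
	struct client cs[NCLIENTS] = {
		{ "meta", 100, 40 }, { "data", 30, 200 }, { "misc", 70, 10 },
	};
	unsigned long total = 250, threshold = 180;	/* low watermark */

	/* mirror evict_old(): shrink until we are under the watermark */
	while (total > threshold) {
		struct client *c = oldest_client(cs, NCLIENTS);
		unsigned long freed = evict_a_few(c, 64);

		if (!freed)
			break;
		total -= freed;
		printf("evicted %lu from %s, %lu left overall\n",
		       freed, c->name, total);
	}
	return 0;
}

Evicting a batch at a time from whichever client currently looks oldest approximates a global LRU without the global_spinlock that the deleted do_global_cleanup() had to take around every buffer it inspected.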