diff options
author | Jens Axboe <axboe@kernel.dk> | 2024-03-26 02:07:22 +0100 |
---|---|---|
committer | Jens Axboe <axboe@kernel.dk> | 2024-04-15 16:10:25 +0200 |
commit | 05eb5fe226461c6459b81f109a9c23b46ed8bc3b (patch) | |
tree | 853c3dee6195eccb9dbfe15dd6ea8019a17e0b1c /io_uring/io_uring.c | |
parent | io_uring/poll: shrink alloc cache size to 32 (diff) | |
download | linux-05eb5fe226461c6459b81f109a9c23b46ed8bc3b.tar.xz linux-05eb5fe226461c6459b81f109a9c23b46ed8bc3b.zip |
io_uring: refill request cache in memory order
The allocator will generally return memory in order, but
__io_alloc_req_refill() then adds them to a stack and we'll extract them
in the opposite order. This obviously isn't a huge deal, but:
1) it makes debugging easier when they are in order
2) keeping them in-order is the right thing to do
3) reduces the code for adding them to the stack
Just add them in reverse to the stack.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'io_uring/io_uring.c')
-rw-r--r-- | io_uring/io_uring.c | 6 |
1 file changed, 3 insertions, 3 deletions
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c index 8e53b93eeb18..8489f1820ad9 100644 --- a/io_uring/io_uring.c +++ b/io_uring/io_uring.c @@ -1039,7 +1039,7 @@ __cold bool __io_alloc_req_refill(struct io_ring_ctx *ctx) { gfp_t gfp = GFP_KERNEL | __GFP_NOWARN; void *reqs[IO_REQ_ALLOC_BATCH]; - int ret, i; + int ret; /* * If we have more than a batch's worth of requests in our IRQ side @@ -1066,8 +1066,8 @@ __cold bool __io_alloc_req_refill(struct io_ring_ctx *ctx) } percpu_ref_get_many(&ctx->refs, ret); - for (i = 0; i < ret; i++) { - struct io_kiocb *req = reqs[i]; + while (ret--) { + struct io_kiocb *req = reqs[ret]; io_preinit_req(req, ctx); io_req_add_to_cache(req, ctx); |