path: root/fs/netfs/buffered_read.c
author    David Howells <dhowells@redhat.com>    2024-12-13 14:50:03 +0100
committer Christian Brauner <brauner@kernel.org>    2024-12-20 22:07:56 +0100
commit    105549d09a539a876b7c3330ab52d8aceedad358 (patch)
tree      93258e5fc0cff2c98632c6b34e412106859de9b2 /fs/netfs/buffered_read.c
parent    netfs: Fix non-contiguous donation between completed reads (diff)
download  linux-105549d09a539a876b7c3330ab52d8aceedad358.tar.xz
          linux-105549d09a539a876b7c3330ab52d8aceedad358.zip
netfs: Fix enomem handling in buffered reads
If netfs_read_to_pagecache() gets an error from either ->prepare_read() or
from netfs_prepare_read_iterator(), it needs to decrement ->nr_outstanding,
cancel the subrequest and break out of the issuing loop. Currently, it only
does this for two of the cases, but there are two more that aren't handled.

Fix this by moving the handling to a common place and jumping to it from all
four places. This is in preference to inserting a wrapper around
netfs_prepare_read_iterator() as proposed by Dmitry Antipov[1].

Link: https://lore.kernel.org/r/20241202093943.227786-1-dmantipov@yandex.ru/ [1]

Fixes: ee4cdf7ba857 ("netfs: Speed up buffered reading")
Reported-by: syzbot+404b4b745080b6210c6c@syzkaller.appspotmail.com
Closes: https://syzkaller.appspot.com/bug?extid=404b4b745080b6210c6c
Signed-off-by: David Howells <dhowells@redhat.com>
Link: https://lore.kernel.org/r/20241213135013.2964079-4-dhowells@redhat.com
Tested-by: syzbot+404b4b745080b6210c6c@syzkaller.appspotmail.com
cc: Dmitry Antipov <dmantipov@yandex.ru>
cc: Jeff Layton <jlayton@kernel.org>
cc: netfs@lists.linux.dev
cc: linux-fsdevel@vger.kernel.org
Signed-off-by: Christian Brauner <brauner@kernel.org>
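
As a rough, standalone illustration of the pattern the patch adopts (not the
kernel code itself), the sketch below funnels every preparation failure
through one common error path that records the error, drops the outstanding
count and cancels the subrequest, mirroring the prep_failed/prep_iter_failed
labels in the diff. All names here (struct request/subrequest, prep_read,
prep_iter, cancel_subrequest, issue_one) are hypothetical stand-ins, and the
-12 return merely simulates -ENOMEM:

/* Consolidated error handling via goto labels (simplified sketch). */
#include <stdio.h>

struct request    { int nr_outstanding; };
struct subrequest { int error; };

static int prep_read(struct subrequest *s) { (void)s; return 0; }   /* stand-in for ->prepare_read() */
static int prep_iter(struct subrequest *s) { (void)s; return -12; } /* stand-in; pretend it hit ENOMEM */
static void cancel_subrequest(struct subrequest *s) { (void)s; }    /* stand-in for putting the subreq */

static int issue_one(struct request *r, struct subrequest *s)
{
	int ret, slice;

	ret = prep_read(s);
	if (ret < 0)
		goto prep_failed;

	slice = prep_iter(s);
	if (slice < 0)
		goto prep_iter_failed;

	/* ...issue the read here... */
	return slice;

prep_iter_failed:
	ret = slice;
prep_failed:
	s->error = ret;            /* remember why this subrequest died */
	r->nr_outstanding--;       /* it no longer counts as in flight */
	cancel_subrequest(s);
	return ret;
}

int main(void)
{
	struct request r = { .nr_outstanding = 1 };
	struct subrequest s = { 0 };

	printf("ret=%d outstanding=%d\n", issue_one(&r, &s), r.nr_outstanding);
	return 0;
}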
Diffstat (limited to 'fs/netfs/buffered_read.c')
-rw-r--r--  fs/netfs/buffered_read.c  |  28
1 file changed, 16 insertions, 12 deletions
diff --git a/fs/netfs/buffered_read.c b/fs/netfs/buffered_read.c
index 7ac34550c403..4dc9b8286355 100644
--- a/fs/netfs/buffered_read.c
+++ b/fs/netfs/buffered_read.c
@@ -275,22 +275,14 @@ static void netfs_read_to_pagecache(struct netfs_io_request *rreq)
netfs_stat(&netfs_n_rh_download);
if (rreq->netfs_ops->prepare_read) {
ret = rreq->netfs_ops->prepare_read(subreq);
- if (ret < 0) {
- atomic_dec(&rreq->nr_outstanding);
- netfs_put_subrequest(subreq, false,
- netfs_sreq_trace_put_cancel);
- break;
- }
+ if (ret < 0)
+ goto prep_failed;
trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);
}
slice = netfs_prepare_read_iterator(subreq);
- if (slice < 0) {
- atomic_dec(&rreq->nr_outstanding);
- netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_cancel);
- ret = slice;
- break;
- }
+ if (slice < 0)
+ goto prep_iter_failed;
rreq->netfs_ops->issue_read(subreq);
goto done;
@@ -302,6 +294,8 @@ static void netfs_read_to_pagecache(struct netfs_io_request *rreq)
trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
netfs_stat(&netfs_n_rh_zero);
slice = netfs_prepare_read_iterator(subreq);
+ if (slice < 0)
+ goto prep_iter_failed;
__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
netfs_read_subreq_terminated(subreq, 0, false);
goto done;
@@ -310,6 +304,8 @@ static void netfs_read_to_pagecache(struct netfs_io_request *rreq)
if (source == NETFS_READ_FROM_CACHE) {
trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
slice = netfs_prepare_read_iterator(subreq);
+ if (slice < 0)
+ goto prep_iter_failed;
netfs_read_cache_to_pagecache(rreq, subreq);
goto done;
}
@@ -318,6 +314,14 @@ static void netfs_read_to_pagecache(struct netfs_io_request *rreq)
WARN_ON_ONCE(1);
break;
+ prep_iter_failed:
+ ret = slice;
+ prep_failed:
+ subreq->error = ret;
+ atomic_dec(&rreq->nr_outstanding);
+ netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_cancel);
+ break;
+
done:
size -= slice;
start += slice;