Diffstat (limited to 'fs')
-rw-r--r--  fs/9p/vfs_addr.c | 6
-rw-r--r--  fs/afs/addr_prefs.c | 6
-rw-r--r--  fs/afs/afs.h | 2
-rw-r--r--  fs/afs/afs_vl.h | 1
-rw-r--r--  fs/afs/vl_alias.c | 8
-rw-r--r--  fs/afs/vlclient.c | 2
-rw-r--r--  fs/afs/write.c | 5
-rw-r--r--  fs/btrfs/bio.c | 16
-rw-r--r--  fs/btrfs/ctree.c | 11
-rw-r--r--  fs/btrfs/ctree.h | 19
-rw-r--r--  fs/btrfs/extent-tree.c | 6
-rw-r--r--  fs/btrfs/inode.c | 154
-rw-r--r--  fs/btrfs/ioctl.c | 122
-rw-r--r--  fs/btrfs/qgroup.c | 3
-rw-r--r--  fs/btrfs/relocation.c | 6
-rw-r--r--  fs/btrfs/scrub.c | 4
-rw-r--r--  fs/btrfs/send.c | 6
-rw-r--r--  fs/btrfs/sysfs.c | 6
-rw-r--r--  fs/btrfs/tree-checker.c | 27
-rw-r--r--  fs/btrfs/zlib.c | 4
-rw-r--r--  fs/btrfs/zoned.c | 5
-rw-r--r--  fs/cachefiles/daemon.c | 14
-rw-r--r--  fs/cachefiles/internal.h | 3
-rw-r--r--  fs/cachefiles/security.c | 6
-rw-r--r--  fs/ceph/file.c | 77
-rw-r--r--  fs/ceph/mds_client.c | 9
-rw-r--r--  fs/ceph/super.c | 2
-rw-r--r--  fs/erofs/data.c | 36
-rw-r--r--  fs/erofs/fileio.c | 9
-rw-r--r--  fs/erofs/fscache.c | 10
-rw-r--r--  fs/erofs/internal.h | 15
-rw-r--r--  fs/erofs/super.c | 78
-rw-r--r--  fs/erofs/zdata.c | 4
-rw-r--r--  fs/erofs/zutil.c | 7
-rw-r--r--  fs/exfat/dir.c | 3
-rw-r--r--  fs/exfat/fatent.c | 10
-rw-r--r--  fs/exfat/file.c | 6
-rw-r--r--  fs/exfat/namei.c | 4
-rw-r--r--  fs/file.c | 1
-rw-r--r--  fs/fuse/dir.c | 2
-rw-r--r--  fs/fuse/file.c | 31
-rw-r--r--  fs/hfs/super.c | 4
-rw-r--r--  fs/hugetlbfs/inode.c | 2
-rw-r--r--  fs/iomap/buffered-io.c | 68
-rw-r--r--  fs/jbd2/commit.c | 4
-rw-r--r--  fs/jbd2/revoke.c | 2
-rw-r--r--  fs/mount.h | 15
-rw-r--r--  fs/namespace.c | 24
-rw-r--r--  fs/netfs/buffered_read.c | 28
-rw-r--r--  fs/netfs/direct_write.c | 8
-rw-r--r--  fs/netfs/read_collect.c | 33
-rw-r--r--  fs/netfs/read_pgpriv2.c | 4
-rw-r--r--  fs/netfs/read_retry.c | 11
-rw-r--r--  fs/netfs/write_collect.c | 14
-rw-r--r--  fs/netfs/write_issue.c | 2
-rw-r--r--  fs/nfs/fscache.c | 9
-rw-r--r--  fs/nfs/pnfs.c | 2
-rw-r--r--  fs/nfs/super.c | 1
-rw-r--r--  fs/nfsd/export.c | 31
-rw-r--r--  fs/nfsd/export.h | 4
-rw-r--r--  fs/nfsd/nfs4callback.c | 4
-rw-r--r--  fs/nfsd/nfs4proc.c | 13
-rw-r--r--  fs/nilfs2/btnode.c | 1
-rw-r--r--  fs/nilfs2/gcinode.c | 2
-rw-r--r--  fs/nilfs2/inode.c | 13
-rw-r--r--  fs/nilfs2/namei.c | 5
-rw-r--r--  fs/nilfs2/nilfs.h | 1
-rw-r--r--  fs/notify/fdinfo.c | 4
-rw-r--r--  fs/ocfs2/localalloc.c | 27
-rw-r--r--  fs/ocfs2/quota_global.c | 2
-rw-r--r--  fs/ocfs2/quota_local.c | 1
-rw-r--r--  fs/overlayfs/copy_up.c | 16
-rw-r--r--  fs/overlayfs/export.c | 49
-rw-r--r--  fs/overlayfs/namei.c | 4
-rw-r--r--  fs/overlayfs/overlayfs.h | 2
-rw-r--r--  fs/proc/task_mmu.c | 2
-rw-r--r--  fs/qnx6/inode.c | 11
-rw-r--r--  fs/smb/client/Kconfig | 1
-rw-r--r--  fs/smb/client/cifsfs.c | 2
-rw-r--r--  fs/smb/client/cifsproto.h | 2
-rw-r--r--  fs/smb/client/cifssmb.c | 13
-rw-r--r--  fs/smb/client/connect.c | 36
-rw-r--r--  fs/smb/client/file.c | 6
-rw-r--r--  fs/smb/client/namespace.c | 19
-rw-r--r--  fs/smb/client/sess.c | 25
-rw-r--r--  fs/smb/client/smb2pdu.c | 14
-rw-r--r--  fs/smb/server/connection.c | 18
-rw-r--r--  fs/smb/server/connection.h | 1
-rw-r--r--  fs/smb/server/server.c | 7
-rw-r--r--  fs/smb/server/server.h | 1
-rw-r--r--  fs/smb/server/smb2pdu.c | 45
-rw-r--r--  fs/smb/server/smb2pdu.h | 10
-rw-r--r--  fs/smb/server/transport_ipc.c | 5
-rw-r--r--  fs/smb/server/transport_rdma.c | 3
-rw-r--r--  fs/smb/server/vfs.c | 3
-rw-r--r--  fs/xfs/libxfs/xfs_rtgroup.h | 2
-rw-r--r--  fs/xfs/xfs_dquot.c | 3
97 files changed, 856 insertions, 529 deletions
diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
index 819c75233235..3bc9ce6c575e 100644
--- a/fs/9p/vfs_addr.c
+++ b/fs/9p/vfs_addr.c
@@ -57,6 +57,8 @@ static void v9fs_issue_write(struct netfs_io_subrequest *subreq)
int err, len;
len = p9_client_write(fid, subreq->start, &subreq->io_iter, &err);
+ if (len > 0)
+ __set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
netfs_write_subrequest_terminated(subreq, len ?: err, false);
}
@@ -80,8 +82,10 @@ static void v9fs_issue_read(struct netfs_io_subrequest *subreq)
if (pos + total >= i_size_read(rreq->inode))
__set_bit(NETFS_SREQ_HIT_EOF, &subreq->flags);
- if (!err)
+ if (!err) {
subreq->transferred += total;
+ __set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
+ }
netfs_read_subreq_terminated(subreq, err, false);
}
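
Both hunks apply the same rule: NETFS_SREQ_MADE_PROGRESS is set only when bytes actually moved, so the netfs retry machinery can tell partial progress from a stalled subrequest. A minimal sketch of the pattern (the demo_ name is illustrative, not part of the patch):

	#include <linux/netfs.h>

	static void demo_read_done(struct netfs_io_subrequest *subreq,
				   size_t transferred, int err)
	{
		if (!err && transferred > 0) {
			subreq->transferred += transferred;
			/* Only genuine progress should arm the flag. */
			__set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
		}
		netfs_read_subreq_terminated(subreq, err, false);
	}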
diff --git a/fs/afs/addr_prefs.c b/fs/afs/addr_prefs.c
index a189ff8a5034..c0384201b8fe 100644
--- a/fs/afs/addr_prefs.c
+++ b/fs/afs/addr_prefs.c
@@ -413,8 +413,10 @@ int afs_proc_addr_prefs_write(struct file *file, char *buf, size_t size)
do {
argc = afs_split_string(&buf, argv, ARRAY_SIZE(argv));
- if (argc < 0)
- return argc;
+ if (argc < 0) {
+ ret = argc;
+ goto done;
+ }
if (argc < 2)
goto inval;
diff --git a/fs/afs/afs.h b/fs/afs/afs.h
index b488072aee87..ec3db00bd081 100644
--- a/fs/afs/afs.h
+++ b/fs/afs/afs.h
@@ -10,7 +10,7 @@
#include <linux/in.h>
-#define AFS_MAXCELLNAME 256 /* Maximum length of a cell name */
+#define AFS_MAXCELLNAME 253 /* Maximum length of a cell name (DNS limited) */
#define AFS_MAXVOLNAME 64 /* Maximum length of a volume name */
#define AFS_MAXNSERVERS 8 /* Maximum servers in a basic volume record */
#define AFS_NMAXNSERVERS 13 /* Maximum servers in a N/U-class volume record */
diff --git a/fs/afs/afs_vl.h b/fs/afs/afs_vl.h
index a06296c8827d..b835e25a2c02 100644
--- a/fs/afs/afs_vl.h
+++ b/fs/afs/afs_vl.h
@@ -13,6 +13,7 @@
#define AFS_VL_PORT 7003 /* volume location service port */
#define VL_SERVICE 52 /* RxRPC service ID for the Volume Location service */
#define YFS_VL_SERVICE 2503 /* Service ID for AuriStor upgraded VL service */
+#define YFS_VL_MAXCELLNAME 256 /* Maximum length of a cell name in YFS protocol */
enum AFSVL_Operations {
VLGETENTRYBYID = 503, /* AFS Get VLDB entry by ID */
diff --git a/fs/afs/vl_alias.c b/fs/afs/vl_alias.c
index 9f36e14f1c2d..f9e76b604f31 100644
--- a/fs/afs/vl_alias.c
+++ b/fs/afs/vl_alias.c
@@ -253,6 +253,7 @@ static char *afs_vl_get_cell_name(struct afs_cell *cell, struct key *key)
static int yfs_check_canonical_cell_name(struct afs_cell *cell, struct key *key)
{
struct afs_cell *master;
+ size_t name_len;
char *cell_name;
cell_name = afs_vl_get_cell_name(cell, key);
@@ -264,8 +265,11 @@ static int yfs_check_canonical_cell_name(struct afs_cell *cell, struct key *key)
return 0;
}
- master = afs_lookup_cell(cell->net, cell_name, strlen(cell_name),
- NULL, false);
+ name_len = strlen(cell_name);
+ if (!name_len || name_len > AFS_MAXCELLNAME)
+ master = ERR_PTR(-EOPNOTSUPP);
+ else
+ master = afs_lookup_cell(cell->net, cell_name, name_len, NULL, false);
kfree(cell_name);
if (IS_ERR(master))
return PTR_ERR(master);
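
The bounds check keeps afs_lookup_cell() from seeing a name the AFS cell code cannot represent: empty or over-long names are mapped to -EOPNOTSUPP instead of being looked up. A hedged sketch of the validation (demo_ helper is illustrative; AFS_MAXCELLNAME comes from fs/afs/afs.h):

	#include <linux/errno.h>
	#include <linux/string.h>

	static int demo_check_cell_name(const char *name)
	{
		size_t len = strlen(name);

		/* AFS_MAXCELLNAME is 253 after this series (DNS limited). */
		if (!len || len > AFS_MAXCELLNAME)
			return -EOPNOTSUPP;
		return 0;
	}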
diff --git a/fs/afs/vlclient.c b/fs/afs/vlclient.c
index cac75f89b64a..55dd0fc5aad7 100644
--- a/fs/afs/vlclient.c
+++ b/fs/afs/vlclient.c
@@ -697,7 +697,7 @@ static int afs_deliver_yfsvl_get_cell_name(struct afs_call *call)
return ret;
namesz = ntohl(call->tmp);
- if (namesz > AFS_MAXCELLNAME)
+ if (namesz > YFS_VL_MAXCELLNAME)
return afs_protocol_error(call, afs_eproto_cellname_len);
paddedsz = (namesz + 3) & ~3;
call->count = namesz;
diff --git a/fs/afs/write.c b/fs/afs/write.c
index 34107b55f834..ccb6aa8027c5 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -122,7 +122,7 @@ static void afs_issue_write_worker(struct work_struct *work)
if (subreq->debug_index == 3)
return netfs_write_subrequest_terminated(subreq, -ENOANO, false);
- if (!test_bit(NETFS_SREQ_RETRYING, &subreq->flags)) {
+ if (!subreq->retry_count) {
set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
return netfs_write_subrequest_terminated(subreq, -EAGAIN, false);
}
@@ -149,6 +149,9 @@ static void afs_issue_write_worker(struct work_struct *work)
afs_wait_for_operation(op);
ret = afs_put_operation(op);
switch (ret) {
+ case 0:
+ __set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
+ break;
case -EACCES:
case -EPERM:
case -ENOKEY:
diff --git a/fs/btrfs/bio.c b/fs/btrfs/bio.c
index af3db0a7ae4d..7ea6f0b43b95 100644
--- a/fs/btrfs/bio.c
+++ b/fs/btrfs/bio.c
@@ -358,7 +358,7 @@ static void btrfs_simple_end_io(struct bio *bio)
INIT_WORK(&bbio->end_io_work, btrfs_end_bio_work);
queue_work(btrfs_end_io_wq(fs_info, bio), &bbio->end_io_work);
} else {
- if (bio_op(bio) == REQ_OP_ZONE_APPEND && !bio->bi_status)
+ if (bio_is_zone_append(bio) && !bio->bi_status)
btrfs_record_physical_zoned(bbio);
btrfs_bio_end_io(bbio, bbio->bio.bi_status);
}
@@ -401,7 +401,7 @@ static void btrfs_orig_write_end_io(struct bio *bio)
else
bio->bi_status = BLK_STS_OK;
- if (bio_op(bio) == REQ_OP_ZONE_APPEND && !bio->bi_status)
+ if (bio_is_zone_append(bio) && !bio->bi_status)
stripe->physical = bio->bi_iter.bi_sector << SECTOR_SHIFT;
btrfs_bio_end_io(bbio, bbio->bio.bi_status);
@@ -415,7 +415,7 @@ static void btrfs_clone_write_end_io(struct bio *bio)
if (bio->bi_status) {
atomic_inc(&stripe->bioc->error);
btrfs_log_dev_io_error(bio, stripe->dev);
- } else if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
+ } else if (bio_is_zone_append(bio)) {
stripe->physical = bio->bi_iter.bi_sector << SECTOR_SHIFT;
}
@@ -652,8 +652,14 @@ static u64 btrfs_append_map_length(struct btrfs_bio *bbio, u64 map_length)
map_length = min(map_length, bbio->fs_info->max_zone_append_size);
sector_offset = bio_split_rw_at(&bbio->bio, &bbio->fs_info->limits,
&nr_segs, map_length);
- if (sector_offset)
- return sector_offset << SECTOR_SHIFT;
+ if (sector_offset) {
+ /*
+ * bio_split_rw_at() could split at a size smaller than our
+ * sectorsize and thus cause unaligned I/Os. Fix that by
+ * always rounding down to the nearest boundary.
+ */
+ return ALIGN_DOWN(sector_offset << SECTOR_SHIFT, bbio->fs_info->sectorsize);
+ }
return map_length;
}
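
ALIGN_DOWN() requires a power-of-two alignment, which btrfs guarantees for sectorsize, and rounding the split point down keeps every resulting bio a whole number of sectors. A minimal sketch of the rounding, under that power-of-two assumption (demo_ name is illustrative):

	#include <linux/align.h>
	#include <linux/types.h>

	/* Clamp a byte-granular split point to the sector boundary so a
	 * split bio can never carry a sub-sector payload. */
	static u64 demo_clamp_split(u64 split_bytes, u32 sectorsize)
	{
		return ALIGN_DOWN(split_bytes, sectorsize);
	}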
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 693dc27ffb89..185985a337b3 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -654,6 +654,8 @@ int btrfs_force_cow_block(struct btrfs_trans_handle *trans,
goto error_unlock_cow;
}
}
+
+ trace_btrfs_cow_block(root, buf, cow);
if (unlock_orig)
btrfs_tree_unlock(buf);
free_extent_buffer_stale(buf);
@@ -710,7 +712,6 @@ int btrfs_cow_block(struct btrfs_trans_handle *trans,
{
struct btrfs_fs_info *fs_info = root->fs_info;
u64 search_start;
- int ret;
if (unlikely(test_bit(BTRFS_ROOT_DELETING, &root->state))) {
btrfs_abort_transaction(trans, -EUCLEAN);
@@ -751,12 +752,8 @@ int btrfs_cow_block(struct btrfs_trans_handle *trans,
	 * Also we don't care about the error, as it's handled internally.
*/
btrfs_qgroup_trace_subtree_after_cow(trans, root, buf);
- ret = btrfs_force_cow_block(trans, root, buf, parent, parent_slot,
- cow_ret, search_start, 0, nest);
-
- trace_btrfs_cow_block(root, buf, *cow_ret);
-
- return ret;
+ return btrfs_force_cow_block(trans, root, buf, parent, parent_slot,
+ cow_ret, search_start, 0, nest);
}
ALLOW_ERROR_INJECTION(btrfs_cow_block, ERRNO);
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 307dedf95c70..2c341956a01c 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -371,6 +371,25 @@ static inline void btrfs_set_root_last_trans(struct btrfs_root *root, u64 transi
}
/*
+ * Return the generation this root started with.
+ *
+ * Every normal root is created with root->root_key.offset set to its
+ * originating generation. If it is a snapshot, it is the generation when the
+ * snapshot was created.
+ *
+ * However, for TREE_RELOC roots root_key.offset is the objectid of the owning
+ * tree root. Thankfully we copy the root item of the owning tree root, which
+ * has its last_snapshot set to what we would have root_key.offset set to, so
+ * return that if this is a TREE_RELOC root.
+ */
+static inline u64 btrfs_root_origin_generation(const struct btrfs_root *root)
+{
+ if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID)
+ return btrfs_root_last_snapshot(&root->root_item);
+ return root->root_key.offset;
+}
+
+/*
* Structure that conveys information about an extent that is going to replace
* all the extents in a file range.
*/
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index bd09dd3ad1a0..3c6f7fecbb9a 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -5285,7 +5285,7 @@ static bool visit_node_for_delete(struct btrfs_root *root, struct walk_control *
* reference to it.
*/
generation = btrfs_node_ptr_generation(eb, slot);
- if (!wc->update_ref || generation <= root->root_key.offset)
+ if (!wc->update_ref || generation <= btrfs_root_origin_generation(root))
return false;
/*
@@ -5340,7 +5340,7 @@ static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
goto reada;
if (wc->stage == UPDATE_BACKREF &&
- generation <= root->root_key.offset)
+ generation <= btrfs_root_origin_generation(root))
continue;
/* We don't lock the tree block, it's OK to be racy here */
@@ -5683,7 +5683,7 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
* for the subtree
*/
if (wc->stage == UPDATE_BACKREF &&
- generation <= root->root_key.offset) {
+ generation <= btrfs_root_origin_generation(root)) {
wc->lookup_info = 1;
return 1;
}
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 488edca8333a..27b2fe7f735d 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -9078,9 +9078,9 @@ out:
}
struct btrfs_encoded_read_private {
- wait_queue_head_t wait;
+ struct completion done;
void *uring_ctx;
- atomic_t pending;
+ refcount_t pending_refs;
blk_status_t status;
};
@@ -9099,14 +9099,14 @@ static void btrfs_encoded_read_endio(struct btrfs_bio *bbio)
*/
WRITE_ONCE(priv->status, bbio->bio.bi_status);
}
- if (atomic_dec_and_test(&priv->pending)) {
+ if (refcount_dec_and_test(&priv->pending_refs)) {
int err = blk_status_to_errno(READ_ONCE(priv->status));
if (priv->uring_ctx) {
btrfs_uring_read_extent_endio(priv->uring_ctx, err);
kfree(priv);
} else {
- wake_up(&priv->wait);
+ complete(&priv->done);
}
}
bio_put(&bbio->bio);
@@ -9126,8 +9126,8 @@ int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
if (!priv)
return -ENOMEM;
- init_waitqueue_head(&priv->wait);
- atomic_set(&priv->pending, 1);
+ init_completion(&priv->done);
+ refcount_set(&priv->pending_refs, 1);
priv->status = 0;
priv->uring_ctx = uring_ctx;
@@ -9140,7 +9140,7 @@ int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
size_t bytes = min_t(u64, disk_io_size, PAGE_SIZE);
if (bio_add_page(&bbio->bio, pages[i], bytes, 0) < bytes) {
- atomic_inc(&priv->pending);
+ refcount_inc(&priv->pending_refs);
btrfs_submit_bbio(bbio, 0);
bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, fs_info,
@@ -9155,11 +9155,11 @@ int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
disk_io_size -= bytes;
} while (disk_io_size);
- atomic_inc(&priv->pending);
+ refcount_inc(&priv->pending_refs);
btrfs_submit_bbio(bbio, 0);
if (uring_ctx) {
- if (atomic_dec_return(&priv->pending) == 0) {
+ if (refcount_dec_and_test(&priv->pending_refs)) {
ret = blk_status_to_errno(READ_ONCE(priv->status));
btrfs_uring_read_extent_endio(uring_ctx, ret);
kfree(priv);
@@ -9168,8 +9168,8 @@ int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
return -EIOCBQUEUED;
} else {
- if (atomic_dec_return(&priv->pending) != 0)
- io_wait_event(priv->wait, !atomic_read(&priv->pending));
+ if (!refcount_dec_and_test(&priv->pending_refs))
+ wait_for_completion_io(&priv->done);
/* See btrfs_encoded_read_endio() for ordering. */
ret = blk_status_to_errno(READ_ONCE(priv->status));
kfree(priv);
@@ -9799,15 +9799,25 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
struct extent_state *cached_state = NULL;
- struct extent_map *em = NULL;
struct btrfs_chunk_map *map = NULL;
struct btrfs_device *device = NULL;
struct btrfs_swap_info bsi = {
.lowest_ppage = (sector_t)-1ULL,
};
+ struct btrfs_backref_share_check_ctx *backref_ctx = NULL;
+ struct btrfs_path *path = NULL;
int ret = 0;
u64 isize;
- u64 start;
+ u64 prev_extent_end = 0;
+
+ /*
+ * Acquire the inode's mmap lock to prevent races with memory mapped
+ * writes, as they could happen after we flush delalloc below and before
+	 * we lock the extent range further below. The inode was already locked
+	 * further up in the call chain.
+ */
+ btrfs_assert_inode_locked(BTRFS_I(inode));
+ down_write(&BTRFS_I(inode)->i_mmap_lock);
/*
* If the swap file was just created, make sure delalloc is done. If the
@@ -9816,22 +9826,32 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
*/
ret = btrfs_wait_ordered_range(BTRFS_I(inode), 0, (u64)-1);
if (ret)
- return ret;
+ goto out_unlock_mmap;
/*
* The inode is locked, so these flags won't change after we check them.
*/
if (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS) {
btrfs_warn(fs_info, "swapfile must not be compressed");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_unlock_mmap;
}
if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)) {
btrfs_warn(fs_info, "swapfile must not be copy-on-write");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_unlock_mmap;
}
if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
btrfs_warn(fs_info, "swapfile must not be checksummed");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_unlock_mmap;
+ }
+
+ path = btrfs_alloc_path();
+ backref_ctx = btrfs_alloc_backref_share_check_ctx();
+ if (!path || !backref_ctx) {
+ ret = -ENOMEM;
+ goto out_unlock_mmap;
}
/*
@@ -9846,7 +9866,8 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_SWAP_ACTIVATE)) {
btrfs_warn(fs_info,
"cannot activate swapfile while exclusive operation is running");
- return -EBUSY;
+ ret = -EBUSY;
+ goto out_unlock_mmap;
}
/*
@@ -9860,7 +9881,8 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
btrfs_exclop_finish(fs_info);
btrfs_warn(fs_info,
"cannot activate swapfile because snapshot creation is in progress");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_unlock_mmap;
}
/*
* Snapshots can create extents which require COW even if NODATACOW is
@@ -9881,7 +9903,8 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
btrfs_warn(fs_info,
"cannot activate swapfile because subvolume %llu is being deleted",
btrfs_root_id(root));
- return -EPERM;
+ ret = -EPERM;
+ goto out_unlock_mmap;
}
atomic_inc(&root->nr_swapfiles);
spin_unlock(&root->root_item_lock);
@@ -9889,24 +9912,39 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
isize = ALIGN_DOWN(inode->i_size, fs_info->sectorsize);
lock_extent(io_tree, 0, isize - 1, &cached_state);
- start = 0;
- while (start < isize) {
- u64 logical_block_start, physical_block_start;
+ while (prev_extent_end < isize) {
+ struct btrfs_key key;
+ struct extent_buffer *leaf;
+ struct btrfs_file_extent_item *ei;
struct btrfs_block_group *bg;
- u64 len = isize - start;
+ u64 logical_block_start;
+ u64 physical_block_start;
+ u64 extent_gen;
+ u64 disk_bytenr;
+ u64 len;
- em = btrfs_get_extent(BTRFS_I(inode), NULL, start, len);
- if (IS_ERR(em)) {
- ret = PTR_ERR(em);
+ key.objectid = btrfs_ino(BTRFS_I(inode));
+ key.type = BTRFS_EXTENT_DATA_KEY;
+ key.offset = prev_extent_end;
+
+ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+ if (ret < 0)
goto out;
- }
- if (em->disk_bytenr == EXTENT_MAP_HOLE) {
+ /*
+	 * If the key was not found, it means we have an implicit hole (NO_HOLES
+	 * is enabled).
+ */
+ if (ret > 0) {
btrfs_warn(fs_info, "swapfile must not have holes");
ret = -EINVAL;
goto out;
}
- if (em->disk_bytenr == EXTENT_MAP_INLINE) {
+
+ leaf = path->nodes[0];
+ ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
+
+ if (btrfs_file_extent_type(leaf, ei) == BTRFS_FILE_EXTENT_INLINE) {
/*
* It's unlikely we'll ever actually find ourselves
* here, as a file small enough to fit inline won't be
@@ -9918,23 +9956,45 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
ret = -EINVAL;
goto out;
}
- if (extent_map_is_compressed(em)) {
+
+ if (btrfs_file_extent_compression(leaf, ei) != BTRFS_COMPRESS_NONE) {
btrfs_warn(fs_info, "swapfile must not be compressed");
ret = -EINVAL;
goto out;
}
- logical_block_start = extent_map_block_start(em) + (start - em->start);
- len = min(len, em->len - (start - em->start));
- free_extent_map(em);
- em = NULL;
+ disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, ei);
+ if (disk_bytenr == 0) {
+ btrfs_warn(fs_info, "swapfile must not have holes");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ logical_block_start = disk_bytenr + btrfs_file_extent_offset(leaf, ei);
+ extent_gen = btrfs_file_extent_generation(leaf, ei);
+ prev_extent_end = btrfs_file_extent_end(path);
- ret = can_nocow_extent(inode, start, &len, NULL, false, true);
+ if (prev_extent_end > isize)
+ len = isize - key.offset;
+ else
+ len = btrfs_file_extent_num_bytes(leaf, ei);
+
+ backref_ctx->curr_leaf_bytenr = leaf->start;
+
+ /*
+ * Don't need the path anymore, release to avoid deadlocks when
+ * calling btrfs_is_data_extent_shared() because when joining a
+ * transaction it can block waiting for the current one's commit
+ * which in turn may be trying to lock the same leaf to flush
+ * delayed items for example.
+ */
+ btrfs_release_path(path);
+
+ ret = btrfs_is_data_extent_shared(BTRFS_I(inode), disk_bytenr,
+ extent_gen, backref_ctx);
if (ret < 0) {
goto out;
- } else if (ret) {
- ret = 0;
- } else {
+ } else if (ret > 0) {
btrfs_warn(fs_info,
"swapfile must not be copy-on-write");
ret = -EINVAL;
@@ -9969,7 +10029,6 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
physical_block_start = (map->stripes[0].physical +
(logical_block_start - map->start));
- len = min(len, map->chunk_len - (logical_block_start - map->start));
btrfs_free_chunk_map(map);
map = NULL;
@@ -10010,20 +10069,23 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
if (ret)
goto out;
}
- bsi.start = start;
+ bsi.start = key.offset;
bsi.block_start = physical_block_start;
bsi.block_len = len;
}
- start += len;
+ if (fatal_signal_pending(current)) {
+ ret = -EINTR;
+ goto out;
+ }
+
+ cond_resched();
}
if (bsi.block_len)
ret = btrfs_add_swap_extent(sis, &bsi);
out:
- if (!IS_ERR_OR_NULL(em))
- free_extent_map(em);
if (!IS_ERR_OR_NULL(map))
btrfs_free_chunk_map(map);
@@ -10036,6 +10098,10 @@ out:
btrfs_exclop_finish(fs_info);
+out_unlock_mmap:
+ up_write(&BTRFS_I(inode)->i_mmap_lock);
+ btrfs_free_backref_share_ctx(backref_ctx);
+ btrfs_free_path(path);
if (ret)
return ret;
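
The conversion above swaps an open-coded waitqueue/atomic pair for the standard completion-plus-refcount idiom: the submitter holds one reference, each in-flight bio holds one, and whoever drops the last reference signals the waiter. A self-contained sketch of the idiom (demo_ names are illustrative, not the patch's API):

	#include <linux/completion.h>
	#include <linux/refcount.h>

	struct demo_io_private {
		struct completion done;
		refcount_t pending_refs;
	};

	static void demo_endio(struct demo_io_private *priv)
	{
		if (refcount_dec_and_test(&priv->pending_refs))
			complete(&priv->done);	/* last ref wakes the waiter */
	}

	static void demo_submit_and_wait(struct demo_io_private *priv)
	{
		init_completion(&priv->done);
		refcount_set(&priv->pending_refs, 1);	/* submitter's reference */
		/* ... refcount_inc() once per bio submitted ... */
		if (!refcount_dec_and_test(&priv->pending_refs))
			wait_for_completion(&priv->done);
	}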
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 3af8bb0c8d75..4d9305fa37a8 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -4878,25 +4878,29 @@ out_fail:
return ret;
}
+struct btrfs_uring_encoded_data {
+ struct btrfs_ioctl_encoded_io_args args;
+ struct iovec iovstack[UIO_FASTIOV];
+ struct iovec *iov;
+ struct iov_iter iter;
+};
+
static int btrfs_uring_encoded_read(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
size_t copy_end_kernel = offsetofend(struct btrfs_ioctl_encoded_io_args, flags);
size_t copy_end;
- struct btrfs_ioctl_encoded_io_args args = { 0 };
int ret;
u64 disk_bytenr, disk_io_size;
struct file *file;
struct btrfs_inode *inode;
struct btrfs_fs_info *fs_info;
struct extent_io_tree *io_tree;
- struct iovec iovstack[UIO_FASTIOV];
- struct iovec *iov = iovstack;
- struct iov_iter iter;
loff_t pos;
struct kiocb kiocb;
struct extent_state *cached_state = NULL;
u64 start, lockend;
void __user *sqe_addr;
+ struct btrfs_uring_encoded_data *data = io_uring_cmd_get_async_data(cmd)->op_data;
if (!capable(CAP_SYS_ADMIN)) {
ret = -EPERM;
@@ -4910,43 +4914,64 @@ static int btrfs_uring_encoded_read(struct io_uring_cmd *cmd, unsigned int issue
if (issue_flags & IO_URING_F_COMPAT) {
#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
- struct btrfs_ioctl_encoded_io_args_32 args32;
-
copy_end = offsetofend(struct btrfs_ioctl_encoded_io_args_32, flags);
- if (copy_from_user(&args32, sqe_addr, copy_end)) {
- ret = -EFAULT;
- goto out_acct;
- }
- args.iov = compat_ptr(args32.iov);
- args.iovcnt = args32.iovcnt;
- args.offset = args32.offset;
- args.flags = args32.flags;
#else
return -ENOTTY;
#endif
} else {
copy_end = copy_end_kernel;
- if (copy_from_user(&args, sqe_addr, copy_end)) {
- ret = -EFAULT;
+ }
+
+ if (!data) {
+ data = kzalloc(sizeof(*data), GFP_NOFS);
+ if (!data) {
+ ret = -ENOMEM;
goto out_acct;
}
- }
- if (args.flags != 0)
- return -EINVAL;
+ io_uring_cmd_get_async_data(cmd)->op_data = data;
- ret = import_iovec(ITER_DEST, args.iov, args.iovcnt, ARRAY_SIZE(iovstack),
- &iov, &iter);
- if (ret < 0)
- goto out_acct;
+ if (issue_flags & IO_URING_F_COMPAT) {
+#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
+ struct btrfs_ioctl_encoded_io_args_32 args32;
- if (iov_iter_count(&iter) == 0) {
- ret = 0;
- goto out_free;
+ if (copy_from_user(&args32, sqe_addr, copy_end)) {
+ ret = -EFAULT;
+ goto out_acct;
+ }
+
+ data->args.iov = compat_ptr(args32.iov);
+ data->args.iovcnt = args32.iovcnt;
+ data->args.offset = args32.offset;
+ data->args.flags = args32.flags;
+#endif
+ } else {
+ if (copy_from_user(&data->args, sqe_addr, copy_end)) {
+ ret = -EFAULT;
+ goto out_acct;
+ }
+ }
+
+ if (data->args.flags != 0) {
+ ret = -EINVAL;
+ goto out_acct;
+ }
+
+ data->iov = data->iovstack;
+ ret = import_iovec(ITER_DEST, data->args.iov, data->args.iovcnt,
+ ARRAY_SIZE(data->iovstack), &data->iov,
+ &data->iter);
+ if (ret < 0)
+ goto out_acct;
+
+ if (iov_iter_count(&data->iter) == 0) {
+ ret = 0;
+ goto out_free;
+ }
}
- pos = args.offset;
- ret = rw_verify_area(READ, file, &pos, args.len);
+ pos = data->args.offset;
+ ret = rw_verify_area(READ, file, &pos, data->args.len);
if (ret < 0)
goto out_free;
@@ -4959,15 +4984,16 @@ static int btrfs_uring_encoded_read(struct io_uring_cmd *cmd, unsigned int issue
start = ALIGN_DOWN(pos, fs_info->sectorsize);
lockend = start + BTRFS_MAX_UNCOMPRESSED - 1;
- ret = btrfs_encoded_read(&kiocb, &iter, &args, &cached_state,
+ ret = btrfs_encoded_read(&kiocb, &data->iter, &data->args, &cached_state,
&disk_bytenr, &disk_io_size);
if (ret < 0 && ret != -EIOCBQUEUED)
goto out_free;
file_accessed(file);
- if (copy_to_user(sqe_addr + copy_end, (const char *)&args + copy_end_kernel,
- sizeof(args) - copy_end_kernel)) {
+ if (copy_to_user(sqe_addr + copy_end,
+ (const char *)&data->args + copy_end_kernel,
+ sizeof(data->args) - copy_end_kernel)) {
if (ret == -EIOCBQUEUED) {
unlock_extent(io_tree, start, lockend, &cached_state);
btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
@@ -4977,40 +5003,22 @@ static int btrfs_uring_encoded_read(struct io_uring_cmd *cmd, unsigned int issue
}
if (ret == -EIOCBQUEUED) {
- u64 count;
-
- /*
- * If we've optimized things by storing the iovecs on the stack,
- * undo this.
- */
- if (!iov) {
- iov = kmalloc(sizeof(struct iovec) * args.iovcnt, GFP_NOFS);
- if (!iov) {
- unlock_extent(io_tree, start, lockend, &cached_state);
- btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
- ret = -ENOMEM;
- goto out_acct;
- }
-
- memcpy(iov, iovstack, sizeof(struct iovec) * args.iovcnt);
- }
-
- count = min_t(u64, iov_iter_count(&iter), disk_io_size);
+ u64 count = min_t(u64, iov_iter_count(&data->iter), disk_io_size);
/* Match ioctl by not returning past EOF if uncompressed. */
- if (!args.compression)
- count = min_t(u64, count, args.len);
+ if (!data->args.compression)
+ count = min_t(u64, count, data->args.len);
- ret = btrfs_uring_read_extent(&kiocb, &iter, start, lockend,
- cached_state, disk_bytenr,
- disk_io_size, count,
- args.compression, iov, cmd);
+ ret = btrfs_uring_read_extent(&kiocb, &data->iter, start, lockend,
+ cached_state, disk_bytenr, disk_io_size,
+ count, data->args.compression,
+ data->iov, cmd);
goto out_acct;
}
out_free:
- kfree(iov);
+ kfree(data->iov);
out_acct:
if (ret > 0)
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index a6f92836c9b1..f9b214992212 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -1121,6 +1121,7 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info,
fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON;
if (simple) {
fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE;
+ btrfs_set_fs_incompat(fs_info, SIMPLE_QUOTA);
btrfs_set_qgroup_status_enable_gen(leaf, ptr, trans->transid);
} else {
fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
@@ -1254,8 +1255,6 @@ out_add_root:
spin_lock(&fs_info->qgroup_lock);
fs_info->quota_root = quota_root;
set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
- if (simple)
- btrfs_set_fs_incompat(fs_info, SIMPLE_QUOTA);
spin_unlock(&fs_info->qgroup_lock);
/* Skip rescan for simple qgroups. */
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index bf267bdfa8f8..db8b42f674b7 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -2902,6 +2902,7 @@ static int relocate_one_folio(struct reloc_control *rc,
const bool use_rst = btrfs_need_stripe_tree_update(fs_info, rc->block_group->flags);
ASSERT(index <= last_index);
+again:
folio = filemap_lock_folio(inode->i_mapping, index);
if (IS_ERR(folio)) {
@@ -2937,6 +2938,11 @@ static int relocate_one_folio(struct reloc_control *rc,
ret = -EIO;
goto release_folio;
}
+ if (folio->mapping != inode->i_mapping) {
+ folio_unlock(folio);
+ folio_put(folio);
+ goto again;
+ }
}
/*
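
This hunk and the matching one in send.c below close the same race: a folio can be truncated or migrated between readahead and the uptodate check, after which folio->mapping no longer points at the inode's mapping and the lookup must be redone. A hedged sketch of the lock-and-revalidate loop (illustrative helper):

	#include <linux/pagemap.h>

	static struct folio *demo_lock_stable_folio(struct address_space *mapping,
						    pgoff_t index)
	{
		struct folio *folio;

	again:
		folio = filemap_lock_folio(mapping, index);
		if (IS_ERR(folio))
			return folio;	/* not present; caller decides */
		if (folio->mapping != mapping) {
			/* Truncated or migrated under us; retry the lookup. */
			folio_unlock(folio);
			folio_put(folio);
			goto again;
		}
		return folio;
	}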
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 204c928beaf9..531312efee8d 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -1541,6 +1541,10 @@ static int scrub_find_fill_first_stripe(struct btrfs_block_group *bg,
u64 extent_gen;
int ret;
+ if (unlikely(!extent_root)) {
+ btrfs_err(fs_info, "no valid extent root for scrub");
+ return -EUCLEAN;
+ }
memset(stripe->sectors, 0, sizeof(struct scrub_sector_verification) *
stripe->nr_sectors);
scrub_stripe_reset_bitmaps(stripe);
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 7254279c3cc9..498c84323253 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -5280,6 +5280,7 @@ static int put_file_data(struct send_ctx *sctx, u64 offset, u32 len)
unsigned cur_len = min_t(unsigned, len,
PAGE_SIZE - pg_offset);
+again:
folio = filemap_lock_folio(mapping, index);
if (IS_ERR(folio)) {
page_cache_sync_readahead(mapping,
@@ -5312,6 +5313,11 @@ static int put_file_data(struct send_ctx *sctx, u64 offset, u32 len)
ret = -EIO;
break;
}
+ if (folio->mapping != mapping) {
+ folio_unlock(folio);
+ folio_put(folio);
+ goto again;
+ }
}
memcpy_from_folio(sctx->send_buf + sctx->send_size, folio,
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
index fdcbf650ac31..7f09b6c9cc2d 100644
--- a/fs/btrfs/sysfs.c
+++ b/fs/btrfs/sysfs.c
@@ -1118,7 +1118,7 @@ static ssize_t btrfs_nodesize_show(struct kobject *kobj,
{
struct btrfs_fs_info *fs_info = to_fs_info(kobj);
- return sysfs_emit(buf, "%u\n", fs_info->super_copy->nodesize);
+ return sysfs_emit(buf, "%u\n", fs_info->nodesize);
}
BTRFS_ATTR(, nodesize, btrfs_nodesize_show);
@@ -1128,7 +1128,7 @@ static ssize_t btrfs_sectorsize_show(struct kobject *kobj,
{
struct btrfs_fs_info *fs_info = to_fs_info(kobj);
- return sysfs_emit(buf, "%u\n", fs_info->super_copy->sectorsize);
+ return sysfs_emit(buf, "%u\n", fs_info->sectorsize);
}
BTRFS_ATTR(, sectorsize, btrfs_sectorsize_show);
@@ -1180,7 +1180,7 @@ static ssize_t btrfs_clone_alignment_show(struct kobject *kobj,
{
struct btrfs_fs_info *fs_info = to_fs_info(kobj);
- return sysfs_emit(buf, "%u\n", fs_info->super_copy->sectorsize);
+ return sysfs_emit(buf, "%u\n", fs_info->sectorsize);
}
BTRFS_ATTR(, clone_alignment, btrfs_clone_alignment_show);
diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
index 148d8cefa40e..dfeee033f31f 100644
--- a/fs/btrfs/tree-checker.c
+++ b/fs/btrfs/tree-checker.c
@@ -1527,6 +1527,11 @@ static int check_extent_item(struct extent_buffer *leaf,
dref_offset, fs_info->sectorsize);
return -EUCLEAN;
}
+ if (unlikely(btrfs_extent_data_ref_count(leaf, dref) == 0)) {
+ extent_err(leaf, slot,
+ "invalid data ref count, should have non-zero value");
+ return -EUCLEAN;
+ }
inline_refs += btrfs_extent_data_ref_count(leaf, dref);
break;
/* Contains parent bytenr and ref count */
@@ -1539,6 +1544,11 @@ static int check_extent_item(struct extent_buffer *leaf,
inline_offset, fs_info->sectorsize);
return -EUCLEAN;
}
+ if (unlikely(btrfs_shared_data_ref_count(leaf, sref) == 0)) {
+ extent_err(leaf, slot,
+ "invalid shared data ref count, should have non-zero value");
+ return -EUCLEAN;
+ }
inline_refs += btrfs_shared_data_ref_count(leaf, sref);
break;
case BTRFS_EXTENT_OWNER_REF_KEY:
@@ -1611,8 +1621,18 @@ static int check_simple_keyed_refs(struct extent_buffer *leaf,
{
u32 expect_item_size = 0;
- if (key->type == BTRFS_SHARED_DATA_REF_KEY)
+ if (key->type == BTRFS_SHARED_DATA_REF_KEY) {
+ struct btrfs_shared_data_ref *sref;
+
+ sref = btrfs_item_ptr(leaf, slot, struct btrfs_shared_data_ref);
+ if (unlikely(btrfs_shared_data_ref_count(leaf, sref) == 0)) {
+ extent_err(leaf, slot,
+ "invalid shared data backref count, should have non-zero value");
+ return -EUCLEAN;
+ }
+
expect_item_size = sizeof(struct btrfs_shared_data_ref);
+ }
if (unlikely(btrfs_item_size(leaf, slot) != expect_item_size)) {
generic_err(leaf, slot,
@@ -1689,6 +1709,11 @@ static int check_extent_data_ref(struct extent_buffer *leaf,
offset, leaf->fs_info->sectorsize);
return -EUCLEAN;
}
+ if (unlikely(btrfs_extent_data_ref_count(leaf, dref) == 0)) {
+ extent_err(leaf, slot,
+ "invalid extent data backref count, should have non-zero value");
+ return -EUCLEAN;
+ }
}
return 0;
}
diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
index ddf0d5a448a7..c9e92c6941ec 100644
--- a/fs/btrfs/zlib.c
+++ b/fs/btrfs/zlib.c
@@ -174,10 +174,10 @@ int zlib_compress_folios(struct list_head *ws, struct address_space *mapping,
copy_page(workspace->buf + i * PAGE_SIZE,
data_in);
start += PAGE_SIZE;
- workspace->strm.avail_in =
- (in_buf_folios << PAGE_SHIFT);
}
workspace->strm.next_in = workspace->buf;
+ workspace->strm.avail_in = min(bytes_left,
+ in_buf_folios << PAGE_SHIFT);
} else {
unsigned int pg_off;
unsigned int cur_len;
diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
index 11ed523e528e..df905ae82929 100644
--- a/fs/btrfs/zoned.c
+++ b/fs/btrfs/zoned.c
@@ -748,8 +748,9 @@ int btrfs_check_zoned_mode(struct btrfs_fs_info *fs_info)
(u64)lim->max_segments << PAGE_SHIFT),
fs_info->sectorsize);
fs_info->fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_ZONED;
- if (fs_info->max_zone_append_size < fs_info->max_extent_size)
- fs_info->max_extent_size = fs_info->max_zone_append_size;
+
+ fs_info->max_extent_size = min_not_zero(fs_info->max_extent_size,
+ fs_info->max_zone_append_size);
/*
* Check mount options here, because we might change fs_info->zoned
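
min_not_zero() treats zero as "no limit", so a zero max_zone_append_size can no longer clamp max_extent_size down to zero the way the old comparison could. A sketch of the semantics (illustrative helper):

	#include <linux/minmax.h>
	#include <linux/types.h>

	/* Return the tighter of two limits, where 0 means "unlimited". */
	static u64 demo_effective_limit(u64 a, u64 b)
	{
		return min_not_zero(a, b);
	}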
diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
index 89b11336a836..1806bff8e59b 100644
--- a/fs/cachefiles/daemon.c
+++ b/fs/cachefiles/daemon.c
@@ -15,6 +15,7 @@
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/mount.h>
+#include <linux/security.h>
#include <linux/statfs.h>
#include <linux/ctype.h>
#include <linux/string.h>
@@ -576,7 +577,7 @@ static int cachefiles_daemon_dir(struct cachefiles_cache *cache, char *args)
*/
static int cachefiles_daemon_secctx(struct cachefiles_cache *cache, char *args)
{
- char *secctx;
+ int err;
_enter(",%s", args);
@@ -585,16 +586,16 @@ static int cachefiles_daemon_secctx(struct cachefiles_cache *cache, char *args)
return -EINVAL;
}
- if (cache->secctx) {
+ if (cache->have_secid) {
pr_err("Second security context specified\n");
return -EINVAL;
}
- secctx = kstrdup(args, GFP_KERNEL);
- if (!secctx)
- return -ENOMEM;
+ err = security_secctx_to_secid(args, strlen(args), &cache->secid);
+ if (err)
+ return err;
- cache->secctx = secctx;
+ cache->have_secid = true;
return 0;
}
@@ -820,7 +821,6 @@ static void cachefiles_daemon_unbind(struct cachefiles_cache *cache)
put_cred(cache->cache_cred);
kfree(cache->rootdirname);
- kfree(cache->secctx);
kfree(cache->tag);
_leave("");
diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
index 7b99bd98de75..38c236e38cef 100644
--- a/fs/cachefiles/internal.h
+++ b/fs/cachefiles/internal.h
@@ -122,7 +122,6 @@ struct cachefiles_cache {
#define CACHEFILES_STATE_CHANGED 3 /* T if state changed (poll trigger) */
#define CACHEFILES_ONDEMAND_MODE 4 /* T if in on-demand read mode */
char *rootdirname; /* name of cache root directory */
- char *secctx; /* LSM security context */
char *tag; /* cache binding tag */
refcount_t unbind_pincount;/* refcount to do daemon unbind */
struct xarray reqs; /* xarray of pending on-demand requests */
@@ -130,6 +129,8 @@ struct cachefiles_cache {
struct xarray ondemand_ids; /* xarray for ondemand_id allocation */
u32 ondemand_id_next;
u32 msg_id_next;
+ u32 secid; /* LSM security id */
+ bool have_secid; /* whether "secid" was set */
};
static inline bool cachefiles_in_ondemand_mode(struct cachefiles_cache *cache)
diff --git a/fs/cachefiles/security.c b/fs/cachefiles/security.c
index fe777164f1d8..fc6611886b3b 100644
--- a/fs/cachefiles/security.c
+++ b/fs/cachefiles/security.c
@@ -18,7 +18,7 @@ int cachefiles_get_security_ID(struct cachefiles_cache *cache)
struct cred *new;
int ret;
- _enter("{%s}", cache->secctx);
+ _enter("{%u}", cache->have_secid ? cache->secid : 0);
new = prepare_kernel_cred(current);
if (!new) {
@@ -26,8 +26,8 @@ int cachefiles_get_security_ID(struct cachefiles_cache *cache)
goto error;
}
- if (cache->secctx) {
- ret = set_security_override_from_ctx(new, cache->secctx);
+ if (cache->have_secid) {
+ ret = set_security_override(new, cache->secid);
if (ret < 0) {
put_cred(new);
pr_err("Security denies permission to nominate security context: error %d\n",
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 4b8d59ebda00..851d70200c6b 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -1066,7 +1066,7 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
if (ceph_inode_is_shutdown(inode))
return -EIO;
- if (!len)
+ if (!len || !i_size)
return 0;
/*
* flush any page cache pages in this range. this
@@ -1086,7 +1086,7 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
int num_pages;
size_t page_off;
bool more;
- int idx;
+ int idx = 0;
size_t left;
struct ceph_osd_req_op *op;
u64 read_off = off;
@@ -1116,6 +1116,16 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
len = read_off + read_len - off;
more = len < iov_iter_count(to);
+ op = &req->r_ops[0];
+ if (sparse) {
+ extent_cnt = __ceph_sparse_read_ext_count(inode, read_len);
+ ret = ceph_alloc_sparse_ext_map(op, extent_cnt);
+ if (ret) {
+ ceph_osdc_put_request(req);
+ break;
+ }
+ }
+
num_pages = calc_pages_for(read_off, read_len);
page_off = offset_in_page(off);
pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
@@ -1127,17 +1137,7 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
osd_req_op_extent_osd_data_pages(req, 0, pages, read_len,
offset_in_page(read_off),
- false, false);
-
- op = &req->r_ops[0];
- if (sparse) {
- extent_cnt = __ceph_sparse_read_ext_count(inode, read_len);
- ret = ceph_alloc_sparse_ext_map(op, extent_cnt);
- if (ret) {
- ceph_osdc_put_request(req);
- break;
- }
- }
+ false, true);
ceph_osdc_start_request(osdc, req);
ret = ceph_osdc_wait_request(osdc, req);
@@ -1160,7 +1160,14 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
else if (ret == -ENOENT)
ret = 0;
- if (ret > 0 && IS_ENCRYPTED(inode)) {
+ if (ret < 0) {
+ ceph_osdc_put_request(req);
+ if (ret == -EBLOCKLISTED)
+ fsc->blocklisted = true;
+ break;
+ }
+
+ if (IS_ENCRYPTED(inode)) {
int fret;
fret = ceph_fscrypt_decrypt_extents(inode, pages,
@@ -1186,10 +1193,8 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
ret = min_t(ssize_t, fret, len);
}
- ceph_osdc_put_request(req);
-
/* Short read but not EOF? Zero out the remainder. */
- if (ret >= 0 && ret < len && (off + ret < i_size)) {
+ if (ret < len && (off + ret < i_size)) {
int zlen = min(len - ret, i_size - off - ret);
int zoff = page_off + ret;
@@ -1199,13 +1204,11 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
ret += zlen;
}
- idx = 0;
- if (ret <= 0)
- left = 0;
- else if (off + ret > i_size)
- left = i_size - off;
+ if (off + ret > i_size)
+ left = (i_size > off) ? i_size - off : 0;
else
left = ret;
+
while (left > 0) {
size_t plen, copied;
@@ -1221,13 +1224,8 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
break;
}
}
- ceph_release_page_vector(pages, num_pages);
- if (ret < 0) {
- if (ret == -EBLOCKLISTED)
- fsc->blocklisted = true;
- break;
- }
+ ceph_osdc_put_request(req);
if (off >= i_size || !more)
break;
@@ -1553,6 +1551,16 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
break;
}
+ op = &req->r_ops[0];
+ if (!write && sparse) {
+ extent_cnt = __ceph_sparse_read_ext_count(inode, size);
+ ret = ceph_alloc_sparse_ext_map(op, extent_cnt);
+ if (ret) {
+ ceph_osdc_put_request(req);
+ break;
+ }
+ }
+
len = iter_get_bvecs_alloc(iter, size, &bvecs, &num_pages);
if (len < 0) {
ceph_osdc_put_request(req);
@@ -1562,6 +1570,8 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
if (len != size)
osd_req_op_extent_update(req, 0, len);
+ osd_req_op_extent_osd_data_bvecs(req, 0, bvecs, num_pages, len);
+
/*
* To simplify error handling, allow AIO when IO within i_size
* or IO can be satisfied by single OSD request.
@@ -1593,17 +1603,6 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
req->r_mtime = mtime;
}
- osd_req_op_extent_osd_data_bvecs(req, 0, bvecs, num_pages, len);
- op = &req->r_ops[0];
- if (sparse) {
- extent_cnt = __ceph_sparse_read_ext_count(inode, size);
- ret = ceph_alloc_sparse_ext_map(op, extent_cnt);
- if (ret) {
- ceph_osdc_put_request(req);
- break;
- }
- }
-
if (aio_req) {
aio_req->total_len += len;
aio_req->num_reqs++;
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 219a2cc2bf3c..785fe489ef4b 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -2800,12 +2800,11 @@ retry:
if (pos < 0) {
/*
- * A rename didn't occur, but somehow we didn't end up where
- * we thought we would. Throw a warning and try again.
+ * The path is longer than PATH_MAX and this function
+ * cannot ever succeed. Creating paths that long is
+ * possible with Ceph, but Linux cannot use them.
*/
- pr_warn_client(cl, "did not end path lookup where expected (pos = %d)\n",
- pos);
- goto retry;
+ return ERR_PTR(-ENAMETOOLONG);
}
*pbase = base;
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index de03cd6eb86e..4344e1f11806 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -431,6 +431,8 @@ static int ceph_parse_mount_param(struct fs_context *fc,
switch (token) {
case Opt_snapdirname:
+ if (strlen(param->string) > NAME_MAX)
+ return invalfc(fc, "snapdirname too long");
kfree(fsopt->snapdir_name);
fsopt->snapdir_name = param->string;
param->string = NULL;
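
Rejecting over-long snapdir names at parse time avoids accepting a directory the VFS can never resolve, since path components are capped at NAME_MAX bytes. A sketch of the check (illustrative helper):

	#include <linux/limits.h>
	#include <linux/string.h>

	static bool demo_snapdirname_ok(const char *name)
	{
		return strlen(name) <= NAME_MAX;
	}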
diff --git a/fs/erofs/data.c b/fs/erofs/data.c
index 1c49f8962021..0cd6b5c4df98 100644
--- a/fs/erofs/data.c
+++ b/fs/erofs/data.c
@@ -56,10 +56,10 @@ void erofs_init_metabuf(struct erofs_buf *buf, struct super_block *sb)
buf->file = NULL;
if (erofs_is_fileio_mode(sbi)) {
- buf->file = sbi->fdev; /* some fs like FUSE needs it */
+ buf->file = sbi->dif0.file; /* some fs like FUSE needs it */
buf->mapping = buf->file->f_mapping;
} else if (erofs_is_fscache_mode(sb))
- buf->mapping = sbi->s_fscache->inode->i_mapping;
+ buf->mapping = sbi->dif0.fscache->inode->i_mapping;
else
buf->mapping = sb->s_bdev->bd_mapping;
}
@@ -179,19 +179,13 @@ out:
}
static void erofs_fill_from_devinfo(struct erofs_map_dev *map,
- struct erofs_device_info *dif)
+ struct super_block *sb, struct erofs_device_info *dif)
{
+ map->m_sb = sb;
+ map->m_dif = dif;
map->m_bdev = NULL;
- map->m_fp = NULL;
- if (dif->file) {
- if (S_ISBLK(file_inode(dif->file)->i_mode))
- map->m_bdev = file_bdev(dif->file);
- else
- map->m_fp = dif->file;
- }
- map->m_daxdev = dif->dax_dev;
- map->m_dax_part_off = dif->dax_part_off;
- map->m_fscache = dif->fscache;
+ if (dif->file && S_ISBLK(file_inode(dif->file)->i_mode))
+ map->m_bdev = file_bdev(dif->file);
}
int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map)
@@ -201,12 +195,8 @@ int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map)
erofs_off_t startoff, length;
int id;
- map->m_bdev = sb->s_bdev;
- map->m_daxdev = EROFS_SB(sb)->dax_dev;
- map->m_dax_part_off = EROFS_SB(sb)->dax_part_off;
- map->m_fscache = EROFS_SB(sb)->s_fscache;
- map->m_fp = EROFS_SB(sb)->fdev;
-
+ erofs_fill_from_devinfo(map, sb, &EROFS_SB(sb)->dif0);
+ map->m_bdev = sb->s_bdev; /* use s_bdev for the primary device */
if (map->m_deviceid) {
down_read(&devs->rwsem);
dif = idr_find(&devs->tree, map->m_deviceid - 1);
@@ -219,7 +209,7 @@ int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map)
up_read(&devs->rwsem);
return 0;
}
- erofs_fill_from_devinfo(map, dif);
+ erofs_fill_from_devinfo(map, sb, dif);
up_read(&devs->rwsem);
} else if (devs->extra_devices && !devs->flatdev) {
down_read(&devs->rwsem);
@@ -232,7 +222,7 @@ int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map)
if (map->m_pa >= startoff &&
map->m_pa < startoff + length) {
map->m_pa -= startoff;
- erofs_fill_from_devinfo(map, dif);
+ erofs_fill_from_devinfo(map, sb, dif);
break;
}
}
@@ -302,7 +292,7 @@ static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
iomap->offset = map.m_la;
if (flags & IOMAP_DAX)
- iomap->dax_dev = mdev.m_daxdev;
+ iomap->dax_dev = mdev.m_dif->dax_dev;
else
iomap->bdev = mdev.m_bdev;
iomap->length = map.m_llen;
@@ -331,7 +321,7 @@ static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
iomap->type = IOMAP_MAPPED;
iomap->addr = mdev.m_pa;
if (flags & IOMAP_DAX)
- iomap->addr += mdev.m_dax_part_off;
+ iomap->addr += mdev.m_dif->dax_part_off;
}
return 0;
}
diff --git a/fs/erofs/fileio.c b/fs/erofs/fileio.c
index 3af96b1e2c2a..33f8539dda4a 100644
--- a/fs/erofs/fileio.c
+++ b/fs/erofs/fileio.c
@@ -9,6 +9,7 @@ struct erofs_fileio_rq {
struct bio_vec bvecs[BIO_MAX_VECS];
struct bio bio;
struct kiocb iocb;
+ struct super_block *sb;
};
struct erofs_fileio {
@@ -52,8 +53,9 @@ static void erofs_fileio_rq_submit(struct erofs_fileio_rq *rq)
rq->iocb.ki_pos = rq->bio.bi_iter.bi_sector << SECTOR_SHIFT;
rq->iocb.ki_ioprio = get_current_ioprio();
rq->iocb.ki_complete = erofs_fileio_ki_complete;
- rq->iocb.ki_flags = (rq->iocb.ki_filp->f_mode & FMODE_CAN_ODIRECT) ?
- IOCB_DIRECT : 0;
+ if (test_opt(&EROFS_SB(rq->sb)->opt, DIRECT_IO) &&
+ rq->iocb.ki_filp->f_mode & FMODE_CAN_ODIRECT)
+ rq->iocb.ki_flags = IOCB_DIRECT;
iov_iter_bvec(&iter, ITER_DEST, rq->bvecs, rq->bio.bi_vcnt,
rq->bio.bi_iter.bi_size);
ret = vfs_iocb_iter_read(rq->iocb.ki_filp, &rq->iocb, &iter);
@@ -67,7 +69,8 @@ static struct erofs_fileio_rq *erofs_fileio_rq_alloc(struct erofs_map_dev *mdev)
GFP_KERNEL | __GFP_NOFAIL);
bio_init(&rq->bio, NULL, rq->bvecs, BIO_MAX_VECS, REQ_OP_READ);
- rq->iocb.ki_filp = mdev->m_fp;
+ rq->iocb.ki_filp = mdev->m_dif->file;
+ rq->sb = mdev->m_sb;
return rq;
}
diff --git a/fs/erofs/fscache.c b/fs/erofs/fscache.c
index fda16eedafb5..ce3d8737df85 100644
--- a/fs/erofs/fscache.c
+++ b/fs/erofs/fscache.c
@@ -198,7 +198,7 @@ struct bio *erofs_fscache_bio_alloc(struct erofs_map_dev *mdev)
io = kmalloc(sizeof(*io), GFP_KERNEL | __GFP_NOFAIL);
bio_init(&io->bio, NULL, io->bvecs, BIO_MAX_VECS, REQ_OP_READ);
- io->io.private = mdev->m_fscache->cookie;
+ io->io.private = mdev->m_dif->fscache->cookie;
io->io.end_io = erofs_fscache_bio_endio;
refcount_set(&io->io.ref, 1);
return &io->bio;
@@ -316,7 +316,7 @@ static int erofs_fscache_data_read_slice(struct erofs_fscache_rq *req)
if (!io)
return -ENOMEM;
iov_iter_xarray(&io->iter, ITER_DEST, &mapping->i_pages, pos, count);
- ret = erofs_fscache_read_io_async(mdev.m_fscache->cookie,
+ ret = erofs_fscache_read_io_async(mdev.m_dif->fscache->cookie,
mdev.m_pa + (pos - map.m_la), io);
erofs_fscache_req_io_put(io);
@@ -657,7 +657,7 @@ int erofs_fscache_register_fs(struct super_block *sb)
if (IS_ERR(fscache))
return PTR_ERR(fscache);
- sbi->s_fscache = fscache;
+ sbi->dif0.fscache = fscache;
return 0;
}
@@ -665,14 +665,14 @@ void erofs_fscache_unregister_fs(struct super_block *sb)
{
struct erofs_sb_info *sbi = EROFS_SB(sb);
- erofs_fscache_unregister_cookie(sbi->s_fscache);
+ erofs_fscache_unregister_cookie(sbi->dif0.fscache);
if (sbi->domain)
erofs_fscache_domain_put(sbi->domain);
else
fscache_relinquish_volume(sbi->volume, NULL, false);
- sbi->s_fscache = NULL;
+ sbi->dif0.fscache = NULL;
sbi->volume = NULL;
sbi->domain = NULL;
}
diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h
index 1c847c30a918..686d835eb533 100644
--- a/fs/erofs/internal.h
+++ b/fs/erofs/internal.h
@@ -107,6 +107,7 @@ struct erofs_xattr_prefix_item {
};
struct erofs_sb_info {
+ struct erofs_device_info dif0;
struct erofs_mount_opts opt; /* options */
#ifdef CONFIG_EROFS_FS_ZIP
/* list for all registered superblocks, mainly for shrinker */
@@ -124,13 +125,9 @@ struct erofs_sb_info {
struct erofs_sb_lz4_info lz4;
#endif /* CONFIG_EROFS_FS_ZIP */
- struct file *fdev;
struct inode *packed_inode;
struct erofs_dev_context *devs;
- struct dax_device *dax_dev;
- u64 dax_part_off;
u64 total_blocks;
- u32 primarydevice_blocks;
u32 meta_blkaddr;
#ifdef CONFIG_EROFS_FS_XATTR
@@ -166,7 +163,6 @@ struct erofs_sb_info {
/* fscache support */
struct fscache_volume *volume;
- struct erofs_fscache *s_fscache;
struct erofs_domain *domain;
char *fsid;
char *domain_id;
@@ -180,6 +176,7 @@ struct erofs_sb_info {
#define EROFS_MOUNT_POSIX_ACL 0x00000020
#define EROFS_MOUNT_DAX_ALWAYS 0x00000040
#define EROFS_MOUNT_DAX_NEVER 0x00000080
+#define EROFS_MOUNT_DIRECT_IO 0x00000100
#define clear_opt(opt, option) ((opt)->mount_opt &= ~EROFS_MOUNT_##option)
#define set_opt(opt, option) ((opt)->mount_opt |= EROFS_MOUNT_##option)
@@ -187,7 +184,7 @@ struct erofs_sb_info {
static inline bool erofs_is_fileio_mode(struct erofs_sb_info *sbi)
{
- return IS_ENABLED(CONFIG_EROFS_FS_BACKED_BY_FILE) && sbi->fdev;
+ return IS_ENABLED(CONFIG_EROFS_FS_BACKED_BY_FILE) && sbi->dif0.file;
}
static inline bool erofs_is_fscache_mode(struct super_block *sb)
@@ -357,11 +354,9 @@ enum {
};
struct erofs_map_dev {
- struct erofs_fscache *m_fscache;
+ struct super_block *m_sb;
+ struct erofs_device_info *m_dif;
struct block_device *m_bdev;
- struct dax_device *m_daxdev;
- struct file *m_fp;
- u64 m_dax_part_off;
erofs_off_t m_pa;
unsigned int m_deviceid;
diff --git a/fs/erofs/super.c b/fs/erofs/super.c
index c235a8e4315e..f5956474bfde 100644
--- a/fs/erofs/super.c
+++ b/fs/erofs/super.c
@@ -203,7 +203,7 @@ static int erofs_scan_devices(struct super_block *sb,
struct erofs_device_info *dif;
int id, err = 0;
- sbi->total_blocks = sbi->primarydevice_blocks;
+ sbi->total_blocks = sbi->dif0.blocks;
if (!erofs_sb_has_device_table(sbi))
ondisk_extradevs = 0;
else
@@ -307,7 +307,7 @@ static int erofs_read_superblock(struct super_block *sb)
sbi->sb_size);
goto out;
}
- sbi->primarydevice_blocks = le32_to_cpu(dsb->blocks);
+ sbi->dif0.blocks = le32_to_cpu(dsb->blocks);
sbi->meta_blkaddr = le32_to_cpu(dsb->meta_blkaddr);
#ifdef CONFIG_EROFS_FS_XATTR
sbi->xattr_blkaddr = le32_to_cpu(dsb->xattr_blkaddr);
@@ -364,14 +364,8 @@ static void erofs_default_options(struct erofs_sb_info *sbi)
}
enum {
- Opt_user_xattr,
- Opt_acl,
- Opt_cache_strategy,
- Opt_dax,
- Opt_dax_enum,
- Opt_device,
- Opt_fsid,
- Opt_domain_id,
+ Opt_user_xattr, Opt_acl, Opt_cache_strategy, Opt_dax, Opt_dax_enum,
+ Opt_device, Opt_fsid, Opt_domain_id, Opt_directio,
Opt_err
};
@@ -398,6 +392,7 @@ static const struct fs_parameter_spec erofs_fs_parameters[] = {
fsparam_string("device", Opt_device),
fsparam_string("fsid", Opt_fsid),
fsparam_string("domain_id", Opt_domain_id),
+ fsparam_flag_no("directio", Opt_directio),
{}
};
@@ -511,6 +506,16 @@ static int erofs_fc_parse_param(struct fs_context *fc,
errorfc(fc, "%s option not supported", erofs_fs_parameters[opt].name);
break;
#endif
+ case Opt_directio:
+#ifdef CONFIG_EROFS_FS_BACKED_BY_FILE
+ if (result.boolean)
+ set_opt(&sbi->opt, DIRECT_IO);
+ else
+ clear_opt(&sbi->opt, DIRECT_IO);
+#else
+ errorfc(fc, "%s option not supported", erofs_fs_parameters[opt].name);
+#endif
+ break;
default:
return -ENOPARAM;
}
@@ -602,9 +607,8 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
return -EINVAL;
}
- sbi->dax_dev = fs_dax_get_by_bdev(sb->s_bdev,
- &sbi->dax_part_off,
- NULL, NULL);
+ sbi->dif0.dax_dev = fs_dax_get_by_bdev(sb->s_bdev,
+ &sbi->dif0.dax_part_off, NULL, NULL);
}
err = erofs_read_superblock(sb);
@@ -627,7 +631,7 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
}
if (test_opt(&sbi->opt, DAX_ALWAYS)) {
- if (!sbi->dax_dev) {
+ if (!sbi->dif0.dax_dev) {
errorfc(fc, "DAX unsupported by block device. Turning off DAX.");
clear_opt(&sbi->opt, DAX_ALWAYS);
} else if (sbi->blkszbits != PAGE_SHIFT) {
@@ -703,16 +707,18 @@ static int erofs_fc_get_tree(struct fs_context *fc)
GET_TREE_BDEV_QUIET_LOOKUP : 0);
#ifdef CONFIG_EROFS_FS_BACKED_BY_FILE
if (ret == -ENOTBLK) {
+ struct file *file;
+
if (!fc->source)
return invalf(fc, "No source specified");
- sbi->fdev = filp_open(fc->source, O_RDONLY | O_LARGEFILE, 0);
- if (IS_ERR(sbi->fdev))
- return PTR_ERR(sbi->fdev);
+ file = filp_open(fc->source, O_RDONLY | O_LARGEFILE, 0);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+ sbi->dif0.file = file;
- if (S_ISREG(file_inode(sbi->fdev)->i_mode) &&
- sbi->fdev->f_mapping->a_ops->read_folio)
+ if (S_ISREG(file_inode(sbi->dif0.file)->i_mode) &&
+ sbi->dif0.file->f_mapping->a_ops->read_folio)
return get_tree_nodev(fc, erofs_fc_fill_super);
- fput(sbi->fdev);
}
#endif
return ret;
@@ -763,19 +769,24 @@ static void erofs_free_dev_context(struct erofs_dev_context *devs)
kfree(devs);
}
-static void erofs_fc_free(struct fs_context *fc)
+static void erofs_sb_free(struct erofs_sb_info *sbi)
{
- struct erofs_sb_info *sbi = fc->s_fs_info;
-
- if (!sbi)
- return;
-
erofs_free_dev_context(sbi->devs);
kfree(sbi->fsid);
kfree(sbi->domain_id);
+ if (sbi->dif0.file)
+ fput(sbi->dif0.file);
kfree(sbi);
}
+static void erofs_fc_free(struct fs_context *fc)
+{
+ struct erofs_sb_info *sbi = fc->s_fs_info;
+
+ if (sbi) /* free here if an error occurs before transferring to sb */
+ erofs_sb_free(sbi);
+}
+
static const struct fs_context_operations erofs_context_ops = {
.parse_param = erofs_fc_parse_param,
.get_tree = erofs_fc_get_tree,
@@ -809,19 +820,14 @@ static void erofs_kill_sb(struct super_block *sb)
{
struct erofs_sb_info *sbi = EROFS_SB(sb);
- if ((IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND) && sbi->fsid) || sbi->fdev)
+ if ((IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND) && sbi->fsid) ||
+ sbi->dif0.file)
kill_anon_super(sb);
else
kill_block_super(sb);
-
- erofs_free_dev_context(sbi->devs);
- fs_put_dax(sbi->dax_dev, NULL);
+ fs_put_dax(sbi->dif0.dax_dev, NULL);
erofs_fscache_unregister_fs(sb);
- kfree(sbi->fsid);
- kfree(sbi->domain_id);
- if (sbi->fdev)
- fput(sbi->fdev);
- kfree(sbi);
+ erofs_sb_free(sbi);
sb->s_fs_info = NULL;
}
@@ -947,6 +953,8 @@ static int erofs_show_options(struct seq_file *seq, struct dentry *root)
seq_puts(seq, ",dax=always");
if (test_opt(opt, DAX_NEVER))
seq_puts(seq, ",dax=never");
+ if (erofs_is_fileio_mode(sbi) && test_opt(opt, DIRECT_IO))
+ seq_puts(seq, ",directio");
#ifdef CONFIG_EROFS_FS_ONDEMAND
if (sbi->fsid)
seq_printf(seq, ",fsid=%s", sbi->fsid);
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index 01f147505487..19ef4ff2a134 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -1792,9 +1792,9 @@ drain_io:
erofs_fscache_submit_bio(bio);
else
submit_bio(bio);
- if (memstall)
- psi_memstall_leave(&pflags);
}
+ if (memstall)
+ psi_memstall_leave(&pflags);
/*
* although background is preferred, no one is pending for submission.
diff --git a/fs/erofs/zutil.c b/fs/erofs/zutil.c
index 75704f58ecfa..0dd65cefce33 100644
--- a/fs/erofs/zutil.c
+++ b/fs/erofs/zutil.c
@@ -230,9 +230,10 @@ void erofs_shrinker_unregister(struct super_block *sb)
struct erofs_sb_info *const sbi = EROFS_SB(sb);
mutex_lock(&sbi->umount_mutex);
- /* clean up all remaining pclusters in memory */
- z_erofs_shrink_scan(sbi, ~0UL);
-
+ while (!xa_empty(&sbi->managed_pslots)) {
+ z_erofs_shrink_scan(sbi, ~0UL);
+ cond_resched();
+ }
spin_lock(&erofs_sb_list_lock);
list_del(&sbi->list);
spin_unlock(&erofs_sb_list_lock);
diff --git a/fs/exfat/dir.c b/fs/exfat/dir.c
index fe0a9b8a0cd0..3103b932b674 100644
--- a/fs/exfat/dir.c
+++ b/fs/exfat/dir.c
@@ -122,7 +122,7 @@ static int exfat_readdir(struct inode *inode, loff_t *cpos, struct exfat_dir_ent
type = exfat_get_entry_type(ep);
if (type == TYPE_UNUSED) {
brelse(bh);
- break;
+ goto out;
}
if (type != TYPE_FILE && type != TYPE_DIR) {
@@ -170,6 +170,7 @@ static int exfat_readdir(struct inode *inode, loff_t *cpos, struct exfat_dir_ent
}
}
+out:
dir_entry->namebuf.lfn[0] = '\0';
*cpos = EXFAT_DEN_TO_B(dentry);
return 0;
diff --git a/fs/exfat/fatent.c b/fs/exfat/fatent.c
index 773c320d68f3..9e5492ac409b 100644
--- a/fs/exfat/fatent.c
+++ b/fs/exfat/fatent.c
@@ -216,6 +216,16 @@ static int __exfat_free_cluster(struct inode *inode, struct exfat_chain *p_chain
if (err)
goto dec_used_clus;
+
+ if (num_clusters >= sbi->num_clusters - EXFAT_FIRST_CLUSTER) {
+ /*
+ * The cluster chain includes a loop, scan the
+ * bitmap to get the number of used clusters.
+ */
+ exfat_count_used_clusters(sb, &sbi->used_clusters);
+
+ return 0;
+ }
} while (clu != EXFAT_EOF_CLUSTER);
}
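
The bound holds because a well-formed chain cannot have more links than the volume has clusters; walking past that count proves a cycle, and the used-cluster total is then recovered from the allocation bitmap rather than the corrupted chain. A sketch of the invariant (illustrative helper):

	#include <linux/types.h>

	/* True once a chain walk has visited more links than can exist. */
	static bool demo_chain_has_loop(unsigned int visited_links,
					unsigned int total_clusters)
	{
		return visited_links >= total_clusters;
	}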
diff --git a/fs/exfat/file.c b/fs/exfat/file.c
index fb38769c3e39..05b51e721783 100644
--- a/fs/exfat/file.c
+++ b/fs/exfat/file.c
@@ -545,6 +545,7 @@ static int exfat_extend_valid_size(struct file *file, loff_t new_valid_size)
while (pos < new_valid_size) {
u32 len;
struct folio *folio;
+ unsigned long off;
len = PAGE_SIZE - (pos & (PAGE_SIZE - 1));
if (pos + len > new_valid_size)
@@ -554,6 +555,9 @@ static int exfat_extend_valid_size(struct file *file, loff_t new_valid_size)
if (err)
goto out;
+ off = offset_in_folio(folio, pos);
+ folio_zero_new_buffers(folio, off, off + len);
+
err = ops->write_end(file, mapping, pos, len, len, folio, NULL);
if (err < 0)
goto out;
@@ -563,6 +567,8 @@ static int exfat_extend_valid_size(struct file *file, loff_t new_valid_size)
cond_resched();
}
+ return 0;
+
out:
return err;
}
diff --git a/fs/exfat/namei.c b/fs/exfat/namei.c
index 97d2774760fe..099f80645072 100644
--- a/fs/exfat/namei.c
+++ b/fs/exfat/namei.c
@@ -330,8 +330,8 @@ static int exfat_find_empty_entry(struct inode *inode,
while ((dentry = exfat_search_empty_slot(sb, &hint_femp, p_dir,
num_entries, es)) < 0) {
- if (dentry == -EIO)
- break;
+ if (dentry != -ENOSPC)
+ return dentry;
if (exfat_check_max_dentries(inode))
return -ENOSPC;
diff --git a/fs/file.c b/fs/file.c
index fb1011cf6b4a..25c6e53b03f8 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -22,6 +22,7 @@
#include <linux/close_range.h>
#include <linux/file_ref.h>
#include <net/sock.h>
+#include <linux/init_task.h>
#include "internal.h"
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 494ac372ace0..e540d05549ff 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -1681,6 +1681,8 @@ static int fuse_dir_open(struct inode *inode, struct file *file)
*/
if (ff->open_flags & (FOPEN_STREAM | FOPEN_NONSEEKABLE))
nonseekable_open(inode, file);
+ if (!(ff->open_flags & FOPEN_KEEP_CACHE))
+ invalidate_inode_pages2(inode->i_mapping);
}
return err;
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 88d0946b5bc9..7d92a5479998 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1541,8 +1541,10 @@ static int fuse_get_user_pages(struct fuse_args_pages *ap, struct iov_iter *ii,
*/
struct page **pages = kzalloc(max_pages * sizeof(struct page *),
GFP_KERNEL);
- if (!pages)
- return -ENOMEM;
+ if (!pages) {
+ ret = -ENOMEM;
+ goto out;
+ }
while (nbytes < *nbytesp && nr_pages < max_pages) {
unsigned nfolios, i;
@@ -1557,18 +1559,22 @@ static int fuse_get_user_pages(struct fuse_args_pages *ap, struct iov_iter *ii,
nbytes += ret;
- ret += start;
- /* Currently, all folios in FUSE are one page */
- nfolios = DIV_ROUND_UP(ret, PAGE_SIZE);
+ nfolios = DIV_ROUND_UP(ret + start, PAGE_SIZE);
+
+ for (i = 0; i < nfolios; i++) {
+ struct folio *folio = page_folio(pages[i]);
+ unsigned int offset = start +
+ (folio_page_idx(folio, pages[i]) << PAGE_SHIFT);
+ unsigned int len = min_t(unsigned int, ret, PAGE_SIZE - start);
- ap->descs[ap->num_folios].offset = start;
- fuse_folio_descs_length_init(ap->descs, ap->num_folios, nfolios);
- for (i = 0; i < nfolios; i++)
- ap->folios[i + ap->num_folios] = page_folio(pages[i]);
+ ap->descs[ap->num_folios].offset = offset;
+ ap->descs[ap->num_folios].length = len;
+ ap->folios[ap->num_folios] = folio;
+ start = 0;
+ ret -= len;
+ ap->num_folios++;
+ }
- ap->num_folios += nfolios;
- ap->descs[ap->num_folios - 1].length -=
- (PAGE_SIZE - ret) & (PAGE_SIZE - 1);
nr_pages += nfolios;
}
kfree(pages);
@@ -1584,6 +1590,7 @@ static int fuse_get_user_pages(struct fuse_args_pages *ap, struct iov_iter *ii,
else
ap->args.out_pages = true;
+out:
*nbytesp = nbytes;
return ret < 0 ? ret : 0;
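
The rewritten loop derives each descriptor from the page's position inside its folio instead of assuming one-page folios, and splits ret across descriptors as it goes. Tracing it with hypothetical single-page folios, PAGE_SIZE = 4096, start = 512 and ret = 6000 shows the accounting closes exactly:

	/* nfolios = DIV_ROUND_UP(6000 + 512, 4096) = 2
	 * i = 0: len = min(6000, 4096 - 512) = 3584 -> {offset 512, len 3584};
	 *        ret -= 3584 (2416 left), start reset to 0
	 * i = 1: len = min(2416, 4096 - 0) = 2416  -> {offset 0, len 2416}
	 * 3584 + 2416 == 6000: every extracted byte is described once. */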
diff --git a/fs/hfs/super.c b/fs/hfs/super.c
index 3bee9b5dba5e..fe09c2093a93 100644
--- a/fs/hfs/super.c
+++ b/fs/hfs/super.c
@@ -349,11 +349,13 @@ static int hfs_fill_super(struct super_block *sb, struct fs_context *fc)
goto bail_no_root;
res = hfs_cat_find_brec(sb, HFS_ROOT_CNID, &fd);
if (!res) {
- if (fd.entrylength > sizeof(rec) || fd.entrylength < 0) {
+ if (fd.entrylength != sizeof(rec.dir)) {
res = -EIO;
goto bail_hfs_find;
}
hfs_bnode_read(fd.bnode, &rec, fd.entryoffset, fd.entrylength);
+ if (rec.type != HFS_CDR_DIR)
+ res = -EIO;
}
if (res)
goto bail_hfs_find;
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 90f883d6b8fd..fc1ae5132127 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -825,7 +825,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
error = PTR_ERR(folio);
goto out;
}
- folio_zero_user(folio, ALIGN_DOWN(addr, hpage_size));
+ folio_zero_user(folio, addr);
__folio_mark_uptodate(folio);
error = hugetlb_add_to_page_cache(folio, mapping, index);
if (unlikely(error)) {
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 955f19e27e47..d303e6c8900c 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -1138,7 +1138,7 @@ static void iomap_write_delalloc_scan(struct inode *inode,
start_byte, end_byte, iomap, punch);
/* move offset to start of next folio in range */
- start_byte = folio_next_index(folio) << PAGE_SHIFT;
+ start_byte = folio_pos(folio) + folio_size(folio);
folio_unlock(folio);
folio_put(folio);
}
@@ -1774,7 +1774,8 @@ static bool iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t pos)
*/
static int iomap_add_to_ioend(struct iomap_writepage_ctx *wpc,
struct writeback_control *wbc, struct folio *folio,
- struct inode *inode, loff_t pos, unsigned len)
+ struct inode *inode, loff_t pos, loff_t end_pos,
+ unsigned len)
{
struct iomap_folio_state *ifs = folio->private;
size_t poff = offset_in_folio(folio, pos);
@@ -1793,15 +1794,60 @@ new_ioend:
if (ifs)
atomic_add(len, &ifs->write_bytes_pending);
+
+ /*
+ * Clamp io_offset and io_size to the incore EOF so that ondisk
+ * file size updates in the ioend completion are byte-accurate.
+ * This avoids recovering files with zeroed tail regions when
+ * writeback races with appending writes:
+ *
+ * Thread 1: Thread 2:
+ * ------------ -----------
+ * write [A, A+B]
+ * update inode size to A+B
+ * submit I/O [A, A+BS]
+ * write [A+B, A+B+C]
+ * update inode size to A+B+C
+ * <I/O completes, updates disk size to min(A+B+C, A+BS)>
+ * <power failure>
+ *
+ * After reboot:
+ * 1) with A+B+C < A+BS, the file has zero padding in range
+ * [A+B, A+B+C]
+ *
+ * |< Block Size (BS) >|
+ * |DDDDDDDDDDDD0000000000000|
+ * ^ ^ ^
+ * A A+B A+B+C
+ * (EOF)
+ *
+ * 2) with A+B+C > A+BS, the file has zero padding in range
+ * [A+B, A+BS]
+ *
+ * |< Block Size (BS) >|< Block Size (BS) >|
+ * |DDDDDDDDDDDD0000000000000|00000000000000000000000000|
+ * ^ ^ ^ ^
+ * A A+B A+BS A+B+C
+ * (EOF)
+ *
+ * D = Valid Data
+ * 0 = Zero Padding
+ *
+ * Note that this defeats the ability to chain the ioends of
+ * appending writes.
+ */
wpc->ioend->io_size += len;
+ if (wpc->ioend->io_offset + wpc->ioend->io_size > end_pos)
+ wpc->ioend->io_size = end_pos - wpc->ioend->io_offset;
+
wbc_account_cgroup_owner(wbc, folio, len);
return 0;
}
static int iomap_writepage_map_blocks(struct iomap_writepage_ctx *wpc,
struct writeback_control *wbc, struct folio *folio,
- struct inode *inode, u64 pos, unsigned dirty_len,
- unsigned *count)
+ struct inode *inode, u64 pos, u64 end_pos,
+ unsigned dirty_len, unsigned *count)
{
int error;
@@ -1826,7 +1872,7 @@ static int iomap_writepage_map_blocks(struct iomap_writepage_ctx *wpc,
break;
default:
error = iomap_add_to_ioend(wpc, wbc, folio, inode, pos,
- map_len);
+ end_pos, map_len);
if (!error)
(*count)++;
break;
@@ -1897,11 +1943,11 @@ static bool iomap_writepage_handle_eof(struct folio *folio, struct inode *inode,
* remaining memory is zeroed when mapped, and writes to that
* region are not written out to the file.
*
- * Also adjust the writeback range to skip all blocks entirely
- * beyond i_size.
+ * Also adjust the end_pos to the end of file and skip writeback
+ * for all blocks entirely beyond i_size.
*/
folio_zero_segment(folio, poff, folio_size(folio));
- *end_pos = round_up(isize, i_blocksize(inode));
+ *end_pos = isize;
}
return true;
@@ -1914,6 +1960,7 @@ static int iomap_writepage_map(struct iomap_writepage_ctx *wpc,
struct inode *inode = folio->mapping->host;
u64 pos = folio_pos(folio);
u64 end_pos = pos + folio_size(folio);
+ u64 end_aligned = 0;
unsigned count = 0;
int error = 0;
u32 rlen;
@@ -1955,9 +2002,10 @@ static int iomap_writepage_map(struct iomap_writepage_ctx *wpc,
/*
* Walk through the folio to find dirty areas to write back.
*/
- while ((rlen = iomap_find_dirty_range(folio, &pos, end_pos))) {
+ end_aligned = round_up(end_pos, i_blocksize(inode));
+ while ((rlen = iomap_find_dirty_range(folio, &pos, end_aligned))) {
error = iomap_writepage_map_blocks(wpc, wbc, folio, inode,
- pos, rlen, &count);
+ pos, end_pos, rlen, &count);
if (error)
break;
pos += rlen;
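
Concretely, with hypothetical numbers matching the diagram above: block size 64 KiB, a 10 KiB write at offset 0, and i_size (hence end_pos) = 10240 at submission time. The ioend initially spans the whole block and the clamp trims it back to the incore EOF:

	wpc->ioend->io_offset = 0;
	wpc->ioend->io_size  += SZ_64K;		/* block-granular submission */
	if (wpc->ioend->io_offset + wpc->ioend->io_size > end_pos)
		wpc->ioend->io_size = end_pos - wpc->ioend->io_offset; /* 10240 */
	/* completion updates the on-disk size to 10240, never past valid
	 * data, so a crash cannot leave a zero-padded tail inside the file. */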
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 9153ff3a08e7..e8e80761ac73 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -772,9 +772,9 @@ start_journal_io:
/*
* If the journal is not located on the file system device,
* then we must flush the file system device before we issue
- * the commit record
+ * the commit record and update the journal tail sequence.
*/
- if (commit_transaction->t_need_data_flush &&
+ if ((commit_transaction->t_need_data_flush || update_tail) &&
(journal->j_fs_dev != journal->j_dev) &&
(journal->j_flags & JBD2_BARRIER))
blkdev_issue_flush(journal->j_fs_dev);
diff --git a/fs/jbd2/revoke.c b/fs/jbd2/revoke.c
index 4556e4689024..ce63d5fde9c3 100644
--- a/fs/jbd2/revoke.c
+++ b/fs/jbd2/revoke.c
@@ -654,7 +654,7 @@ static void flush_descriptor(journal_t *journal,
set_buffer_jwrite(descriptor);
BUFFER_TRACE(descriptor, "write");
set_buffer_dirty(descriptor);
- write_dirty_buffer(descriptor, REQ_SYNC);
+ write_dirty_buffer(descriptor, JBD2_JOURNAL_REQ_FLAGS);
}
#endif
diff --git a/fs/mount.h b/fs/mount.h
index 185fc56afc13..179f690a0c72 100644
--- a/fs/mount.h
+++ b/fs/mount.h
@@ -38,6 +38,7 @@ struct mount {
struct dentry *mnt_mountpoint;
struct vfsmount mnt;
union {
+ struct rb_node mnt_node; /* node in the ns->mounts rbtree */
struct rcu_head mnt_rcu;
struct llist_node mnt_llist;
};
@@ -51,10 +52,7 @@ struct mount {
struct list_head mnt_child; /* and going through their mnt_child */
struct list_head mnt_instance; /* mount instance on sb->s_mounts */
const char *mnt_devname; /* Name of device e.g. /dev/dsk/hda1 */
- union {
- struct rb_node mnt_node; /* Under ns->mounts */
- struct list_head mnt_list;
- };
+ struct list_head mnt_list;
struct list_head mnt_expire; /* link in fs-specific expiry list */
struct list_head mnt_share; /* circular list of shared mounts */
struct list_head mnt_slave_list;/* list of slave mounts */
@@ -145,11 +143,16 @@ static inline bool is_anon_ns(struct mnt_namespace *ns)
return ns->seq == 0;
}
+static inline bool mnt_ns_attached(const struct mount *mnt)
+{
+ return !RB_EMPTY_NODE(&mnt->mnt_node);
+}
+
static inline void move_from_ns(struct mount *mnt, struct list_head *dt_list)
{
- WARN_ON(!(mnt->mnt.mnt_flags & MNT_ONRB));
- mnt->mnt.mnt_flags &= ~MNT_ONRB;
+ WARN_ON(!mnt_ns_attached(mnt));
rb_erase(&mnt->mnt_node, &mnt->mnt_ns->mounts);
+ RB_CLEAR_NODE(&mnt->mnt_node);
list_add_tail(&mnt->mnt_list, dt_list);
}
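
Dropping the MNT_ONRB flag works because an rb_node can encode its own membership: RB_CLEAR_NODE() parks the node in a self-referential "not on any tree" state that RB_EMPTY_NODE() detects, so attachment state lives in the node itself and cannot drift out of sync with the tree the way a separate flag could. The invariant maintained by the diff, in sketch form:

	/* alloc:  RB_CLEAR_NODE(&mnt->mnt_node);       -> mnt_ns_attached() == false
	 * attach: rb_link_node() + rb_insert_color();  -> mnt_ns_attached() == true
	 * detach: rb_erase() + RB_CLEAR_NODE();        -> mnt_ns_attached() == false
	 */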
diff --git a/fs/namespace.c b/fs/namespace.c
index 23e81c2a1e3f..eac057e56948 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -344,6 +344,7 @@ static struct mount *alloc_vfsmnt(const char *name)
INIT_HLIST_NODE(&mnt->mnt_mp_list);
INIT_LIST_HEAD(&mnt->mnt_umounting);
INIT_HLIST_HEAD(&mnt->mnt_stuck_children);
+ RB_CLEAR_NODE(&mnt->mnt_node);
mnt->mnt.mnt_idmap = &nop_mnt_idmap;
}
return mnt;
@@ -1124,7 +1125,7 @@ static void mnt_add_to_ns(struct mnt_namespace *ns, struct mount *mnt)
struct rb_node **link = &ns->mounts.rb_node;
struct rb_node *parent = NULL;
- WARN_ON(mnt->mnt.mnt_flags & MNT_ONRB);
+ WARN_ON(mnt_ns_attached(mnt));
mnt->mnt_ns = ns;
while (*link) {
parent = *link;
@@ -1135,7 +1136,6 @@ static void mnt_add_to_ns(struct mnt_namespace *ns, struct mount *mnt)
}
rb_link_node(&mnt->mnt_node, parent, link);
rb_insert_color(&mnt->mnt_node, &ns->mounts);
- mnt->mnt.mnt_flags |= MNT_ONRB;
}
/*
@@ -1305,7 +1305,7 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root,
}
mnt->mnt.mnt_flags = old->mnt.mnt_flags;
- mnt->mnt.mnt_flags &= ~(MNT_WRITE_HOLD|MNT_MARKED|MNT_INTERNAL|MNT_ONRB);
+ mnt->mnt.mnt_flags &= ~(MNT_WRITE_HOLD|MNT_MARKED|MNT_INTERNAL);
atomic_inc(&sb->s_active);
mnt->mnt.mnt_idmap = mnt_idmap_get(mnt_idmap(&old->mnt));
@@ -1763,7 +1763,7 @@ static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
/* Gather the mounts to umount */
for (p = mnt; p; p = next_mnt(p, mnt)) {
p->mnt.mnt_flags |= MNT_UMOUNT;
- if (p->mnt.mnt_flags & MNT_ONRB)
+ if (mnt_ns_attached(p))
move_from_ns(p, &tmp_list);
else
list_move(&p->mnt_list, &tmp_list);
@@ -1912,16 +1912,14 @@ static int do_umount(struct mount *mnt, int flags)
event++;
if (flags & MNT_DETACH) {
- if (mnt->mnt.mnt_flags & MNT_ONRB ||
- !list_empty(&mnt->mnt_list))
+ if (mnt_ns_attached(mnt) || !list_empty(&mnt->mnt_list))
umount_tree(mnt, UMOUNT_PROPAGATE);
retval = 0;
} else {
shrink_submounts(mnt);
retval = -EBUSY;
if (!propagate_mount_busy(mnt, 2)) {
- if (mnt->mnt.mnt_flags & MNT_ONRB ||
- !list_empty(&mnt->mnt_list))
+ if (mnt_ns_attached(mnt) || !list_empty(&mnt->mnt_list))
umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
retval = 0;
}
@@ -2055,9 +2053,15 @@ SYSCALL_DEFINE1(oldumount, char __user *, name)
static bool is_mnt_ns_file(struct dentry *dentry)
{
+ struct ns_common *ns;
+
/* Is this a proxy for a mount namespace? */
- return dentry->d_op == &ns_dentry_operations &&
- dentry->d_fsdata == &mntns_operations;
+ if (dentry->d_op != &ns_dentry_operations)
+ return false;
+
+ ns = d_inode(dentry)->i_private;
+
+ return ns->ops == &mntns_operations;
}
struct ns_common *from_mnt_ns(struct mnt_namespace *mnt)
diff --git a/fs/netfs/buffered_read.c b/fs/netfs/buffered_read.c
index 7ac34550c403..4dc9b8286355 100644
--- a/fs/netfs/buffered_read.c
+++ b/fs/netfs/buffered_read.c
@@ -275,22 +275,14 @@ static void netfs_read_to_pagecache(struct netfs_io_request *rreq)
netfs_stat(&netfs_n_rh_download);
if (rreq->netfs_ops->prepare_read) {
ret = rreq->netfs_ops->prepare_read(subreq);
- if (ret < 0) {
- atomic_dec(&rreq->nr_outstanding);
- netfs_put_subrequest(subreq, false,
- netfs_sreq_trace_put_cancel);
- break;
- }
+ if (ret < 0)
+ goto prep_failed;
trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);
}
slice = netfs_prepare_read_iterator(subreq);
- if (slice < 0) {
- atomic_dec(&rreq->nr_outstanding);
- netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_cancel);
- ret = slice;
- break;
- }
+ if (slice < 0)
+ goto prep_iter_failed;
rreq->netfs_ops->issue_read(subreq);
goto done;
@@ -302,6 +294,8 @@ static void netfs_read_to_pagecache(struct netfs_io_request *rreq)
trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
netfs_stat(&netfs_n_rh_zero);
slice = netfs_prepare_read_iterator(subreq);
+ if (slice < 0)
+ goto prep_iter_failed;
__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
netfs_read_subreq_terminated(subreq, 0, false);
goto done;
@@ -310,6 +304,8 @@ static void netfs_read_to_pagecache(struct netfs_io_request *rreq)
if (source == NETFS_READ_FROM_CACHE) {
trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
slice = netfs_prepare_read_iterator(subreq);
+ if (slice < 0)
+ goto prep_iter_failed;
netfs_read_cache_to_pagecache(rreq, subreq);
goto done;
}
@@ -318,6 +314,14 @@ static void netfs_read_to_pagecache(struct netfs_io_request *rreq)
WARN_ON_ONCE(1);
break;
+ prep_iter_failed:
+ ret = slice;
+ prep_failed:
+ subreq->error = ret;
+ atomic_dec(&rreq->nr_outstanding);
+ netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_cancel);
+ break;
+
done:
size -= slice;
start += slice;
diff --git a/fs/netfs/direct_write.c b/fs/netfs/direct_write.c
index 88f2adfab75e..f9421f3e6d37 100644
--- a/fs/netfs/direct_write.c
+++ b/fs/netfs/direct_write.c
@@ -67,7 +67,7 @@ ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *
* allocate a sufficiently large bvec array and may shorten the
* request.
*/
- if (async || user_backed_iter(iter)) {
+ if (user_backed_iter(iter)) {
n = netfs_extract_user_iter(iter, len, &wreq->iter, 0);
if (n < 0) {
ret = n;
@@ -77,6 +77,11 @@ ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *
wreq->direct_bv_count = n;
wreq->direct_bv_unpin = iov_iter_extract_will_pin(iter);
} else {
+ /* If this is a kernel-generated async DIO request,
+ * assume that any resources the iterator points to
+ * (eg. a bio_vec array) will persist till the end of
+ * the op.
+ */
wreq->iter = *iter;
}
@@ -104,7 +109,6 @@ ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *
trace_netfs_rreq(wreq, netfs_rreq_trace_wait_ip);
wait_on_bit(&wreq->flags, NETFS_RREQ_IN_PROGRESS,
TASK_UNINTERRUPTIBLE);
- smp_rmb(); /* Read error/transferred after RIP flag */
ret = wreq->error;
if (ret == 0) {
ret = wreq->transferred;
diff --git a/fs/netfs/read_collect.c b/fs/netfs/read_collect.c
index 3cbb289535a8..e8624f5c7fcc 100644
--- a/fs/netfs/read_collect.c
+++ b/fs/netfs/read_collect.c
@@ -62,10 +62,14 @@ static void netfs_unlock_read_folio(struct netfs_io_subrequest *subreq,
} else {
trace_netfs_folio(folio, netfs_folio_trace_read_done);
}
+
+ folioq_clear(folioq, slot);
} else {
// TODO: Use of PG_private_2 is deprecated.
if (test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags))
netfs_pgpriv2_mark_copy_to_cache(subreq, rreq, folioq, slot);
+ else
+ folioq_clear(folioq, slot);
}
if (!test_bit(NETFS_RREQ_DONT_UNLOCK_FOLIOS, &rreq->flags)) {
@@ -77,8 +81,6 @@ static void netfs_unlock_read_folio(struct netfs_io_subrequest *subreq,
folio_unlock(folio);
}
}
-
- folioq_clear(folioq, slot);
}
/*
@@ -247,16 +249,17 @@ donation_changed:
/* Deal with the trickiest case: that this subreq is in the middle of a
* folio, not touching either edge, but finishes first. In such a
- * case, we donate to the previous subreq, if there is one, so that the
- * donation is only handled when that completes - and remove this
- * subreq from the list.
+ * case, we donate to the previous subreq, if there is one and if it is
+ * contiguous, so that the donation is only handled when that completes
+ * - and remove this subreq from the list.
*
* If the previous subreq finished first, we will have acquired their
* donation and should be able to unlock folios and/or donate nextwards.
*/
if (!subreq->consumed &&
!prev_donated &&
- !list_is_first(&subreq->rreq_link, &rreq->subrequests)) {
+ !list_is_first(&subreq->rreq_link, &rreq->subrequests) &&
+ subreq->start == prev->start + prev->len) {
prev = list_prev_entry(subreq, rreq_link);
WRITE_ONCE(prev->next_donated, prev->next_donated + subreq->len);
subreq->start += subreq->len;
@@ -378,8 +381,7 @@ static void netfs_rreq_assess(struct netfs_io_request *rreq)
task_io_account_read(rreq->transferred);
trace_netfs_rreq(rreq, netfs_rreq_trace_wake_ip);
- clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
- wake_up_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS);
+ clear_and_wake_up_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
trace_netfs_rreq(rreq, netfs_rreq_trace_done);
netfs_clear_subrequests(rreq, false);
@@ -438,7 +440,7 @@ void netfs_read_subreq_progress(struct netfs_io_subrequest *subreq,
rreq->origin == NETFS_READPAGE ||
rreq->origin == NETFS_READ_FOR_WRITE)) {
netfs_consume_read_data(subreq, was_async);
- __clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
+ __set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
}
}
EXPORT_SYMBOL(netfs_read_subreq_progress);
@@ -497,7 +499,7 @@ void netfs_read_subreq_terminated(struct netfs_io_subrequest *subreq,
rreq->origin == NETFS_READPAGE ||
rreq->origin == NETFS_READ_FOR_WRITE)) {
netfs_consume_read_data(subreq, was_async);
- __clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
+ __set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
}
rreq->transferred += subreq->transferred;
}
@@ -511,10 +513,13 @@ void netfs_read_subreq_terminated(struct netfs_io_subrequest *subreq,
} else {
trace_netfs_sreq(subreq, netfs_sreq_trace_short);
if (subreq->transferred > subreq->consumed) {
- __set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
- __clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
- set_bit(NETFS_RREQ_NEED_RETRY, &rreq->flags);
- } else if (!__test_and_set_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags)) {
+ /* If we didn't read new data, abandon retry. */
+ if (subreq->retry_count &&
+ test_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags)) {
+ __set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
+ set_bit(NETFS_RREQ_NEED_RETRY, &rreq->flags);
+ }
+ } else if (test_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags)) {
__set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
set_bit(NETFS_RREQ_NEED_RETRY, &rreq->flags);
} else {
diff --git a/fs/netfs/read_pgpriv2.c b/fs/netfs/read_pgpriv2.c
index ba5af89d37fa..54d5004fec18 100644
--- a/fs/netfs/read_pgpriv2.c
+++ b/fs/netfs/read_pgpriv2.c
@@ -170,6 +170,10 @@ void netfs_pgpriv2_write_to_the_cache(struct netfs_io_request *rreq)
trace_netfs_write(wreq, netfs_write_trace_copy_to_cache);
netfs_stat(&netfs_n_wh_copy_to_cache);
+ if (!wreq->io_streams[1].avail) {
+ netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
+ goto couldnt_start;
+ }
for (;;) {
error = netfs_pgpriv2_copy_folio(wreq, folio);
diff --git a/fs/netfs/read_retry.c b/fs/netfs/read_retry.c
index 0350592ea804..16b676c68dcd 100644
--- a/fs/netfs/read_retry.c
+++ b/fs/netfs/read_retry.c
@@ -49,13 +49,15 @@ static void netfs_retry_read_subrequests(struct netfs_io_request *rreq)
* up to the first permanently failed one.
*/
if (!rreq->netfs_ops->prepare_read &&
- !test_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags)) {
+ !rreq->cache_resources.ops) {
struct netfs_io_subrequest *subreq;
list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
if (test_bit(NETFS_SREQ_FAILED, &subreq->flags))
break;
if (__test_and_clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) {
+ __clear_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
+ subreq->retry_count++;
netfs_reset_iter(subreq);
netfs_reissue_read(rreq, subreq);
}
@@ -137,7 +139,8 @@ static void netfs_retry_read_subrequests(struct netfs_io_request *rreq)
stream0->sreq_max_len = subreq->len;
__clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
- __set_bit(NETFS_SREQ_RETRYING, &subreq->flags);
+ __clear_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
+ subreq->retry_count++;
spin_lock_bh(&rreq->lock);
list_add_tail(&subreq->rreq_link, &rreq->subrequests);
@@ -149,7 +152,8 @@ static void netfs_retry_read_subrequests(struct netfs_io_request *rreq)
BUG_ON(!len);
/* Renegotiate max_len (rsize) */
- if (rreq->netfs_ops->prepare_read(subreq) < 0) {
+ if (rreq->netfs_ops->prepare_read &&
+ rreq->netfs_ops->prepare_read(subreq) < 0) {
trace_netfs_sreq(subreq, netfs_sreq_trace_reprep_failed);
__set_bit(NETFS_SREQ_FAILED, &subreq->flags);
}
@@ -213,7 +217,6 @@ abandon:
subreq->error = -ENOMEM;
__clear_bit(NETFS_SREQ_FAILED, &subreq->flags);
__clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
- __clear_bit(NETFS_SREQ_RETRYING, &subreq->flags);
}
spin_lock_bh(&rreq->lock);
list_splice_tail_init(&queue, &rreq->subrequests);
diff --git a/fs/netfs/write_collect.c b/fs/netfs/write_collect.c
index 1d438be2e1b4..ca3a11ed9b54 100644
--- a/fs/netfs/write_collect.c
+++ b/fs/netfs/write_collect.c
@@ -179,7 +179,6 @@ static void netfs_retry_write_stream(struct netfs_io_request *wreq,
struct iov_iter source = subreq->io_iter;
iov_iter_revert(&source, subreq->len - source.count);
- __set_bit(NETFS_SREQ_RETRYING, &subreq->flags);
netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
netfs_reissue_write(stream, subreq, &source);
}
@@ -234,7 +233,7 @@ static void netfs_retry_write_stream(struct netfs_io_request *wreq,
/* Renegotiate max_len (wsize) */
trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
__clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
- __set_bit(NETFS_SREQ_RETRYING, &subreq->flags);
+ subreq->retry_count++;
stream->prepare_write(subreq);
part = min(len, stream->sreq_max_len);
@@ -279,7 +278,7 @@ static void netfs_retry_write_stream(struct netfs_io_request *wreq,
subreq->start = start;
subreq->debug_index = atomic_inc_return(&wreq->subreq_counter);
subreq->stream_nr = to->stream_nr;
- __set_bit(NETFS_SREQ_RETRYING, &subreq->flags);
+ subreq->retry_count = 1;
trace_netfs_sreq_ref(wreq->debug_id, subreq->debug_index,
refcount_read(&subreq->ref),
@@ -501,8 +500,7 @@ reassess_streams:
goto need_retry;
if ((notes & MADE_PROGRESS) && test_bit(NETFS_RREQ_PAUSE, &wreq->flags)) {
trace_netfs_rreq(wreq, netfs_rreq_trace_unpause);
- clear_bit_unlock(NETFS_RREQ_PAUSE, &wreq->flags);
- wake_up_bit(&wreq->flags, NETFS_RREQ_PAUSE);
+ clear_and_wake_up_bit(NETFS_RREQ_PAUSE, &wreq->flags);
}
if (notes & NEED_REASSESS) {
@@ -605,8 +603,7 @@ void netfs_write_collection_worker(struct work_struct *work)
_debug("finished");
trace_netfs_rreq(wreq, netfs_rreq_trace_wake_ip);
- clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &wreq->flags);
- wake_up_bit(&wreq->flags, NETFS_RREQ_IN_PROGRESS);
+ clear_and_wake_up_bit(NETFS_RREQ_IN_PROGRESS, &wreq->flags);
if (wreq->iocb) {
size_t written = min(wreq->transferred, wreq->len);
@@ -714,8 +711,7 @@ void netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error,
trace_netfs_sreq(subreq, netfs_sreq_trace_terminated);
- clear_bit_unlock(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
- wake_up_bit(&subreq->flags, NETFS_SREQ_IN_PROGRESS);
+ clear_and_wake_up_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
/* If we are at the head of the queue, wake up the collector,
* transferring a ref to it if we were the ones to do so.
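
Each clear_bit_unlock() + wake_up_bit() pair above is folded into clear_and_wake_up_bit(), which also supplies the full barrier the open-coded sequence was missing between clearing the bit and checking for waiters; per include/linux/wait_bit.h it expands, in effect, to:

	clear_bit_unlock(bit, word);	/* release: prior stores visible first */
	smp_mb__after_atomic();		/* full barrier before the waiter check */
	wake_up_bit(word, bit);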
diff --git a/fs/netfs/write_issue.c b/fs/netfs/write_issue.c
index bf6d507578e5..ff0e82505a0b 100644
--- a/fs/netfs/write_issue.c
+++ b/fs/netfs/write_issue.c
@@ -244,6 +244,8 @@ void netfs_reissue_write(struct netfs_io_stream *stream,
iov_iter_advance(source, size);
iov_iter_truncate(&subreq->io_iter, size);
+ subreq->retry_count++;
+ __clear_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
__set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
netfs_do_issue_write(stream, subreq);
}
diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c
index 810269ee0a50..d49e4ce27999 100644
--- a/fs/nfs/fscache.c
+++ b/fs/nfs/fscache.c
@@ -263,6 +263,12 @@ int nfs_netfs_readahead(struct readahead_control *ractl)
static atomic_t nfs_netfs_debug_id;
static int nfs_netfs_init_request(struct netfs_io_request *rreq, struct file *file)
{
+ if (!file) {
+ if (WARN_ON_ONCE(rreq->origin != NETFS_PGPRIV2_COPY_TO_CACHE))
+ return -EIO;
+ return 0;
+ }
+
rreq->netfs_priv = get_nfs_open_context(nfs_file_open_context(file));
rreq->debug_id = atomic_inc_return(&nfs_netfs_debug_id);
/* [DEPRECATED] Use PG_private_2 to mark folio being written to the cache. */
@@ -274,7 +280,8 @@ static int nfs_netfs_init_request(struct netfs_io_request *rreq, struct file *fi
static void nfs_netfs_free_request(struct netfs_io_request *rreq)
{
- put_nfs_open_context(rreq->netfs_priv);
+ if (rreq->netfs_priv)
+ put_nfs_open_context(rreq->netfs_priv);
}
static struct nfs_netfs_io_data *nfs_netfs_alloc(struct netfs_io_subrequest *sreq)
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 0d16b383a452..5f582713bf05 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -1308,7 +1308,7 @@ pnfs_prepare_layoutreturn(struct pnfs_layout_hdr *lo,
enum pnfs_iomode *iomode)
{
/* Serialise LAYOUTGET/LAYOUTRETURN */
- if (atomic_read(&lo->plh_outstanding) != 0)
+ if (atomic_read(&lo->plh_outstanding) != 0 && lo->plh_return_seq == 0)
return false;
if (test_and_set_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags))
return false;
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index ae5c5e39afa0..aeb715b4a690 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -73,6 +73,7 @@
#include "nfs.h"
#include "netns.h"
#include "sysfs.h"
+#include "nfs4idmap.h"
#define NFSDBG_FACILITY NFSDBG_VFS
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index eacafe46e3b6..aa4712362b3b 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -40,24 +40,15 @@
#define EXPKEY_HASHMAX (1 << EXPKEY_HASHBITS)
#define EXPKEY_HASHMASK (EXPKEY_HASHMAX -1)
-static void expkey_put_work(struct work_struct *work)
+static void expkey_put(struct kref *ref)
{
- struct svc_expkey *key =
- container_of(to_rcu_work(work), struct svc_expkey, ek_rcu_work);
+ struct svc_expkey *key = container_of(ref, struct svc_expkey, h.ref);
if (test_bit(CACHE_VALID, &key->h.flags) &&
!test_bit(CACHE_NEGATIVE, &key->h.flags))
path_put(&key->ek_path);
auth_domain_put(key->ek_client);
- kfree(key);
-}
-
-static void expkey_put(struct kref *ref)
-{
- struct svc_expkey *key = container_of(ref, struct svc_expkey, h.ref);
-
- INIT_RCU_WORK(&key->ek_rcu_work, expkey_put_work);
- queue_rcu_work(system_wq, &key->ek_rcu_work);
+ kfree_rcu(key, ek_rcu);
}
static int expkey_upcall(struct cache_detail *cd, struct cache_head *h)
@@ -364,26 +355,16 @@ static void export_stats_destroy(struct export_stats *stats)
EXP_STATS_COUNTERS_NUM);
}
-static void svc_export_put_work(struct work_struct *work)
+static void svc_export_put(struct kref *ref)
{
- struct svc_export *exp =
- container_of(to_rcu_work(work), struct svc_export, ex_rcu_work);
-
+ struct svc_export *exp = container_of(ref, struct svc_export, h.ref);
path_put(&exp->ex_path);
auth_domain_put(exp->ex_client);
nfsd4_fslocs_free(&exp->ex_fslocs);
export_stats_destroy(exp->ex_stats);
kfree(exp->ex_stats);
kfree(exp->ex_uuid);
- kfree(exp);
-}
-
-static void svc_export_put(struct kref *ref)
-{
- struct svc_export *exp = container_of(ref, struct svc_export, h.ref);
-
- INIT_RCU_WORK(&exp->ex_rcu_work, svc_export_put_work);
- queue_rcu_work(system_wq, &exp->ex_rcu_work);
+ kfree_rcu(exp, ex_rcu);
}
static int svc_export_upcall(struct cache_detail *cd, struct cache_head *h)
diff --git a/fs/nfsd/export.h b/fs/nfsd/export.h
index 6f2fbaae01fa..4d92b99c1ffd 100644
--- a/fs/nfsd/export.h
+++ b/fs/nfsd/export.h
@@ -75,7 +75,7 @@ struct svc_export {
u32 ex_layout_types;
struct nfsd4_deviceid_map *ex_devid_map;
struct cache_detail *cd;
- struct rcu_work ex_rcu_work;
+ struct rcu_head ex_rcu;
unsigned long ex_xprtsec_modes;
struct export_stats *ex_stats;
};
@@ -92,7 +92,7 @@ struct svc_expkey {
u32 ek_fsid[6];
struct path ek_path;
- struct rcu_work ek_rcu_work;
+ struct rcu_head ek_rcu;
};
#define EX_ISSYNC(exp) (!((exp)->ex_flags & NFSEXP_ASYNC))
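
With the path_put()/auth_domain_put() calls performed directly in the kref release, only the final kfree() still needs to wait for an RCU grace period, so the rcu_work bounce through system_wq can become a plain kfree_rcu() on the embedded rcu_head. The general pattern, as a sketch (example_obj is illustrative):

	struct example_obj {
		struct kref ref;
		struct rcu_head rcu;	/* storage consumed by kfree_rcu() */
	};

	static void example_release(struct kref *ref)
	{
		struct example_obj *obj = container_of(ref, struct example_obj, ref);

		/* drop non-RCU resources synchronously here, then: */
		kfree_rcu(obj, rcu);	/* kfree() after a full grace period,
					 * no workqueue and no callback needed */
	}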
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 3877b53e429f..c083e539e898 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -1100,7 +1100,7 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c
args.authflavor = clp->cl_cred.cr_flavor;
clp->cl_cb_ident = conn->cb_ident;
} else {
- if (!conn->cb_xprt)
+ if (!conn->cb_xprt || !ses)
return -EINVAL;
clp->cl_cb_session = ses;
args.bc_xprt = conn->cb_xprt;
@@ -1522,8 +1522,6 @@ static void nfsd4_process_cb_update(struct nfsd4_callback *cb)
ses = c->cn_session;
}
spin_unlock(&clp->cl_lock);
- if (!c)
- return;
err = setup_callback_client(clp, &conn, ses);
if (err) {
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index f8a10f90bc7a..ad44ad49274f 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -1347,7 +1347,6 @@ static void nfs4_put_copy(struct nfsd4_copy *copy)
{
if (!refcount_dec_and_test(&copy->refcount))
return;
- atomic_dec(&copy->cp_nn->pending_async_copies);
kfree(copy->cp_src);
kfree(copy);
}
@@ -1870,6 +1869,7 @@ do_callback:
set_bit(NFSD4_COPY_F_COMPLETED, &copy->cp_flags);
trace_nfsd_copy_async_done(copy);
nfsd4_send_cb_offload(copy);
+ atomic_dec(&copy->cp_nn->pending_async_copies);
return 0;
}
@@ -1927,19 +1927,19 @@ nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
/* Arbitrary cap on number of pending async copy operations */
if (atomic_inc_return(&nn->pending_async_copies) >
(int)rqstp->rq_pool->sp_nrthreads)
- goto out_err;
+ goto out_dec_async_copy_err;
async_copy->cp_src = kmalloc(sizeof(*async_copy->cp_src), GFP_KERNEL);
if (!async_copy->cp_src)
- goto out_err;
+ goto out_dec_async_copy_err;
if (!nfs4_init_copy_state(nn, copy))
- goto out_err;
+ goto out_dec_async_copy_err;
memcpy(&result->cb_stateid, &copy->cp_stateid.cs_stid,
sizeof(result->cb_stateid));
dup_copy_fields(copy, async_copy);
async_copy->copy_task = kthread_create(nfsd4_do_async_copy,
async_copy, "%s", "copy thread");
if (IS_ERR(async_copy->copy_task))
- goto out_err;
+ goto out_dec_async_copy_err;
spin_lock(&async_copy->cp_clp->async_lock);
list_add(&async_copy->copies,
&async_copy->cp_clp->async_copies);
@@ -1954,6 +1954,9 @@ out:
trace_nfsd_copy_done(copy, status);
release_copy_files(copy);
return status;
+out_dec_async_copy_err:
+ if (async_copy)
+ atomic_dec(&nn->pending_async_copies);
out_err:
if (nfsd4_ssc_is_inter(copy)) {
/*
diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c
index 501ad7be5174..54a3fa0cf67e 100644
--- a/fs/nilfs2/btnode.c
+++ b/fs/nilfs2/btnode.c
@@ -35,6 +35,7 @@ void nilfs_init_btnc_inode(struct inode *btnc_inode)
ii->i_flags = 0;
memset(&ii->i_bmap_data, 0, sizeof(struct nilfs_bmap));
mapping_set_gfp_mask(btnc_inode->i_mapping, GFP_NOFS);
+ btnc_inode->i_mapping->a_ops = &nilfs_buffer_cache_aops;
}
void nilfs_btnode_cache_clear(struct address_space *btnc)
diff --git a/fs/nilfs2/gcinode.c b/fs/nilfs2/gcinode.c
index ace22253fed0..2dbb15767df1 100644
--- a/fs/nilfs2/gcinode.c
+++ b/fs/nilfs2/gcinode.c
@@ -163,7 +163,7 @@ int nilfs_init_gcinode(struct inode *inode)
inode->i_mode = S_IFREG;
mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
- inode->i_mapping->a_ops = &empty_aops;
+ inode->i_mapping->a_ops = &nilfs_buffer_cache_aops;
ii->i_flags = 0;
nilfs_bmap_init_gc(ii->i_bmap);
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index cf9ba481ae37..23f3a75edd50 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -276,6 +276,10 @@ const struct address_space_operations nilfs_aops = {
.is_partially_uptodate = block_is_partially_uptodate,
};
+const struct address_space_operations nilfs_buffer_cache_aops = {
+ .invalidate_folio = block_invalidate_folio,
+};
+
static int nilfs_insert_inode_locked(struct inode *inode,
struct nilfs_root *root,
unsigned long ino)
@@ -544,8 +548,14 @@ struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root,
inode = nilfs_iget_locked(sb, root, ino);
if (unlikely(!inode))
return ERR_PTR(-ENOMEM);
- if (!(inode->i_state & I_NEW))
+
+ if (!(inode->i_state & I_NEW)) {
+ if (!inode->i_nlink) {
+ iput(inode);
+ return ERR_PTR(-ESTALE);
+ }
return inode;
+ }
err = __nilfs_read_inode(sb, root, ino, inode);
if (unlikely(err)) {
@@ -675,6 +685,7 @@ struct inode *nilfs_iget_for_shadow(struct inode *inode)
NILFS_I(s_inode)->i_flags = 0;
memset(NILFS_I(s_inode)->i_bmap, 0, sizeof(struct nilfs_bmap));
mapping_set_gfp_mask(s_inode->i_mapping, GFP_NOFS);
+ s_inode->i_mapping->a_ops = &nilfs_buffer_cache_aops;
err = nilfs_attach_btree_node_cache(s_inode);
if (unlikely(err)) {
diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c
index 9b108052d9f7..1d836a5540f3 100644
--- a/fs/nilfs2/namei.c
+++ b/fs/nilfs2/namei.c
@@ -67,6 +67,11 @@ nilfs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
inode = NULL;
} else {
inode = nilfs_iget(dir->i_sb, NILFS_I(dir)->i_root, ino);
+ if (inode == ERR_PTR(-ESTALE)) {
+ nilfs_error(dir->i_sb,
+ "deleted inode referenced: %lu", ino);
+ return ERR_PTR(-EIO);
+ }
}
return d_splice_alias(inode, dentry);
diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h
index 45d03826eaf1..dff241c53fc5 100644
--- a/fs/nilfs2/nilfs.h
+++ b/fs/nilfs2/nilfs.h
@@ -401,6 +401,7 @@ extern const struct file_operations nilfs_dir_operations;
extern const struct inode_operations nilfs_file_inode_operations;
extern const struct file_operations nilfs_file_operations;
extern const struct address_space_operations nilfs_aops;
+extern const struct address_space_operations nilfs_buffer_cache_aops;
extern const struct inode_operations nilfs_dir_inode_operations;
extern const struct inode_operations nilfs_special_inode_operations;
extern const struct inode_operations nilfs_symlink_inode_operations;
diff --git a/fs/notify/fdinfo.c b/fs/notify/fdinfo.c
index dec553034027..e933f9c65d90 100644
--- a/fs/notify/fdinfo.c
+++ b/fs/notify/fdinfo.c
@@ -47,10 +47,8 @@ static void show_mark_fhandle(struct seq_file *m, struct inode *inode)
size = f->handle_bytes >> 2;
ret = exportfs_encode_fid(inode, (struct fid *)f->f_handle, &size);
- if ((ret == FILEID_INVALID) || (ret < 0)) {
- WARN_ONCE(1, "Can't encode file handler for inotify: %d\n", ret);
+ if ((ret == FILEID_INVALID) || (ret < 0))
return;
- }
f->handle_type = ret;
f->handle_bytes = size * sizeof(u32);
diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
index 8ac42ea81a17..d1aa04a5af1b 100644
--- a/fs/ocfs2/localalloc.c
+++ b/fs/ocfs2/localalloc.c
@@ -971,9 +971,9 @@ static int ocfs2_sync_local_to_main(struct ocfs2_super *osb,
start = count = 0;
left = le32_to_cpu(alloc->id1.bitmap1.i_total);
- while ((bit_off = ocfs2_find_next_zero_bit(bitmap, left, start)) <
- left) {
- if (bit_off == start) {
+ while (1) {
+ bit_off = ocfs2_find_next_zero_bit(bitmap, left, start);
+ if ((bit_off < left) && (bit_off == start)) {
count++;
start++;
continue;
@@ -998,29 +998,12 @@ static int ocfs2_sync_local_to_main(struct ocfs2_super *osb,
}
}
+ if (bit_off >= left)
+ break;
count = 1;
start = bit_off + 1;
}
- /* clear the contiguous bits until the end boundary */
- if (count) {
- blkno = la_start_blk +
- ocfs2_clusters_to_blocks(osb->sb,
- start - count);
-
- trace_ocfs2_sync_local_to_main_free(
- count, start - count,
- (unsigned long long)la_start_blk,
- (unsigned long long)blkno);
-
- status = ocfs2_release_clusters(handle,
- main_bm_inode,
- main_bm_bh, blkno,
- count);
- if (status < 0)
- mlog_errno(status);
- }
-
bail:
if (status)
mlog_errno(status);
diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c
index 2b0daced98eb..3404e7a30c33 100644
--- a/fs/ocfs2/quota_global.c
+++ b/fs/ocfs2/quota_global.c
@@ -893,7 +893,7 @@ static int ocfs2_get_next_id(struct super_block *sb, struct kqid *qid)
int status = 0;
trace_ocfs2_get_next_id(from_kqid(&init_user_ns, *qid), type);
- if (!sb_has_quota_loaded(sb, type)) {
+ if (!sb_has_quota_active(sb, type)) {
status = -ESRCH;
goto out;
}
diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c
index 73d3367c533b..2956d888c131 100644
--- a/fs/ocfs2/quota_local.c
+++ b/fs/ocfs2/quota_local.c
@@ -867,6 +867,7 @@ out:
brelse(oinfo->dqi_libh);
brelse(oinfo->dqi_lqi_bh);
kfree(oinfo);
+ info->dqi_priv = NULL;
return status;
}
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index 3601ddfeddc2..0c28e5fa3407 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -415,13 +415,13 @@ int ovl_set_attr(struct ovl_fs *ofs, struct dentry *upperdentry,
return err;
}
-struct ovl_fh *ovl_encode_real_fh(struct ovl_fs *ofs, struct dentry *real,
+struct ovl_fh *ovl_encode_real_fh(struct ovl_fs *ofs, struct inode *realinode,
bool is_upper)
{
struct ovl_fh *fh;
int fh_type, dwords;
int buflen = MAX_HANDLE_SZ;
- uuid_t *uuid = &real->d_sb->s_uuid;
+ uuid_t *uuid = &realinode->i_sb->s_uuid;
int err;
/* Make sure the real fid stays 32bit aligned */
@@ -438,13 +438,13 @@ struct ovl_fh *ovl_encode_real_fh(struct ovl_fs *ofs, struct dentry *real,
* the price of reconnecting the dentry.
*/
dwords = buflen >> 2;
- fh_type = exportfs_encode_fh(real, (void *)fh->fb.fid, &dwords, 0);
+ fh_type = exportfs_encode_inode_fh(realinode, (void *)fh->fb.fid,
+ &dwords, NULL, 0);
buflen = (dwords << 2);
err = -EIO;
- if (WARN_ON(fh_type < 0) ||
- WARN_ON(buflen > MAX_HANDLE_SZ) ||
- WARN_ON(fh_type == FILEID_INVALID))
+ if (fh_type < 0 || fh_type == FILEID_INVALID ||
+ WARN_ON(buflen > MAX_HANDLE_SZ))
goto out_err;
fh->fb.version = OVL_FH_VERSION;
@@ -480,7 +480,7 @@ struct ovl_fh *ovl_get_origin_fh(struct ovl_fs *ofs, struct dentry *origin)
if (!ovl_can_decode_fh(origin->d_sb))
return NULL;
- return ovl_encode_real_fh(ofs, origin, false);
+ return ovl_encode_real_fh(ofs, d_inode(origin), false);
}
int ovl_set_origin_fh(struct ovl_fs *ofs, const struct ovl_fh *fh,
@@ -505,7 +505,7 @@ static int ovl_set_upper_fh(struct ovl_fs *ofs, struct dentry *upper,
const struct ovl_fh *fh;
int err;
- fh = ovl_encode_real_fh(ofs, upper, true);
+ fh = ovl_encode_real_fh(ofs, d_inode(upper), true);
if (IS_ERR(fh))
return PTR_ERR(fh);
diff --git a/fs/overlayfs/export.c b/fs/overlayfs/export.c
index 5868cb222955..444aeeccb6da 100644
--- a/fs/overlayfs/export.c
+++ b/fs/overlayfs/export.c
@@ -176,35 +176,37 @@ static int ovl_connect_layer(struct dentry *dentry)
*
* Return 0 for upper file handle, > 0 for lower file handle or < 0 on error.
*/
-static int ovl_check_encode_origin(struct dentry *dentry)
+static int ovl_check_encode_origin(struct inode *inode)
{
- struct ovl_fs *ofs = OVL_FS(dentry->d_sb);
+ struct ovl_fs *ofs = OVL_FS(inode->i_sb);
bool decodable = ofs->config.nfs_export;
+ struct dentry *dentry;
+ int err;
/* No upper layer? */
if (!ovl_upper_mnt(ofs))
return 1;
/* Lower file handle for non-upper non-decodable */
- if (!ovl_dentry_upper(dentry) && !decodable)
+ if (!ovl_inode_upper(inode) && !decodable)
return 1;
/* Upper file handle for pure upper */
- if (!ovl_dentry_lower(dentry))
+ if (!ovl_inode_lower(inode))
return 0;
/*
* Root is never indexed, so if there's an upper layer, encode upper for
* root.
*/
- if (dentry == dentry->d_sb->s_root)
+ if (inode == d_inode(inode->i_sb->s_root))
return 0;
/*
* Upper decodable file handle for non-indexed upper.
*/
- if (ovl_dentry_upper(dentry) && decodable &&
- !ovl_test_flag(OVL_INDEX, d_inode(dentry)))
+ if (ovl_inode_upper(inode) && decodable &&
+ !ovl_test_flag(OVL_INDEX, inode))
return 0;
/*
@@ -213,14 +215,23 @@ static int ovl_check_encode_origin(struct dentry *dentry)
* ovl_connect_layer() will try to make origin's layer "connected" by
* copying up a "connectable" ancestor.
*/
- if (d_is_dir(dentry) && decodable)
- return ovl_connect_layer(dentry);
+ if (!decodable || !S_ISDIR(inode->i_mode))
+ return 1;
+
+ dentry = d_find_any_alias(inode);
+ if (!dentry)
+ return -ENOENT;
+
+ err = ovl_connect_layer(dentry);
+ dput(dentry);
+ if (err < 0)
+ return err;
/* Lower file handle for indexed and non-upper dir/non-dir */
return 1;
}
-static int ovl_dentry_to_fid(struct ovl_fs *ofs, struct dentry *dentry,
+static int ovl_dentry_to_fid(struct ovl_fs *ofs, struct inode *inode,
u32 *fid, int buflen)
{
struct ovl_fh *fh = NULL;
@@ -231,13 +242,13 @@ static int ovl_dentry_to_fid(struct ovl_fs *ofs, struct dentry *dentry,
* Check if we should encode a lower or upper file handle and maybe
* copy up an ancestor to make lower file handle connectable.
*/
- err = enc_lower = ovl_check_encode_origin(dentry);
+ err = enc_lower = ovl_check_encode_origin(inode);
if (enc_lower < 0)
goto fail;
/* Encode an upper or lower file handle */
- fh = ovl_encode_real_fh(ofs, enc_lower ? ovl_dentry_lower(dentry) :
- ovl_dentry_upper(dentry), !enc_lower);
+ fh = ovl_encode_real_fh(ofs, enc_lower ? ovl_inode_lower(inode) :
+ ovl_inode_upper(inode), !enc_lower);
if (IS_ERR(fh))
return PTR_ERR(fh);
@@ -251,8 +262,8 @@ out:
return err;
fail:
- pr_warn_ratelimited("failed to encode file handle (%pd2, err=%i)\n",
- dentry, err);
+ pr_warn_ratelimited("failed to encode file handle (ino=%lu, err=%i)\n",
+ inode->i_ino, err);
goto out;
}
@@ -260,19 +271,13 @@ static int ovl_encode_fh(struct inode *inode, u32 *fid, int *max_len,
struct inode *parent)
{
struct ovl_fs *ofs = OVL_FS(inode->i_sb);
- struct dentry *dentry;
int bytes, buflen = *max_len << 2;
/* TODO: encode connectable file handles */
if (parent)
return FILEID_INVALID;
- dentry = d_find_any_alias(inode);
- if (!dentry)
- return FILEID_INVALID;
-
- bytes = ovl_dentry_to_fid(ofs, dentry, fid, buflen);
- dput(dentry);
+ bytes = ovl_dentry_to_fid(ofs, inode, fid, buflen);
if (bytes <= 0)
return FILEID_INVALID;
diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c
index 7e27b7d4adee..cea820cb3b55 100644
--- a/fs/overlayfs/namei.c
+++ b/fs/overlayfs/namei.c
@@ -542,7 +542,7 @@ int ovl_verify_origin_xattr(struct ovl_fs *ofs, struct dentry *dentry,
struct ovl_fh *fh;
int err;
- fh = ovl_encode_real_fh(ofs, real, is_upper);
+ fh = ovl_encode_real_fh(ofs, d_inode(real), is_upper);
err = PTR_ERR(fh);
if (IS_ERR(fh)) {
fh = NULL;
@@ -738,7 +738,7 @@ int ovl_get_index_name(struct ovl_fs *ofs, struct dentry *origin,
struct ovl_fh *fh;
int err;
- fh = ovl_encode_real_fh(ofs, origin, false);
+ fh = ovl_encode_real_fh(ofs, d_inode(origin), false);
if (IS_ERR(fh))
return PTR_ERR(fh);
diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
index b361f35762be..0021e2025020 100644
--- a/fs/overlayfs/overlayfs.h
+++ b/fs/overlayfs/overlayfs.h
@@ -865,7 +865,7 @@ int ovl_copy_up_with_data(struct dentry *dentry);
int ovl_maybe_copy_up(struct dentry *dentry, int flags);
int ovl_copy_xattr(struct super_block *sb, const struct path *path, struct dentry *new);
int ovl_set_attr(struct ovl_fs *ofs, struct dentry *upper, struct kstat *stat);
-struct ovl_fh *ovl_encode_real_fh(struct ovl_fs *ofs, struct dentry *real,
+struct ovl_fh *ovl_encode_real_fh(struct ovl_fs *ofs, struct inode *realinode,
bool is_upper);
struct ovl_fh *ovl_get_origin_fh(struct ovl_fs *ofs, struct dentry *origin);
int ovl_set_origin_fh(struct ovl_fs *ofs, const struct ovl_fh *fh,
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 38a5a3e9cba2..f02cd362309a 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1810,7 +1810,7 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
}
for (; addr != end; addr += PAGE_SIZE, idx++) {
- unsigned long cur_flags = flags;
+ u64 cur_flags = flags;
pagemap_entry_t pme;
if (folio && (flags & PM_PRESENT) &&
diff --git a/fs/qnx6/inode.c b/fs/qnx6/inode.c
index 85925ec0051a..3310d1ad4d0e 100644
--- a/fs/qnx6/inode.c
+++ b/fs/qnx6/inode.c
@@ -179,8 +179,7 @@ static int qnx6_statfs(struct dentry *dentry, struct kstatfs *buf)
*/
static const char *qnx6_checkroot(struct super_block *s)
{
- static char match_root[2][3] = {".\0\0", "..\0"};
- int i, error = 0;
+ int error = 0;
struct qnx6_dir_entry *dir_entry;
struct inode *root = d_inode(s->s_root);
struct address_space *mapping = root->i_mapping;
@@ -189,11 +188,9 @@ static const char *qnx6_checkroot(struct super_block *s)
if (IS_ERR(folio))
return "error reading root directory";
dir_entry = kmap_local_folio(folio, 0);
- for (i = 0; i < 2; i++) {
- /* maximum 3 bytes - due to match_root limitation */
- if (strncmp(dir_entry[i].de_fname, match_root[i], 3))
- error = 1;
- }
+ if (memcmp(dir_entry[0].de_fname, ".", 2) ||
+ memcmp(dir_entry[1].de_fname, "..", 3))
+ error = 1;
folio_release_kmap(folio, dir_entry);
if (error)
return "error reading root directory.";
diff --git a/fs/smb/client/Kconfig b/fs/smb/client/Kconfig
index 2aff6d1395ce..9f05f94e265a 100644
--- a/fs/smb/client/Kconfig
+++ b/fs/smb/client/Kconfig
@@ -2,7 +2,6 @@
config CIFS
tristate "SMB3 and CIFS support (advanced network filesystem)"
depends on INET
- select NETFS_SUPPORT
select NLS
select NLS_UCS2_UTILS
select CRYPTO
diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c
index 9d96b833015c..b800c9f585d8 100644
--- a/fs/smb/client/cifsfs.c
+++ b/fs/smb/client/cifsfs.c
@@ -398,7 +398,7 @@ cifs_alloc_inode(struct super_block *sb)
cifs_inode = alloc_inode_sb(sb, cifs_inode_cachep, GFP_KERNEL);
if (!cifs_inode)
return NULL;
- cifs_inode->cifsAttrs = 0x20; /* default */
+ cifs_inode->cifsAttrs = ATTR_ARCHIVE; /* default */
cifs_inode->time = 0;
/*
* Until the file is open and we have gotten oplock info back from the
diff --git a/fs/smb/client/cifsproto.h b/fs/smb/client/cifsproto.h
index 754417cb3294..d26f9bbb5382 100644
--- a/fs/smb/client/cifsproto.h
+++ b/fs/smb/client/cifsproto.h
@@ -614,8 +614,6 @@ int cifs_alloc_hash(const char *name, struct shash_desc **sdesc);
void cifs_free_hash(struct shash_desc **sdesc);
int cifs_try_adding_channels(struct cifs_ses *ses);
-bool is_server_using_iface(struct TCP_Server_Info *server,
- struct cifs_server_iface *iface);
bool is_ses_using_iface(struct cifs_ses *ses, struct cifs_server_iface *iface);
void cifs_ses_mark_for_reconnect(struct cifs_ses *ses);
diff --git a/fs/smb/client/cifssmb.c b/fs/smb/client/cifssmb.c
index bd42a419458e..6cb1e81993f8 100644
--- a/fs/smb/client/cifssmb.c
+++ b/fs/smb/client/cifssmb.c
@@ -1319,14 +1319,16 @@ cifs_readv_callback(struct mid_q_entry *mid)
}
if (rdata->result == -ENODATA) {
- __set_bit(NETFS_SREQ_HIT_EOF, &rdata->subreq.flags);
rdata->result = 0;
+ __set_bit(NETFS_SREQ_HIT_EOF, &rdata->subreq.flags);
} else {
size_t trans = rdata->subreq.transferred + rdata->got_bytes;
if (trans < rdata->subreq.len &&
rdata->subreq.start + trans == ictx->remote_i_size) {
- __set_bit(NETFS_SREQ_HIT_EOF, &rdata->subreq.flags);
rdata->result = 0;
+ __set_bit(NETFS_SREQ_HIT_EOF, &rdata->subreq.flags);
+ } else if (rdata->got_bytes > 0) {
+ __set_bit(NETFS_SREQ_MADE_PROGRESS, &rdata->subreq.flags);
}
}
@@ -1670,10 +1672,13 @@ cifs_writev_callback(struct mid_q_entry *mid)
if (written > wdata->subreq.len)
written &= 0xFFFF;
- if (written < wdata->subreq.len)
+ if (written < wdata->subreq.len) {
result = -ENOSPC;
- else
+ } else {
result = written;
+ if (written > 0)
+ __set_bit(NETFS_SREQ_MADE_PROGRESS, &wdata->subreq.flags);
+ }
break;
case MID_REQUEST_SUBMITTED:
case MID_RETRY_NEEDED:
diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
index 2372538a1211..ddcc9e514a0e 100644
--- a/fs/smb/client/connect.c
+++ b/fs/smb/client/connect.c
@@ -987,9 +987,13 @@ clean_demultiplex_info(struct TCP_Server_Info *server)
msleep(125);
if (cifs_rdma_enabled(server))
smbd_destroy(server);
+
if (server->ssocket) {
sock_release(server->ssocket);
server->ssocket = NULL;
+
+ /* Release netns reference for the socket. */
+ put_net(cifs_net_ns(server));
}
if (!list_empty(&server->pending_mid_q)) {
@@ -1037,6 +1041,7 @@ clean_demultiplex_info(struct TCP_Server_Info *server)
*/
}
+ /* Release netns reference for this server. */
put_net(cifs_net_ns(server));
kfree(server->leaf_fullpath);
kfree(server);
@@ -1713,6 +1718,8 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx,
tcp_ses->ops = ctx->ops;
tcp_ses->vals = ctx->vals;
+
+ /* Grab netns reference for this server. */
cifs_set_net_ns(tcp_ses, get_net(current->nsproxy->net_ns));
tcp_ses->conn_id = atomic_inc_return(&tcpSesNextId);
@@ -1844,6 +1851,7 @@ smbd_connected:
out_err_crypto_release:
cifs_crypto_secmech_release(tcp_ses);
+ /* Release netns reference for this server. */
put_net(cifs_net_ns(tcp_ses));
out_err:
@@ -1852,8 +1860,10 @@ out_err:
cifs_put_tcp_session(tcp_ses->primary_server, false);
kfree(tcp_ses->hostname);
kfree(tcp_ses->leaf_fullpath);
- if (tcp_ses->ssocket)
+ if (tcp_ses->ssocket) {
sock_release(tcp_ses->ssocket);
+ put_net(cifs_net_ns(tcp_ses));
+ }
kfree(tcp_ses);
}
return ERR_PTR(rc);
@@ -3131,20 +3141,20 @@ generic_ip_connect(struct TCP_Server_Info *server)
socket = server->ssocket;
} else {
struct net *net = cifs_net_ns(server);
- struct sock *sk;
- rc = __sock_create(net, sfamily, SOCK_STREAM,
- IPPROTO_TCP, &server->ssocket, 1);
+ rc = sock_create_kern(net, sfamily, SOCK_STREAM, IPPROTO_TCP, &server->ssocket);
if (rc < 0) {
cifs_server_dbg(VFS, "Error %d creating socket\n", rc);
return rc;
}
- sk = server->ssocket->sk;
- __netns_tracker_free(net, &sk->ns_tracker, false);
- sk->sk_net_refcnt = 1;
- get_net_track(net, &sk->ns_tracker, GFP_KERNEL);
- sock_inuse_add(net, 1);
+ /*
+ * Grab netns reference for the socket.
+ *
+ * It'll be released here, on error, or in clean_demultiplex_info() upon server
+ * teardown.
+ */
+ get_net(net);
/* BB other socket options to set KEEPALIVE, NODELAY? */
cifs_dbg(FYI, "Socket created\n");
@@ -3158,8 +3168,10 @@ generic_ip_connect(struct TCP_Server_Info *server)
}
rc = bind_socket(server);
- if (rc < 0)
+ if (rc < 0) {
+ put_net(cifs_net_ns(server));
return rc;
+ }
/*
* Eventually check for other socket options to change from
@@ -3196,6 +3208,7 @@ generic_ip_connect(struct TCP_Server_Info *server)
if (rc < 0) {
cifs_dbg(FYI, "Error %d connecting to server\n", rc);
trace_smb3_connect_err(server->hostname, server->conn_id, &server->dstaddr, rc);
+ put_net(cifs_net_ns(server));
sock_release(socket);
server->ssocket = NULL;
return rc;
@@ -3204,6 +3217,9 @@ generic_ip_connect(struct TCP_Server_Info *server)
if (sport == htons(RFC1001_PORT))
rc = ip_rfc1001_connect(server);
+ if (rc < 0)
+ put_net(cifs_net_ns(server));
+
return rc;
}
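
The rework makes the namespace lifetime rule auditable: the server holds exactly one explicit netns reference per live socket, taken right after sock_create_kern() succeeds and dropped next to every sock_release(), whether on a failure path here or at teardown in clean_demultiplex_info(). The pairing, sketched with a hypothetical failure flag:

	rc = sock_create_kern(net, sfamily, SOCK_STREAM, IPPROTO_TCP, &sock);
	if (rc < 0)
		return rc;		/* no reference taken yet */
	get_net(net);			/* paired with the socket's lifetime */

	if (connect_failed) {		/* hypothetical failure condition */
		sock_release(sock);
		put_net(net);		/* always dropped beside the release */
	}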
diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c
index a58a3333ecc3..3b2d33291a7e 100644
--- a/fs/smb/client/file.c
+++ b/fs/smb/client/file.c
@@ -990,7 +990,11 @@ int cifs_open(struct inode *inode, struct file *file)
}
/* Get the cached handle as SMB2 close is deferred */
- rc = cifs_get_readable_path(tcon, full_path, &cfile);
+ if (OPEN_FMODE(file->f_flags) & FMODE_WRITE) {
+ rc = cifs_get_writable_path(tcon, full_path, FIND_WR_FSUID_ONLY, &cfile);
+ } else {
+ rc = cifs_get_readable_path(tcon, full_path, &cfile);
+ }
if (rc == 0) {
if (file->f_flags == cfile->f_flags) {
file->private_data = cfile;
diff --git a/fs/smb/client/namespace.c b/fs/smb/client/namespace.c
index 0f788031b740..e3f9213131c4 100644
--- a/fs/smb/client/namespace.c
+++ b/fs/smb/client/namespace.c
@@ -196,11 +196,28 @@ static struct vfsmount *cifs_do_automount(struct path *path)
struct smb3_fs_context tmp;
char *full_path;
struct vfsmount *mnt;
+ struct cifs_sb_info *mntpt_sb;
+ struct cifs_ses *ses;
if (IS_ROOT(mntpt))
return ERR_PTR(-ESTALE);
- cur_ctx = CIFS_SB(mntpt->d_sb)->ctx;
+ mntpt_sb = CIFS_SB(mntpt->d_sb);
+ ses = cifs_sb_master_tcon(mntpt_sb)->ses;
+ cur_ctx = mntpt_sb->ctx;
+
+ /*
+ * At this point, the root session should be in the mntpt sb. We should
+ * bring the sb context passwords in sync with the root session's
+ * passwords. This would help prevent unnecessary retries and password
+ * swaps for automounts.
+ */
+ mutex_lock(&ses->session_mutex);
+ rc = smb3_sync_session_ctx_passwords(mntpt_sb, ses);
+ mutex_unlock(&ses->session_mutex);
+
+ if (rc)
+ return ERR_PTR(rc);
fc = fs_context_for_submount(path->mnt->mnt_sb->s_type, mntpt);
if (IS_ERR(fc))
diff --git a/fs/smb/client/sess.c b/fs/smb/client/sess.c
index 3306fb655136..91d4d409cb1d 100644
--- a/fs/smb/client/sess.c
+++ b/fs/smb/client/sess.c
@@ -27,31 +27,6 @@ static int
cifs_ses_add_channel(struct cifs_ses *ses,
struct cifs_server_iface *iface);
-bool
-is_server_using_iface(struct TCP_Server_Info *server,
- struct cifs_server_iface *iface)
-{
- struct sockaddr_in *i4 = (struct sockaddr_in *)&iface->sockaddr;
- struct sockaddr_in6 *i6 = (struct sockaddr_in6 *)&iface->sockaddr;
- struct sockaddr_in *s4 = (struct sockaddr_in *)&server->dstaddr;
- struct sockaddr_in6 *s6 = (struct sockaddr_in6 *)&server->dstaddr;
-
- if (server->dstaddr.ss_family != iface->sockaddr.ss_family)
- return false;
- if (server->dstaddr.ss_family == AF_INET) {
- if (s4->sin_addr.s_addr != i4->sin_addr.s_addr)
- return false;
- } else if (server->dstaddr.ss_family == AF_INET6) {
- if (memcmp(&s6->sin6_addr, &i6->sin6_addr,
- sizeof(i6->sin6_addr)) != 0)
- return false;
- } else {
- /* unknown family.. */
- return false;
- }
- return true;
-}
-
bool is_ses_using_iface(struct cifs_ses *ses, struct cifs_server_iface *iface)
{
int i;
diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
index 010eae9d6c47..0577556f0a41 100644
--- a/fs/smb/client/smb2pdu.c
+++ b/fs/smb/client/smb2pdu.c
@@ -4615,6 +4615,7 @@ smb2_readv_callback(struct mid_q_entry *mid)
__set_bit(NETFS_SREQ_HIT_EOF, &rdata->subreq.flags);
rdata->result = 0;
}
+ __set_bit(NETFS_SREQ_MADE_PROGRESS, &rdata->subreq.flags);
}
trace_smb3_rw_credits(rreq_debug_id, subreq_debug_index, rdata->credits.value,
server->credits, server->in_flight,
@@ -4840,10 +4841,14 @@ smb2_writev_callback(struct mid_q_entry *mid)
if (written > wdata->subreq.len)
written &= 0xFFFF;
- if (written < wdata->subreq.len)
+ cifs_stats_bytes_written(tcon, written);
+
+ if (written < wdata->subreq.len) {
wdata->result = -ENOSPC;
- else
+ } else if (written > 0) {
wdata->subreq.len = written;
+ __set_bit(NETFS_SREQ_MADE_PROGRESS, &wdata->subreq.flags);
+ }
break;
case MID_REQUEST_SUBMITTED:
case MID_RETRY_NEEDED:
@@ -5012,7 +5017,7 @@ smb2_async_writev(struct cifs_io_subrequest *wdata)
}
#endif
- if (test_bit(NETFS_SREQ_RETRYING, &wdata->subreq.flags))
+ if (wdata->subreq.retry_count > 0)
smb2_set_replay(server, &rqst);
cifs_dbg(FYI, "async write at %llu %u bytes iter=%zx\n",
@@ -5156,6 +5161,7 @@ replay_again:
cifs_dbg(VFS, "Send error in write = %d\n", rc);
} else {
*nbytes = le32_to_cpu(rsp->DataLength);
+ cifs_stats_bytes_written(io_parms->tcon, *nbytes);
trace_smb3_write_done(0, 0, xid,
req->PersistentFileId,
io_parms->tcon->tid,
@@ -6204,7 +6210,7 @@ SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
req->StructureSize = cpu_to_le16(36);
total_len += 12;
- memcpy(req->LeaseKey, lease_key, 16);
+ memcpy(req->LeaseKey, lease_key, SMB2_LEASE_KEY_SIZE);
req->LeaseState = lease_state;
flags |= CIFS_NO_RSP_BUF;
diff --git a/fs/smb/server/connection.c b/fs/smb/server/connection.c
index c14dd72e1b30..f8a40f65db6a 100644
--- a/fs/smb/server/connection.c
+++ b/fs/smb/server/connection.c
@@ -70,7 +70,6 @@ struct ksmbd_conn *ksmbd_conn_alloc(void)
atomic_set(&conn->req_running, 0);
atomic_set(&conn->r_count, 0);
atomic_set(&conn->refcnt, 1);
- atomic_set(&conn->mux_smb_requests, 0);
conn->total_credits = 1;
conn->outstanding_credits = 0;
@@ -120,8 +119,8 @@ void ksmbd_conn_enqueue_request(struct ksmbd_work *work)
if (conn->ops->get_cmd_val(work) != SMB2_CANCEL_HE)
requests_queue = &conn->requests;
+ atomic_inc(&conn->req_running);
if (requests_queue) {
- atomic_inc(&conn->req_running);
spin_lock(&conn->request_lock);
list_add_tail(&work->request_entry, requests_queue);
spin_unlock(&conn->request_lock);
@@ -132,11 +131,14 @@ void ksmbd_conn_try_dequeue_request(struct ksmbd_work *work)
{
struct ksmbd_conn *conn = work->conn;
+ atomic_dec(&conn->req_running);
+ if (waitqueue_active(&conn->req_running_q))
+ wake_up(&conn->req_running_q);
+
if (list_empty(&work->request_entry) &&
list_empty(&work->async_request_entry))
return;
- atomic_dec(&conn->req_running);
spin_lock(&conn->request_lock);
list_del_init(&work->request_entry);
spin_unlock(&conn->request_lock);
@@ -308,7 +310,7 @@ int ksmbd_conn_handler_loop(void *p)
{
struct ksmbd_conn *conn = (struct ksmbd_conn *)p;
struct ksmbd_transport *t = conn->transport;
- unsigned int pdu_size, max_allowed_pdu_size;
+ unsigned int pdu_size, max_allowed_pdu_size, max_req;
char hdr_buf[4] = {0,};
int size;
@@ -318,6 +320,7 @@ int ksmbd_conn_handler_loop(void *p)
if (t->ops->prepare && t->ops->prepare(t))
goto out;
+ max_req = server_conf.max_inflight_req;
conn->last_active = jiffies;
set_freezable();
while (ksmbd_conn_alive(conn)) {
@@ -327,6 +330,13 @@ int ksmbd_conn_handler_loop(void *p)
kvfree(conn->request_buf);
conn->request_buf = NULL;
+recheck:
+ if (atomic_read(&conn->req_running) + 1 > max_req) {
+ wait_event_interruptible(conn->req_running_q,
+ atomic_read(&conn->req_running) < max_req);
+ goto recheck;
+ }
+
size = t->ops->read(t, hdr_buf, sizeof(hdr_buf), -1);
if (size != sizeof(hdr_buf))
break;
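
The recheck loop above is an admission throttle: a new request is only read off the wire while fewer than max_req requests are in flight, and ksmbd_conn_try_dequeue_request() wakes the loop as requests retire. A minimal pthreads sketch of the same pattern (hypothetical names; the kernel uses an atomic counter and a waitqueue rather than a mutex and condition variable):

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t req_running_q = PTHREAD_COND_INITIALIZER;
static unsigned int req_running;
static unsigned int max_req = 256;	/* arbitrary limit for the sketch */

/* Mirrors the recheck loop: block until one more request fits. */
static void throttle_enter(void)
{
	pthread_mutex_lock(&lock);
	while (req_running + 1 > max_req)
		pthread_cond_wait(&req_running_q, &lock);
	req_running++;	/* cf. atomic_inc() in ksmbd_conn_enqueue_request() */
	pthread_mutex_unlock(&lock);
}

/* Mirrors ksmbd_conn_try_dequeue_request(): retire and wake waiters. */
static void throttle_exit(void)
{
	pthread_mutex_lock(&lock);
	req_running--;
	pthread_cond_broadcast(&req_running_q);
	pthread_mutex_unlock(&lock);
}
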
diff --git a/fs/smb/server/connection.h b/fs/smb/server/connection.h
index 8ddd5a3c7baf..b379ae4fdcdf 100644
--- a/fs/smb/server/connection.h
+++ b/fs/smb/server/connection.h
@@ -107,7 +107,6 @@ struct ksmbd_conn {
__le16 signing_algorithm;
bool binding;
atomic_t refcnt;
- atomic_t mux_smb_requests;
};
struct ksmbd_conn_ops {
diff --git a/fs/smb/server/server.c b/fs/smb/server/server.c
index 3ba95bd8edeb..601e7fcbcf1e 100644
--- a/fs/smb/server/server.c
+++ b/fs/smb/server/server.c
@@ -270,7 +270,6 @@ static void handle_ksmbd_work(struct work_struct *wk)
ksmbd_conn_try_dequeue_request(work);
ksmbd_free_work_struct(work);
- atomic_dec(&conn->mux_smb_requests);
/*
	 * Check the waitqueue to drop pending requests on
	 * disconnection. waitqueue_active() is safe because it
@@ -300,11 +299,6 @@ static int queue_ksmbd_work(struct ksmbd_conn *conn)
if (err)
return 0;
- if (atomic_inc_return(&conn->mux_smb_requests) >= conn->vals->max_credits) {
- atomic_dec_return(&conn->mux_smb_requests);
- return -ENOSPC;
- }
-
work = ksmbd_alloc_work_struct();
if (!work) {
pr_err("allocation for work failed\n");
@@ -367,6 +361,7 @@ static int server_conf_init(void)
server_conf.auth_mechs |= KSMBD_AUTH_KRB5 |
KSMBD_AUTH_MSKRB5;
#endif
+ server_conf.max_inflight_req = SMB2_MAX_CREDITS;
return 0;
}
diff --git a/fs/smb/server/server.h b/fs/smb/server/server.h
index 4fc529335271..94187628ff08 100644
--- a/fs/smb/server/server.h
+++ b/fs/smb/server/server.h
@@ -42,6 +42,7 @@ struct ksmbd_server_config {
struct smb_sid domain_sid;
unsigned int auth_mechs;
unsigned int max_connections;
+ unsigned int max_inflight_req;
char *conf[SERVER_CONF_WORK_GROUP + 1];
struct task_struct *dh_task;
diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
index 803b35b89513..772deec5b90f 100644
--- a/fs/smb/server/smb2pdu.c
+++ b/fs/smb/server/smb2pdu.c
@@ -695,6 +695,9 @@ void smb2_send_interim_resp(struct ksmbd_work *work, __le32 status)
struct smb2_hdr *rsp_hdr;
struct ksmbd_work *in_work = ksmbd_alloc_work_struct();
+ if (!in_work)
+ return;
+
if (allocate_interim_rsp_buf(in_work)) {
pr_err("smb_allocate_rsp_buf failed!\n");
ksmbd_free_work_struct(in_work);
@@ -1097,6 +1100,7 @@ int smb2_handle_negotiate(struct ksmbd_work *work)
return rc;
}
+ ksmbd_conn_lock(conn);
smb2_buf_len = get_rfc1002_len(work->request_buf);
smb2_neg_size = offsetof(struct smb2_negotiate_req, Dialects);
if (smb2_neg_size > smb2_buf_len) {
@@ -1247,6 +1251,7 @@ int smb2_handle_negotiate(struct ksmbd_work *work)
ksmbd_conn_set_need_negotiate(conn);
err_out:
+ ksmbd_conn_unlock(conn);
if (rc)
rsp->hdr.Status = STATUS_INSUFFICIENT_RESOURCES;
@@ -3989,6 +3994,26 @@ static int smb2_populate_readdir_entry(struct ksmbd_conn *conn, int info_level,
posix_info->DeviceId = cpu_to_le32(ksmbd_kstat->kstat->rdev);
posix_info->HardLinks = cpu_to_le32(ksmbd_kstat->kstat->nlink);
posix_info->Mode = cpu_to_le32(ksmbd_kstat->kstat->mode & 0777);
+ switch (ksmbd_kstat->kstat->mode & S_IFMT) {
+ case S_IFDIR:
+ posix_info->Mode |= cpu_to_le32(POSIX_TYPE_DIR << POSIX_FILETYPE_SHIFT);
+ break;
+ case S_IFLNK:
+ posix_info->Mode |= cpu_to_le32(POSIX_TYPE_SYMLINK << POSIX_FILETYPE_SHIFT);
+ break;
+ case S_IFCHR:
+ posix_info->Mode |= cpu_to_le32(POSIX_TYPE_CHARDEV << POSIX_FILETYPE_SHIFT);
+ break;
+ case S_IFBLK:
+ posix_info->Mode |= cpu_to_le32(POSIX_TYPE_BLKDEV << POSIX_FILETYPE_SHIFT);
+ break;
+ case S_IFIFO:
+ posix_info->Mode |= cpu_to_le32(POSIX_TYPE_FIFO << POSIX_FILETYPE_SHIFT);
+ break;
+	case S_IFSOCK:
+		posix_info->Mode |= cpu_to_le32(POSIX_TYPE_SOCKET << POSIX_FILETYPE_SHIFT);
+		break;
+	}
+
posix_info->Inode = cpu_to_le64(ksmbd_kstat->kstat->ino);
posix_info->DosAttributes =
S_ISDIR(ksmbd_kstat->kstat->mode) ?
@@ -5179,6 +5204,26 @@ static int find_file_posix_info(struct smb2_query_info_rsp *rsp,
file_info->AllocationSize = cpu_to_le64(stat.blocks << 9);
file_info->HardLinks = cpu_to_le32(stat.nlink);
file_info->Mode = cpu_to_le32(stat.mode & 0777);
+ switch (stat.mode & S_IFMT) {
+ case S_IFDIR:
+ file_info->Mode |= cpu_to_le32(POSIX_TYPE_DIR << POSIX_FILETYPE_SHIFT);
+ break;
+ case S_IFLNK:
+ file_info->Mode |= cpu_to_le32(POSIX_TYPE_SYMLINK << POSIX_FILETYPE_SHIFT);
+ break;
+ case S_IFCHR:
+ file_info->Mode |= cpu_to_le32(POSIX_TYPE_CHARDEV << POSIX_FILETYPE_SHIFT);
+ break;
+ case S_IFBLK:
+ file_info->Mode |= cpu_to_le32(POSIX_TYPE_BLKDEV << POSIX_FILETYPE_SHIFT);
+ break;
+ case S_IFIFO:
+ file_info->Mode |= cpu_to_le32(POSIX_TYPE_FIFO << POSIX_FILETYPE_SHIFT);
+ break;
+	case S_IFSOCK:
+		file_info->Mode |= cpu_to_le32(POSIX_TYPE_SOCKET << POSIX_FILETYPE_SHIFT);
+		break;
+	}
+
file_info->DeviceId = cpu_to_le32(stat.rdev);
/*
diff --git a/fs/smb/server/smb2pdu.h b/fs/smb/server/smb2pdu.h
index 649dacf7e8c4..17a0b18a8406 100644
--- a/fs/smb/server/smb2pdu.h
+++ b/fs/smb/server/smb2pdu.h
@@ -502,4 +502,14 @@ static inline void *smb2_get_msg(void *buf)
return buf + 4;
}
+#define POSIX_TYPE_FILE 0
+#define POSIX_TYPE_DIR 1
+#define POSIX_TYPE_SYMLINK 2
+#define POSIX_TYPE_CHARDEV 3
+#define POSIX_TYPE_BLKDEV 4
+#define POSIX_TYPE_FIFO 5
+#define POSIX_TYPE_SOCKET 6
+
+#define POSIX_FILETYPE_SHIFT 12
+
#endif /* _SMB2PDU_H */
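
Taken together with the switch statements above, the encoding is: permission bits in the low 12 bits of Mode, file type in the bits from POSIX_FILETYPE_SHIFT upward. Regular files need no case because POSIX_TYPE_FILE is 0. A standalone sketch of the same encoding (hypothetical helper name, plain uint32_t in place of the on-the-wire __le32):

#include <stdint.h>
#include <sys/stat.h>

/* Constants as defined above in smb2pdu.h. */
#define POSIX_TYPE_FILE      0
#define POSIX_TYPE_DIR       1
#define POSIX_TYPE_SYMLINK   2
#define POSIX_TYPE_CHARDEV   3
#define POSIX_TYPE_BLKDEV    4
#define POSIX_TYPE_FIFO      5
#define POSIX_TYPE_SOCKET    6
#define POSIX_FILETYPE_SHIFT 12

/* Encode a mode_t into the SMB3 POSIX Mode field as the patch does. */
static uint32_t smb3_posix_mode(mode_t mode)
{
	uint32_t type = POSIX_TYPE_FILE;	/* 0: regular-file default */

	switch (mode & S_IFMT) {
	case S_IFDIR:  type = POSIX_TYPE_DIR;     break;
	case S_IFLNK:  type = POSIX_TYPE_SYMLINK; break;
	case S_IFCHR:  type = POSIX_TYPE_CHARDEV; break;
	case S_IFBLK:  type = POSIX_TYPE_BLKDEV;  break;
	case S_IFIFO:  type = POSIX_TYPE_FIFO;    break;
	case S_IFSOCK: type = POSIX_TYPE_SOCKET;  break;
	}
	return (mode & 0777) | (type << POSIX_FILETYPE_SHIFT);
}
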
diff --git a/fs/smb/server/transport_ipc.c b/fs/smb/server/transport_ipc.c
index 48cda3350e5a..befaf42b84cc 100644
--- a/fs/smb/server/transport_ipc.c
+++ b/fs/smb/server/transport_ipc.c
@@ -319,8 +319,11 @@ static int ipc_server_config_on_startup(struct ksmbd_startup_request *req)
init_smb2_max_write_size(req->smb2_max_write);
if (req->smb2_max_trans)
init_smb2_max_trans_size(req->smb2_max_trans);
- if (req->smb2_max_credits)
+ if (req->smb2_max_credits) {
init_smb2_max_credits(req->smb2_max_credits);
+		server_conf.max_inflight_req = req->smb2_max_credits;
+ }
if (req->smbd_max_io_size)
init_smbd_max_io_size(req->smbd_max_io_size);
diff --git a/fs/smb/server/transport_rdma.c b/fs/smb/server/transport_rdma.c
index 0ef3c9f0bfeb..c3785a5434f9 100644
--- a/fs/smb/server/transport_rdma.c
+++ b/fs/smb/server/transport_rdma.c
@@ -2283,8 +2283,7 @@ out:
ibdev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_UNKNOWN);
if (ibdev) {
- if (rdma_frwr_is_supported(&ibdev->attrs))
- rdma_capable = true;
+ rdma_capable = rdma_frwr_is_supported(&ibdev->attrs);
ib_device_put(ibdev);
}
}
diff --git a/fs/smb/server/vfs.c b/fs/smb/server/vfs.c
index 88d167a5f8b7..40f08eac519c 100644
--- a/fs/smb/server/vfs.c
+++ b/fs/smb/server/vfs.c
@@ -1264,6 +1264,8 @@ int ksmbd_vfs_kern_path_locked(struct ksmbd_work *work, char *name,
filepath,
flags,
path);
+ if (!is_last)
+ next[0] = '/';
if (err)
goto out2;
else if (is_last)
@@ -1271,7 +1273,6 @@ int ksmbd_vfs_kern_path_locked(struct ksmbd_work *work, char *name,
path_put(parent_path);
*parent_path = *path;
- next[0] = '/';
remain_len -= filename_len + 1;
}
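
The vfs.c change above moves the separator restore before the error check, so the path buffer is always returned intact even when the lookup of an intermediate component fails. A standalone sketch of the tokenize-and-restore pattern (a hypothetical lookup stub stands in for ksmbd_vfs_path_lookup_locked()):

#include <stdio.h>
#include <string.h>

/* Stand-in for the per-component lookup; always succeeds here. */
static int lookup_component(const char *comp)
{
	printf("lookup: %s\n", comp);
	return 0;
}

/*
 * Walk "a/b/c" by temporarily NUL-terminating each component; the
 * separator is restored BEFORE the error check, matching the fix.
 */
static int walk_components(char *path)
{
	char *comp = path;

	while (comp && *comp) {
		char *next = strchr(comp, '/');
		int err;

		if (next)
			*next = '\0';	/* cut at this component */
		err = lookup_component(comp);
		if (next)
			*next = '/';	/* restore even on failure */
		if (err)
			return err;	/* caller sees the full path */
		comp = next ? next + 1 : NULL;
	}
	return 0;
}
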
diff --git a/fs/xfs/libxfs/xfs_rtgroup.h b/fs/xfs/libxfs/xfs_rtgroup.h
index 7e7e491ff06f..2d7822644eff 100644
--- a/fs/xfs/libxfs/xfs_rtgroup.h
+++ b/fs/xfs/libxfs/xfs_rtgroup.h
@@ -272,7 +272,7 @@ static inline int xfs_initialize_rtgroups(struct xfs_mount *mp,
}
# define xfs_rtgroup_extents(mp, rgno) (0)
-# define xfs_update_last_rtgroup_size(mp, rgno) (-EOPNOTSUPP)
+# define xfs_update_last_rtgroup_size(mp, rgno) (0)
# define xfs_rtgroup_lock(rtg, gf) ((void)0)
# define xfs_rtgroup_unlock(rtg, gf) ((void)0)
# define xfs_rtgroup_trans_join(tp, rtg, gf) ((void)0)
diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
index f11d475898f2..201c26322ede 100644
--- a/fs/xfs/xfs_dquot.c
+++ b/fs/xfs/xfs_dquot.c
@@ -87,8 +87,9 @@ xfs_dquot_detach_buf(
}
spin_unlock(&qlip->qli_lock);
if (bp) {
+ xfs_buf_lock(bp);
list_del_init(&qlip->qli_item.li_bio_list);
- xfs_buf_rele(bp);
+ xfs_buf_relse(bp);
}
}