author		John Garry <john.g.garry@oracle.com>	2024-11-18 11:50:17 +0100
committer	Jens Axboe <axboe@kernel.dk>	2024-11-19 18:30:02 +0100
commit		f2a38abf5f1c5aeb3be8e9f4d3d815c867fff7ca (patch)
tree		0df6df556de8b497f0ee3388611144e08d2fd7f8 /drivers/md
parent		md/raid0: Atomic write support (diff)
download	linux-f2a38abf5f1c5aeb3be8e9f4d3d815c867fff7ca.tar.xz
		linux-f2a38abf5f1c5aeb3be8e9f4d3d815c867fff7ca.zip
md/raid1: Atomic write support
Set BLK_FEAT_ATOMIC_WRITES_STACKED to enable atomic writes.

If an atomic write is attempted to a region which has bad blocks, error the write, as we simply cannot honour it atomically. Devices which support atomic writes and also have bad blocks are expected to be rare.

Reviewed-by: Yu Kuai <yukuai3@huawei.com>
Signed-off-by: John Garry <john.g.garry@oracle.com>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Link: https://lore.kernel.org/r/20241118105018.1870052-5-john.g.garry@oracle.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
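For context (not part of this patch): with BLK_FEAT_ATOMIC_WRITES_STACKED propagated up the md stack, userspace can issue torn-write-protected I/O to the array with pwritev2() and RWF_ATOMIC. A minimal sketch, assuming a kernel carrying this series, headers that expose RWF_ATOMIC, a hypothetical scratch device /dev/md0, and that 4 KiB falls within the device's advertised atomic write unit limits:

/*
 * Sketch only: issue one 4 KiB all-or-nothing write to a scratch md
 * device. Destructive for the start of that device.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/uio.h>
#include <unistd.h>

#ifndef RWF_ATOMIC
#define RWF_ATOMIC 0x00000040	/* value from include/uapi/linux/fs.h */
#endif

int main(void)
{
	const size_t len = 4096;	/* must be within the device's atomic write unit min/max */
	struct iovec iov;
	void *buf;
	ssize_t ret;

	/* Atomic writes require direct I/O. */
	int fd = open("/dev/md0", O_RDWR | O_DIRECT);
	if (fd < 0) {
		perror("open");
		return EXIT_FAILURE;
	}

	if (posix_memalign(&buf, len, len)) {
		perror("posix_memalign");
		return EXIT_FAILURE;
	}
	memset(buf, 0xab, len);

	iov.iov_base = buf;
	iov.iov_len = len;

	/* Offset 0 is aligned to the write length, as RWF_ATOMIC requires. */
	ret = pwritev2(fd, &iov, 1, 0, RWF_ATOMIC);
	if (ret < 0)
		perror("pwritev2(RWF_ATOMIC)");

	free(buf);
	close(fd);
	return ret == (ssize_t)len ? EXIT_SUCCESS : EXIT_FAILURE;
}

On a raid1 array, such a write is either mirrored in full to every rdev or failed; the hunk below is what turns a collision with a bad-block range into an error rather than a partial (torn) write.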
Diffstat (limited to 'drivers/md')
-rw-r--r--	drivers/md/raid1.c	20
1 file changed, 18 insertions(+), 2 deletions(-)
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index a5adf08ee174..519c56f0ee3d 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1571,7 +1571,21 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 				continue;
 			}
 			if (is_bad) {
-				int good_sectors = first_bad - r1_bio->sector;
+				int good_sectors;
+
+				/*
+				 * We cannot atomically write this, so just
+				 * error in that case. It could be possible to
+				 * atomically write other mirrors, but the
+				 * complexity of supporting that is not worth
+				 * the benefit.
+				 */
+				if (bio->bi_opf & REQ_ATOMIC) {
+					error = -EIO;
+					goto err_handle;
+				}
+
+				good_sectors = first_bad - r1_bio->sector;
 				if (good_sectors < max_sectors)
 					max_sectors = good_sectors;
 			}
@@ -1657,7 +1671,8 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 		mbio->bi_iter.bi_sector	= (r1_bio->sector + rdev->data_offset);
 		mbio->bi_end_io	= raid1_end_write_request;
-		mbio->bi_opf = bio_op(bio) | (bio->bi_opf & (REQ_SYNC | REQ_FUA));
+		mbio->bi_opf = bio_op(bio) |
+			(bio->bi_opf & (REQ_SYNC | REQ_FUA | REQ_ATOMIC));
 		if (test_bit(FailFast, &rdev->flags) &&
 		    !test_bit(WriteMostly, &rdev->flags) &&
 		    conf->raid_disks - mddev->degraded > 1)
@@ -3224,6 +3239,7 @@ static int raid1_set_limits(struct mddev *mddev)
 
 	md_init_stacking_limits(&lim);
 	lim.max_write_zeroes_sectors = 0;
+	lim.features |= BLK_FEAT_ATOMIC_WRITES_STACKED;
 	err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
 	if (err) {
 		queue_limits_cancel_update(mddev->gendisk->queue);
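Also for context (not part of this patch): before using RWF_ATOMIC, an application would normally query the stacked atomic write limits that raid1_set_limits() helps establish. A hedged sketch using statx(), assuming kernel and libc headers new enough to define STATX_WRITE_ATOMIC and the stx_atomic_write_unit_* fields (Linux 6.11+ uapi); the device path is again hypothetical:

/*
 * Sketch only: report the atomic write unit limits the block layer
 * exposes for /dev/md0, if the running kernel provides them.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	struct statx stx;

	if (statx(AT_FDCWD, "/dev/md0", 0, STATX_WRITE_ATOMIC, &stx) != 0) {
		perror("statx");
		return 1;
	}

	if (stx.stx_mask & STATX_WRITE_ATOMIC)
		printf("atomic write unit: min %u bytes, max %u bytes\n",
		       stx.stx_atomic_write_unit_min,
		       stx.stx_atomic_write_unit_max);
	else
		printf("kernel did not report atomic write limits\n");
	return 0;
}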