btrfs-progs: zoned: support zero out on zoned block device

If we zero out a region in a sequential write required zone, we cannot
write to that region again until we reset the zone. Thus, we must avoid
zeroing out any part of a sequential write required zone.

zero_dev_clamped() is modified to take the zone information and, if the
device is host managed, to call zero_zone_blocks() instead, which avoids
writing to sequential write required zones.
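
For background, a sequential write required zone can only be written at
its write pointer, and it only becomes writable from the start again
after a zone reset. A minimal sketch of such a reset (not part of this
patch; it uses the kernel's BLKRESETZONE ioctl from linux/blkzoned.h,
and the helper name is made up):

	#include <sys/ioctl.h>
	#include <linux/blkzoned.h>

	/* Hypothetical helper: rewind one zone so it can be rewritten */
	static int reset_one_zone(int fd, __u64 zone_start, __u64 zone_len)
	{
		struct blk_zone_range range = {
			.sector = zone_start >> 9,	/* bytes -> 512B sectors */
			.nr_sectors = zone_len >> 9,
		};

		/* Rewinds the zone's write pointer; zone data is discarded */
		return ioctl(fd, BLKRESETZONE, &range);
	}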

Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
Signed-off-by: David Sterba <dsterba@suse.com>
commit 8bbb0c5744 (parent 58ec593892)
Author: Naohiro Aota <naohiro.aota@wdc.com>
Date:   2021-04-26 15:27:34 +09:00
Committer: David Sterba
4 changed files with 48 additions and 5 deletions

@@ -68,7 +68,7 @@ int discard_blocks(int fd, u64 start, u64 len)
 	return 0;
 }
 
-static int zero_blocks(int fd, off_t start, size_t len)
+int zero_blocks(int fd, off_t start, size_t len)
 {
 	char *buf = malloc(len);
 	int ret = 0;
@@ -87,7 +87,8 @@ static int zero_blocks(int fd, off_t start, size_t len)
 
 #define ZERO_DEV_BYTES SZ_2M
 
 /* don't write outside the device by clamping the region to the device size */
-static int zero_dev_clamped(int fd, off_t start, ssize_t len, u64 dev_size)
+static int zero_dev_clamped(int fd, struct btrfs_zoned_device_info *zinfo,
+			    off_t start, ssize_t len, u64 dev_size)
 {
 	off_t end = max(start, start + len);
@@ -100,6 +101,9 @@ static int zero_dev_clamped(int fd, off_t start, ssize_t len, u64 dev_size)
 	start = min_t(u64, start, dev_size);
 	end = min_t(u64, end, dev_size);
 
+	if (zinfo && zinfo->model == ZONED_HOST_MANAGED)
+		return zero_zone_blocks(fd, zinfo, start, end - start);
+
 	return zero_blocks(fd, start, end - start);
 }
@@ -210,12 +214,12 @@ int btrfs_prepare_device(int fd, const char *file, u64 *block_count_ret,
 		}
 	}
 
-	ret = zero_dev_clamped(fd, 0, ZERO_DEV_BYTES, block_count);
+	ret = zero_dev_clamped(fd, zinfo, 0, ZERO_DEV_BYTES, block_count);
 	for (i = 0 ; !ret && i < BTRFS_SUPER_MIRROR_MAX; i++)
-		ret = zero_dev_clamped(fd, btrfs_sb_offset(i),
+		ret = zero_dev_clamped(fd, zinfo, btrfs_sb_offset(i),
 				       BTRFS_SUPER_INFO_SIZE, block_count);
 	if (!ret && (opflags & PREP_DEVICE_ZERO_END))
-		ret = zero_dev_clamped(fd, block_count - ZERO_DEV_BYTES,
+		ret = zero_dev_clamped(fd, zinfo, block_count - ZERO_DEV_BYTES,
 				       ZERO_DEV_BYTES, block_count);
 	if (ret < 0) {

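For context (not introduced by this patch): BTRFS_SUPER_MIRROR_MAX is 3,
and btrfs_sb_offset() returns the fixed superblock copy locations, so the
loop above zeroes every potential superblock slot that the clamping keeps
inside the device. A sketch of those offsets, assuming the kernel's
definition of btrfs_sb_offset() (primary at 64KiB, mirror copies at
16KiB << (12 * mirror)):

	/* Assumed fixed superblock copy locations in btrfs */
	static const unsigned long long sb_offsets[] = {
		64ULL << 10,	/* primary copy, 64KiB */
		64ULL << 20,	/* mirror 1, 64MiB */
		256ULL << 30,	/* mirror 2, 256GiB */
	};
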
@@ -26,6 +26,7 @@
 #define PREP_DEVICE_ZONED	(1U << 3)
 
 int discard_blocks(int fd, u64 start, u64 len);
+int zero_blocks(int fd, off_t start, size_t len);
 u64 get_partition_size(const char *dev);
 u64 disk_size(const char *path);
 u64 btrfs_device_size(int fd, struct stat *st);

@@ -389,6 +389,34 @@ int btrfs_reset_all_zones(int fd, struct btrfs_zoned_device_info *zinfo)
 	return fsync(fd);
 }
 
+int zero_zone_blocks(int fd, struct btrfs_zoned_device_info *zinfo, off_t start,
+		     size_t len)
+{
+	size_t zone_len = zinfo->zone_size;
+	off_t ofst = start;
+	size_t count;
+	int ret;
+
+	/* Make sure that zero_blocks does not write sequential zones */
+	while (len > 0) {
+		/* Limit zero_blocks to a single zone */
+		count = min_t(size_t, len, zone_len);
+		if (count > zone_len - (ofst & (zone_len - 1)))
+			count = zone_len - (ofst & (zone_len - 1));
+
+		if (!zone_is_sequential(zinfo, ofst)) {
+			ret = zero_blocks(fd, ofst, count);
+			if (ret != 0)
+				return ret;
+		}
+
+		len -= count;
+		ofst += count;
+	}
+
+	return 0;
+}
+
 static int sb_log_location(int fd, struct blk_zone *zones, int rw, u64 *bytenr_ret)
 {
 	u64 wp;

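The per-zone clamping above relies on the zone size being a power of
two (which btrfs zoned support requires): ofst & (zone_len - 1) is the
offset within the current zone, so zone_len - (ofst & (zone_len - 1))
is the room left before the next zone boundary. A small self-contained
illustration with made-up values:

	#include <stdio.h>
	#include <sys/types.h>

	int main(void)
	{
		size_t zone_len = 0x10000;	/* example: 64K zones */
		off_t ofst = 0x1f000;		/* 4K before a zone boundary */
		size_t len = 0x8000;		/* caller asked for 32K */
		size_t count = len < zone_len ? len : zone_len;

		/* Same clamping as zero_zone_blocks() */
		if (count > zone_len - (ofst & (zone_len - 1)))
			count = zone_len - (ofst & (zone_len - 1));

		printf("count clamped to 0x%zx\n", count);	/* 0x1000 */
		return 0;
	}
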
@@ -96,6 +96,9 @@ int btrfs_reset_chunk_zones(struct btrfs_fs_info *fs_info, u64 devid,
 			    u64 offset, u64 length);
 int btrfs_reset_all_zones(int fd, struct btrfs_zoned_device_info *zinfo);
+int zero_zone_blocks(int fd, struct btrfs_zoned_device_info *zinfo, off_t start,
+		     size_t len);
 
 #else
 
 #define sbread(fd, buf, offset) \
@@ -150,6 +153,13 @@ static inline int btrfs_reset_all_zones(int fd,
 	return -EOPNOTSUPP;
 }
 
+static inline int zero_zone_blocks(int fd,
+				   struct btrfs_zoned_device_info *zinfo,
+				   off_t start, size_t len)
+{
+	return -EOPNOTSUPP;
+}
+
 #endif /* BTRFS_ZONED */
 
 static inline bool btrfs_dev_is_sequential(struct btrfs_device *device, u64 pos)
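
The -EOPNOTSUPP stub keeps callers compiling when zoned support is
built out; at runtime it is not reached, because zero_dev_clamped()
only takes the zoned path for a non-NULL zinfo with a host-managed
model. A sketch of that calling pattern (hypothetical helper,
condensing the zero_dev_clamped() change above):

	/* Hypothetical caller: builds with or without BTRFS_ZONED.
	 * Without zoned support zinfo is NULL, so the stub is never
	 * reached; if it were, -EOPNOTSUPP would propagate as the error. */
	static int zero_region(int fd, struct btrfs_zoned_device_info *zinfo,
			       off_t start, size_t len)
	{
		if (zinfo && zinfo->model == ZONED_HOST_MANAGED)
			return zero_zone_blocks(fd, zinfo, start, len);
		return zero_blocks(fd, start, len);
	}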