btrfs-progs: use unaligned LE access almost everywhere

Use the unaligned access helpers for code that potentially or actually
accesses data that comes from on-disk structures. This is for image or
chunk restore. This may pessimize some cases but is in general safer on
strict-alignment architectures and has no effect on other architectures.

Related to issue #770.

Signed-off-by: David Sterba <dsterba@suse.com>
This commit is contained in:
David Sterba 2024-05-29 23:07:18 +02:00
parent 7f2ccbb732
commit 3e91948c01
5 changed files with 36 additions and 42 deletions

View file

@@ -1296,13 +1296,12 @@ static int read_chunk_tree(int fd, struct chunk **chunks, size_t *num_chunks)
chunk = &(*chunks)[*num_chunks]; chunk = &(*chunks)[*num_chunks];
chunk->offset = sh.offset; chunk->offset = sh.offset;
chunk->length = le64_to_cpu(item->length); chunk->length = get_unaligned_le64(&item->length);
chunk->stripe_len = le64_to_cpu(item->stripe_len); chunk->stripe_len = get_unaligned_le64(&item->stripe_len);
chunk->type = le64_to_cpu(item->type); chunk->type = get_unaligned_le64(&item->type);
chunk->num_stripes = le16_to_cpu(item->num_stripes); chunk->num_stripes = get_unaligned_le16(&item->num_stripes);
chunk->sub_stripes = le16_to_cpu(item->sub_stripes); chunk->sub_stripes = get_unaligned_le16(&item->sub_stripes);
chunk->stripes = calloc(chunk->num_stripes, chunk->stripes = calloc(chunk->num_stripes, sizeof(*chunk->stripes));
sizeof(*chunk->stripes));
if (!chunk->stripes) { if (!chunk->stripes) {
perror("calloc"); perror("calloc");
return -1; return -1;
@@ -1313,8 +1312,8 @@ static int read_chunk_tree(int fd, struct chunk **chunks, size_t *num_chunks)
const struct btrfs_stripe *stripe; const struct btrfs_stripe *stripe;
stripe = &item->stripe + i; stripe = &item->stripe + i;
chunk->stripes[i].devid = le64_to_cpu(stripe->devid); chunk->stripes[i].devid = get_unaligned_le64(&stripe->devid);
chunk->stripes[i].offset = le64_to_cpu(stripe->offset); chunk->stripes[i].offset = get_unaligned_le64(&stripe->offset);
} }
next: next:
@@ -1425,7 +1424,7 @@ static int map_physical_start(int fd, struct chunk *chunks, size_t num_chunks,
type = item->type; type = item->type;
if (type == BTRFS_FILE_EXTENT_REG || if (type == BTRFS_FILE_EXTENT_REG ||
type == BTRFS_FILE_EXTENT_PREALLOC) { type == BTRFS_FILE_EXTENT_PREALLOC) {
logical_offset = le64_to_cpu(item->disk_bytenr); logical_offset = get_unaligned_le64(&item->disk_bytenr);
if (logical_offset) { if (logical_offset) {
/* Regular extent */ /* Regular extent */
chunk = find_chunk(chunks, num_chunks, logical_offset); chunk = find_chunk(chunks, num_chunks, logical_offset);
@@ -1459,7 +1458,7 @@ static int map_physical_start(int fd, struct chunk *chunks, size_t num_chunks,
goto out; goto out;
} }
if (item->other_encoding != 0) { if (item->other_encoding != 0) {
error("file with other_encoding: %u", le16_to_cpu(item->other_encoding)); error("file with other_encoding: %u", get_unaligned_le16(&item->other_encoding));
ret = -EINVAL; ret = -EINVAL;
goto out; goto out;
} }

View file

@@ -99,9 +99,7 @@ static int decompress_zlib(char *inbuf, char *outbuf, u64 compress_len,
} }
static inline size_t read_compress_length(unsigned char *buf) static inline size_t read_compress_length(unsigned char *buf)
{ {
__le32 dlen; return get_unaligned_le32(buf);
memcpy(&dlen, buf, LZO_LEN);
return le32_to_cpu(dlen);
} }
static int decompress_lzo(struct btrfs_root *root, unsigned char *inbuf, static int decompress_lzo(struct btrfs_root *root, unsigned char *inbuf,

View file

@@ -79,8 +79,7 @@ int detect_version(FILE *in)
fseek(in, 0, SEEK_SET); fseek(in, 0, SEEK_SET);
cluster = (struct meta_cluster *)buf; cluster = (struct meta_cluster *)buf;
for (i = 0; i < ARRAY_SIZE(dump_versions); i++) { for (i = 0; i < ARRAY_SIZE(dump_versions); i++) {
if (le64_to_cpu(cluster->header.magic) == if (get_unaligned_le64(&cluster->header.magic) == dump_versions[i].magic_cpu) {
dump_versions[i].magic_cpu) {
found = true; found = true;
current_version = &dump_versions[i]; current_version = &dump_versions[i];
break; break;

View file

@@ -272,7 +272,7 @@ static int write_buffers(struct metadump_struct *md, u64 *next)
} }
/* write buffers */ /* write buffers */
bytenr += le64_to_cpu(header->bytenr) + IMAGE_BLOCK_SIZE; bytenr += get_unaligned_le64(&header->bytenr) + IMAGE_BLOCK_SIZE;
while (!list_empty(&md->ordered)) { while (!list_empty(&md->ordered)) {
async = list_entry(md->ordered.next, struct async_work, async = list_entry(md->ordered.next, struct async_work,
ordered); ordered);

View file

@@ -1004,8 +1004,8 @@ static int search_for_chunk_blocks(struct mdrestore_struct *mdres)
ret = 0; ret = 0;
header = &cluster->header; header = &cluster->header;
if (le64_to_cpu(header->magic) != current_version->magic_cpu || if (get_unaligned_le64(&header->magic) != current_version->magic_cpu ||
le64_to_cpu(header->bytenr) != current_cluster) { get_unaligned_le64(&header->bytenr) != current_cluster) {
error("bad header in metadump image"); error("bad header in metadump image");
ret = -EIO; ret = -EIO;
goto out; goto out;
@@ -1016,15 +1016,15 @@ static int search_for_chunk_blocks(struct mdrestore_struct *mdres)
goto out; goto out;
bytenr += IMAGE_BLOCK_SIZE; bytenr += IMAGE_BLOCK_SIZE;
nritems = le32_to_cpu(header->nritems); nritems = get_unaligned_le32(&header->nritems);
/* Search items for tree blocks in sys chunks */ /* Search items for tree blocks in sys chunks */
for (i = 0; i < nritems; i++) { for (i = 0; i < nritems; i++) {
size_t size; size_t size;
item = &cluster->items[i]; item = &cluster->items[i];
bufsize = le32_to_cpu(item->size); bufsize = get_unaligned_le32(&item->size);
item_bytenr = le64_to_cpu(item->bytenr); item_bytenr = get_unaligned_le64(&item->bytenr);
/* /*
* Only data extent/free space cache can be that big, * Only data extent/free space cache can be that big,
@@ -1117,38 +1117,37 @@ static int build_chunk_tree(struct mdrestore_struct *mdres,
ret = 0; ret = 0;
header = &cluster->header; header = &cluster->header;
if (le64_to_cpu(header->magic) != current_version->magic_cpu || if (get_unaligned_le64(&header->magic) != current_version->magic_cpu ||
le64_to_cpu(header->bytenr) != 0) { get_unaligned_le64(&header->bytenr) != 0) {
error("bad header in metadump image"); error("bad header in metadump image");
return -EIO; return -EIO;
} }
mdres->compress_method = header->compress; mdres->compress_method = header->compress;
nritems = le32_to_cpu(header->nritems); nritems = get_unaligned_le32(&header->nritems);
for (i = 0; i < nritems; i++) { for (i = 0; i < nritems; i++) {
item = &cluster->items[i]; item = &cluster->items[i];
if (le64_to_cpu(item->bytenr) == BTRFS_SUPER_INFO_OFFSET) if (get_unaligned_le64(&item->bytenr) == BTRFS_SUPER_INFO_OFFSET)
break; break;
if (fseek(mdres->in, le32_to_cpu(item->size), SEEK_CUR)) { if (fseek(mdres->in, get_unaligned_le32(&item->size), SEEK_CUR)) {
error("seek failed: %m"); error("seek failed: %m");
return -EIO; return -EIO;
} }
} }
if (!item || le64_to_cpu(item->bytenr) != BTRFS_SUPER_INFO_OFFSET) { if (!item || get_unaligned_le64(&item->bytenr) != BTRFS_SUPER_INFO_OFFSET) {
error("did not find superblock at %llu", error("did not find superblock at %llu", get_unaligned_le64(&item->bytenr));
le64_to_cpu(item->bytenr));
return -EINVAL; return -EINVAL;
} }
buffer = malloc(le32_to_cpu(item->size)); buffer = malloc(get_unaligned_le32(&item->size));
if (!buffer) { if (!buffer) {
error_msg(ERROR_MSG_MEMORY, NULL); error_msg(ERROR_MSG_MEMORY, NULL);
return -ENOMEM; return -ENOMEM;
} }
ret = fread(buffer, le32_to_cpu(item->size), 1, mdres->in); ret = fread(buffer, get_unaligned_le32(&item->size), 1, mdres->in);
if (ret != 1) { if (ret != 1) {
error("unable to read buffer: %m"); error("unable to read buffer: %m");
free(buffer); free(buffer);
@@ -1164,8 +1163,7 @@ static int build_chunk_tree(struct mdrestore_struct *mdres,
free(buffer); free(buffer);
return -ENOMEM; return -ENOMEM;
} }
ret = uncompress(tmp, (unsigned long *)&size, ret = uncompress(tmp, (unsigned long *)&size, buffer, get_unaligned_le32(&item->size));
buffer, le32_to_cpu(item->size));
if (ret != Z_OK) { if (ret != Z_OK) {
error("decompression failed with %d", ret); error("decompression failed with %d", ret);
free(buffer); free(buffer);
@@ -1198,7 +1196,7 @@ static int build_chunk_tree(struct mdrestore_struct *mdres,
memcpy(mdres->fsid, super->fsid, BTRFS_FSID_SIZE); memcpy(mdres->fsid, super->fsid, BTRFS_FSID_SIZE);
memcpy(mdres->uuid, super->dev_item.uuid, BTRFS_UUID_SIZE); memcpy(mdres->uuid, super->dev_item.uuid, BTRFS_UUID_SIZE);
mdres->devid = le64_to_cpu(super->dev_item.devid); mdres->devid = get_unaligned_le64(&super->dev_item.devid);
free(buffer); free(buffer);
pthread_mutex_unlock(&mdres->mutex); pthread_mutex_unlock(&mdres->mutex);
@@ -1283,7 +1281,7 @@ static int fill_mdres_info(struct mdrestore_struct *mdres,
else else
memcpy(mdres->fsid, super->fsid, BTRFS_FSID_SIZE); memcpy(mdres->fsid, super->fsid, BTRFS_FSID_SIZE);
memcpy(mdres->uuid, super->dev_item.uuid, BTRFS_UUID_SIZE); memcpy(mdres->uuid, super->dev_item.uuid, BTRFS_UUID_SIZE);
mdres->devid = le64_to_cpu(super->dev_item.devid); mdres->devid = get_unaligned_le64(&super->dev_item.devid);
free(buffer); free(buffer);
return 0; return 0;
} }
@@ -1302,8 +1300,8 @@ static int add_cluster(struct meta_cluster *cluster,
mdres->compress_method = header->compress; mdres->compress_method = header->compress;
pthread_mutex_unlock(&mdres->mutex); pthread_mutex_unlock(&mdres->mutex);
bytenr = le64_to_cpu(header->bytenr) + IMAGE_BLOCK_SIZE; bytenr = get_unaligned_le64(&header->bytenr) + IMAGE_BLOCK_SIZE;
nritems = le32_to_cpu(header->nritems); nritems = get_unaligned_le32(&header->nritems);
for (i = 0; i < nritems; i++) { for (i = 0; i < nritems; i++) {
item = &cluster->items[i]; item = &cluster->items[i];
async = calloc(1, sizeof(*async)); async = calloc(1, sizeof(*async));
@@ -1311,8 +1309,8 @@ static int add_cluster(struct meta_cluster *cluster,
error_msg(ERROR_MSG_MEMORY, "async data"); error_msg(ERROR_MSG_MEMORY, "async data");
return -ENOMEM; return -ENOMEM;
} }
async->start = le64_to_cpu(item->bytenr); async->start = get_unaligned_le64(&item->bytenr);
async->bufsize = le32_to_cpu(item->size); async->bufsize = get_unaligned_le32(&item->size);
async->buffer = malloc(async->bufsize); async->buffer = malloc(async->bufsize);
if (!async->buffer) { if (!async->buffer) {
error_msg(ERROR_MSG_MEMORY, "async buffer"); error_msg(ERROR_MSG_MEMORY, "async buffer");
@@ -1836,8 +1834,8 @@ int restore_metadump(const char *input, FILE *out, int old_restore,
break; break;
header = &cluster->header; header = &cluster->header;
if (le64_to_cpu(header->magic) != current_version->magic_cpu || if (get_unaligned_le64(&header->magic) != current_version->magic_cpu ||
le64_to_cpu(header->bytenr) != bytenr) { get_unaligned_le64(&header->bytenr) != bytenr) {
error("bad header in metadump image"); error("bad header in metadump image");
ret = -EIO; ret = -EIO;
break; break;