	if (extent_len)
		endio_readpage_release_extent(tree, extent_start, extent_len,
					      uptodate);
-	if (io_bio->end_io)
-		io_bio->end_io(io_bio, blk_status_to_errno(bio->bi_status));
+	btrfs_io_bio_free_csum(io_bio);
	bio_put(bio);
}
	return ret;
}

-static void btrfs_io_bio_endio_readpage(struct btrfs_io_bio *bio, int err)
-{
-	if (bio->csum != bio->csum_inline) {
-		kfree(bio->csum);
-		bio->csum = NULL;
-	}
-}
-
static blk_status_t __btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio,
					    u64 logical_offset, u32 *dst, int dio)
{
			btrfs_free_path(path);
			return BLK_STS_RESOURCE;
		}
-		btrfs_bio->end_io = btrfs_io_bio_endio_readpage;
	} else {
		btrfs_bio->csum = btrfs_bio->csum_inline;
	}
	dio_bio->bi_status = err;
	dio_end_io(dio_bio);
-
-	if (io_bio->end_io)
-		io_bio->end_io(io_bio, blk_status_to_errno(err));
+	btrfs_io_bio_free_csum(io_bio);
	bio_put(bio);
}
	if (!ret)
		return;
-	if (io_bio->end_io)
-		io_bio->end_io(io_bio, ret);
+	btrfs_io_bio_free_csum(io_bio);
free_ordered:
/*
 * we allocate are actually btrfs_io_bios. We'll cram as much of
 * struct btrfs_bio as we can into this over time.
 */
-typedef void (btrfs_io_bio_end_io_t) (struct btrfs_io_bio *bio, int err);
struct btrfs_io_bio {
	unsigned int mirror_num;
	unsigned int stripe_index;
	u64 logical;
	u8 *csum;
	u8 csum_inline[BTRFS_BIO_INLINE_CSUM_SIZE];
-	btrfs_io_bio_end_io_t *end_io;
	struct bvec_iter iter;
	/*
	 * This member must come last, bio_alloc_bioset will allocate enough
	return container_of(bio, struct btrfs_io_bio, bio);
}
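+/*
+ * Free the checksum array if it was allocated out of line, i.e. it does
+ * not point at the inline buffer.  Direct replacement for the removed
+ * btrfs_io_bio::end_io() callback in the read endio paths.
+ */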
+static inline void btrfs_io_bio_free_csum(struct btrfs_io_bio *io_bio)
+{
+	if (io_bio->csum != io_bio->csum_inline) {
+		kfree(io_bio->csum);
+		io_bio->csum = NULL;
+	}
+}
+
struct btrfs_bio_stripe {
	struct btrfs_device *dev;
	u64 physical;