--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ ... @@ static int crypt_map(struct dm_target *ti, struct bio *bio,
 		return DM_MAPIO_REMAPPED;
 	}
 
-	io = crypt_io_alloc(ti, bio, bio->bi_sector - ti->begin);
+	io = crypt_io_alloc(ti, bio, dm_target_offset(ti, bio->bi_sector));
 
 	if (bio_data_dir(io->base_bio) == READ)
 		kcryptd_queue_io(io);
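
For reference, the helper every hunk here switches to is a one-liner in
include/linux/device-mapper.h; a minimal sketch of it, with a descriptive
comment added:

/*
 * Translate a sector on the mapped device into an offset relative to
 * the start of this target.
 */
#define dm_target_offset(ti, sector) ((sector) - (ti)->begin)
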
@@ ... @@ static int crypt_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
 		return max_size;
 
 	bvm->bi_bdev = cc->dev->bdev;
-	bvm->bi_sector = cc->start + bvm->bi_sector - ti->begin;
+	bvm->bi_sector = cc->start + dm_target_offset(ti, bvm->bi_sector);
 
 	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
 }
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ ... @@ static int delay_map(struct dm_target *ti, struct bio *bio,
 		bio->bi_bdev = dc->dev_write->bdev;
 		if (bio_sectors(bio))
 			bio->bi_sector = dc->start_write +
-					 (bio->bi_sector - ti->begin);
+				dm_target_offset(ti, bio->bi_sector);
 
 		return delay_bio(dc, dc->write_delay, bio);
 	}
 
 	bio->bi_bdev = dc->dev_read->bdev;
-	bio->bi_sector = dc->start_read +
-			 (bio->bi_sector - ti->begin);
+	bio->bi_sector = dc->start_read + dm_target_offset(ti, bio->bi_sector);
 
 	return delay_bio(dc, dc->read_delay, bio);
 }
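
The remap in dm-crypt and dm-delay is the same two-step arithmetic:
subtract where the target begins on the mapped device, then add where its
data begins on the underlying device. A standalone userspace sketch with
made-up sector numbers, just to pin that arithmetic down (the struct, the
values, and main() are illustrative, not kernel code):

#include <assert.h>

typedef unsigned long long sector_t;

struct dm_target {
	sector_t begin;	/* first sector of this target in the mapped device */
};

#define dm_target_offset(ti, sector) ((sector) - (ti)->begin)

int main(void)
{
	struct dm_target ti = { .begin = 1000 };
	sector_t start_read = 2048;	/* start of the target's data on the backing device */
	sector_t bi_sector = 1010;	/* sector the bio was submitted to */

	/* 10 sectors into the target, so 2048 + 10 = 2058 on the backing device */
	assert(start_read + dm_target_offset(&ti, bi_sector) == 2058);
	return 0;
}
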
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ ... @@ static sector_t linear_map_sector(struct dm_target *ti, sector_t bi_sector)
 {
 	struct linear_c *lc = ti->private;
 
-	return lc->start + (bi_sector - ti->begin);
+	return lc->start + dm_target_offset(ti, bi_sector);
 }
 
 static void linear_map_bio(struct dm_target *ti, struct bio *bio)
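
For context, the unchanged caller of linear_map_sector() in the same file
looks roughly like this (paraphrased from the surrounding source, not part
of the diff):

static void linear_map_bio(struct dm_target *ti, struct bio *bio)
{
	struct linear_c *lc = ti->private;

	bio->bi_bdev = lc->dev->bdev;
	if (bio_sectors(bio))
		bio->bi_sector = linear_map_sector(ti, bio->bi_sector);
}
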
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ ... @@ static sector_t map_sector(struct mirror *m, struct bio *bio)
 {
 	if (unlikely(!bio->bi_size))
 		return 0;
-	return m->offset + (bio->bi_sector - m->ms->ti->begin);
+	return m->offset + dm_target_offset(m->ms->ti, bio->bi_sector);
 }
 
 static void map_bio(struct mirror *m, struct bio *bio)
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ ... @@ static int stripe_map(struct dm_target *ti, struct bio *bio,
 		return DM_MAPIO_REMAPPED;
 	}
 
-	offset = bio->bi_sector - ti->begin;
+	offset = dm_target_offset(ti, bio->bi_sector);
 	chunk = offset >> sc->chunk_shift;
 	stripe = sector_div(chunk, sc->stripes);
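
After the conversion, stripe_map goes on to split the target-relative
offset into a stripe number and a chunk index. A userspace sketch of that
arithmetic with illustrative parameters; sector_div_sketch() stands in for
the kernel's sector_div(), which divides in place and returns the
remainder:

#include <assert.h>

typedef unsigned long long sector_t;

static unsigned int sector_div_sketch(sector_t *n, unsigned int base)
{
	unsigned int rem = (unsigned int)(*n % base);

	*n /= base;
	return rem;
}

int main(void)
{
	unsigned int chunk_shift = 3;		/* 8-sector (4 KiB) chunks */
	unsigned int stripes = 4;		/* four backing devices */
	sector_t chunk = 70 >> chunk_shift;	/* offset 70 -> chunk 8 */
	unsigned int stripe = sector_div_sketch(&chunk, stripes);

	/* chunk 8 lands on stripe 0, as that stripe's chunk 2 */
	assert(stripe == 0 && chunk == 2);
	return 0;
}
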