}
EXPORT_SYMBOL(md_write_start);
+/* md_write_inc() can only be called when md_write_start() has
+ * already been called at least once for the current request.
+ * It increments the counter and is useful when a single request
+ * is split into several parts. Each part causes an increment and
+ * so needs a matching md_write_end().
+ * Unlike md_write_start(), it is safe to call md_write_inc() inside
+ * a spinlocked region.
+ */
+void md_write_inc(struct mddev *mddev, struct bio *bi)
+{
+ if (bio_data_dir(bi) != WRITE)
+ return;
+ WARN_ON_ONCE(mddev->in_sync || mddev->ro);
+ atomic_inc(&mddev->writes_pending);
+}
+EXPORT_SYMBOL(md_write_inc);
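+
+/* Illustrative sketch (not part of this patch): a caller that splits
+ * one write request into two parts could balance the accounting as
+ * follows, where 'some_lock' stands for any spinlock the caller holds:
+ *
+ *    md_write_start(mddev, bi);    (may sleep; call outside spinlocks)
+ *    spin_lock(&some_lock);
+ *    md_write_inc(mddev, bi);      (extra reference for the second part)
+ *    spin_unlock(&some_lock);
+ *    ...
+ *    md_write_end(mddev);          (one matching call per part)
+ *    md_write_end(mddev);
+ */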
+
void md_write_end(struct mddev *mddev)
{
if (atomic_dec_and_test(&mddev->writes_pending)) {
bi->bi_next = *bip;
*bip = bi;
raid5_inc_bi_active_stripes(bi);
+ md_write_inc(conf->mddev, bi);
if (forwrite) {
/* check if page is covered */
struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
bi->bi_error = -EIO;
- if (!raid5_dec_bi_active_stripes(bi)) {
- md_write_end(conf->mddev);
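+ /* each bio attached to this stripe took a md_write_inc()
+ * reference when it was added to the stripe; drop it for the
+ * failed write
+ */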
+ md_write_end(conf->mddev);
+ if (!raid5_dec_bi_active_stripes(bi))
bio_list_add(return_bi, bi);
- }
bi = nextbi;
}
if (bitmap_end)
struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
bi->bi_error = -EIO;
- if (!raid5_dec_bi_active_stripes(bi)) {
- md_write_end(conf->mddev);
+ md_write_end(conf->mddev);
+ if (!raid5_dec_bi_active_stripes(bi))
bio_list_add(return_bi, bi);
- }
bi = bi2;
}
while (wbi && wbi->bi_iter.bi_sector <
dev->sector + STRIPE_SECTORS) {
wbi2 = r5_next_bio(wbi, dev->sector);
- if (!raid5_dec_bi_active_stripes(wbi)) {
- md_write_end(conf->mddev);
+ md_write_end(conf->mddev);
+ if (!raid5_dec_bi_active_stripes(wbi))
bio_list_add(return_bi, wbi);
- }
wbi = wbi2;
}
bitmap_endwrite(conf->mddev->bitmap, sh->sector,
bi->bi_next = NULL;
bi->bi_phys_segments = 1; /* over-loaded to count active stripes */
+ md_write_start(mddev, bi);
stripe_sectors = conf->chunk_sectors *
(conf->raid_disks - conf->max_degraded);
sh->dev[d].towrite = bi;
set_bit(R5_OVERWRITE, &sh->dev[d].flags);
raid5_inc_bi_active_stripes(bi);
+ md_write_inc(mddev, bi);
sh->overwrite_disks++;
}
spin_unlock_irq(&sh->stripe_lock);
release_stripe_plug(mddev, sh);
}
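+ /* drop the initial reference taken by md_write_start() above;
+ * each md_write_inc() reference is dropped when the corresponding
+ * stripe write completes
+ */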
+ md_write_end(mddev);
remaining = raid5_dec_bi_active_stripes(bi);
if (remaining == 0) {
- md_write_end(mddev);
bio_endio(bi);
}
}
do_flush = bi->bi_opf & REQ_PREFLUSH;
}
- md_write_start(mddev, bi);
-
/*
* If array is degraded, better not do chunk aligned read because
* later we might have to read it again in order to reconstruct
last_sector = bio_end_sector(bi);
bi->bi_next = NULL;
bi->bi_phys_segments = 1; /* over-loaded to count active stripes */
+ md_write_start(mddev, bi);
prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
}
finish_wait(&conf->wait_for_overlap, &w);
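+ /* for writes, drop the initial md_write_start() reference here;
+ * per-stripe md_write_inc() references are dropped as stripes
+ * complete
+ */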
+ if (rw == WRITE)
+ md_write_end(mddev);
remaining = raid5_dec_bi_active_stripes(bi);
if (remaining == 0) {
- if ( rw == WRITE )
- md_write_end(mddev);
trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
bi, 0);