*/
atomic_t has_dirty;
+ /*
+ * Set to zero by anything that touches the backing volume, except
+ * writeback. Incremented by writeback. Used to determine when to
+ * accelerate idle writeback.
+ */
+ atomic_t backing_idle;
+
struct bch_ratelimit writeback_rate;
struct delayed_work writeback_rate_update;
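
As I read it, writeback increments this counter once per long delay and any
foreground request zeroes it, so a value of N means N consecutive long
writeback delays passed without the backing device being touched. A minimal
userspace model of that contract (the names foreground_io and writeback_tick
are mine, not from the patch):

    /* Userspace model of the backing_idle contract; not kernel code. */
    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_int backing_idle;

    /* Any request that touches the backing device resets the count. */
    static void foreground_io(void)
    {
            atomic_store(&backing_idle, 0);
    }

    /*
     * The writeback thread calls this once per long (>= HZ/2) delay.
     * Returns true once three consecutive delays have elapsed with no
     * foreground I/O, i.e. the backing disk looks idle.
     */
    static bool writeback_tick(void)
    {
            return atomic_fetch_add(&backing_idle, 1) + 1 >= 3;
    }
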
struct cached_dev *dc = container_of(d, struct cached_dev, disk);
int rw = bio_data_dir(bio);
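+ /* A foreground request touches the backing device: reset the idle count. */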
+ atomic_set(&dc->backing_idle, 0);
generic_start_io_acct(q, rw, bio_sectors(bio), &d->disk->part0);
bio_set_dev(bio, dc->bdev);
delay = writeback_delay(dc, size);
+ /* If the control system would wait for at least half a
+ * second, and there have been no requests hitting the backing
+ * disk for a while, use an alternate mode where we have at most
+ * one contiguous set of writebacks in flight at a time. If
+ * someone wants to do I/O it will be quick, as it will only
+ * have to contend with one operation in flight, and we'll
+ * be round-tripping data to the backing disk as quickly as
+ * it can accept it.
+ */
+ if (delay >= HZ / 2) {
+ /* A count of 3 means at least 1.5 seconds of idle time
+ * (three delays of HZ/2 or more), up to 7.5 seconds if
+ * writeback has slowed way down.
+ */
+ if (atomic_inc_return(&dc->backing_idle) >= 3) {
+ /* Wait for current I/Os to finish */
+ closure_sync(&cl);
+ /* And immediately launch a new set. */
+ delay = 0;
+ }
+ }
+
while (!kthread_should_stop() && delay) {
schedule_timeout_interruptible(delay);
delay = writeback_delay(dc, 0);
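
To sanity-check the control flow, here is a sketch of how read_dirty()
behaves with this hunk applied, as I understand it; next_writeback_key and
issue_writeback are hypothetical stand-ins for the surrounding keybuf and
closure code, while cl, dc and delay are as in the function:

    /* Sketch only; the two helper names are stand-ins, not bcache code. */
    for (;;) {
            struct keybuf_key *w = next_writeback_key(dc); /* hypothetical */

            if (!w || kthread_should_stop())
                    break;

            issue_writeback(dc, w, &cl);  /* hypothetical: queues I/O under cl */
            delay = writeback_delay(dc, KEY_SIZE(&w->key));

            if (delay >= HZ / 2 &&
                atomic_inc_return(&dc->backing_idle) >= 3) {
                    closure_sync(&cl);    /* drain the one in-flight set */
                    delay = 0;            /* and launch the next set at once */
            }

            while (!kthread_should_stop() && delay) {
                    schedule_timeout_interruptible(delay);
                    delay = writeback_delay(dc, 0);
            }
    }

If that reading is right, the mode is self-cancelling: the first foreground
request resets backing_idle (see the make_request hunk above), so the next
pass falls back to the normal rate-limited delay.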