}
EXPORT_SYMBOL_GPL(bio_check_pages_dirty);
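+/*
+ * Lockless, approximate busy-time accounting: each caller advances the
+ * per-partition stamp with cmpxchg() and only the winner charges a single
+ * io_ticks tick for the current jiffy, instead of charging the full
+ * in-flight time as part_round_stats() used to do.
+ */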
+void update_io_ticks(struct hd_struct *part, unsigned long now)
+{
+	unsigned long stamp;
+again:
+	stamp = READ_ONCE(part->stamp);
+	if (unlikely(stamp != now)) {
+		if (likely(cmpxchg(&part->stamp, stamp, now) == stamp)) {
+			__part_stat_add(part, io_ticks, 1);
+		}
+	}
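+	/* A partition's I/O is also accounted against the whole disk (part0). */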
+	if (part->partno) {
+		part = &part_to_disk(part)->part0;
+		goto again;
+	}
+}
+
void generic_start_io_acct(struct request_queue *q, int op,
			   unsigned long sectors, struct hd_struct *part)
{
	part_stat_lock();
-	part_round_stats(q, part);
+	update_io_ticks(part, jiffies);
	part_stat_inc(part, ios[sgrp]);
	part_stat_add(part, sectors[sgrp], sectors);
	part_inc_in_flight(q, part, op_is_write(op));
void generic_end_io_acct(struct request_queue *q, int req_op,
			 struct hd_struct *part, unsigned long start_time)
{
-	unsigned long duration = jiffies - start_time;
+	unsigned long now = jiffies;
+	unsigned long duration = now - start_time;
	const int sgrp = op_stat_group(req_op);
	part_stat_lock();
+	update_io_ticks(part, now);
	part_stat_add(part, nsecs[sgrp], jiffies_to_nsecs(duration));
-	part_round_stats(q, part);
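+	/*
+	 * time_in_queue is now charged per completed request, rather than
+	 * derived from inflight * elapsed time as part_round_stats() did.
+	 */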
+	part_stat_add(part, time_in_queue, duration);
	part_dec_in_flight(q, part, op_is_write(req_op));
	part_stat_unlock();
}
EXPORT_SYMBOL(blk_get_request);
-static void part_round_stats_single(struct request_queue *q,
-				     struct hd_struct *part, unsigned long now,
-				     unsigned int inflight)
-{
-	if (inflight) {
-		__part_stat_add(part, time_in_queue,
-				inflight * (now - part->stamp));
-		__part_stat_add(part, io_ticks, (now - part->stamp));
-	}
-	part->stamp = now;
-}
-
-/**
- * part_round_stats() - Round off the performance stats on a struct disk_stats.
- * @q: target block queue
- * @part: target partition
- *
- * The average IO queue length and utilisation statistics are maintained
- * by observing the current state of the queue length and the amount of
- * time it has been in this state for.
- *
- * Normally, that accounting is done on IO completion, but that can result
- * in more than a second's worth of IO being accounted for within any one
- * second, leading to >100% utilisation. To deal with that, we call this
- * function to do a round-off before returning the results when reading
- * /proc/diskstats. This accounts immediately for all queue usage up to
- * the current jiffies and restarts the counters again.
- */
-void part_round_stats(struct request_queue *q, struct hd_struct *part)
-{
-	struct hd_struct *part2 = NULL;
-	unsigned long now = jiffies;
-	unsigned int inflight[2];
-	int stats = 0;
-
-	if (part->stamp != now)
-		stats |= 1;
-
-	if (part->partno) {
-		part2 = &part_to_disk(part)->part0;
-		if (part2->stamp != now)
-			stats |= 2;
-	}
-
-	if (!stats)
-		return;
-
-	part_in_flight(q, part, inflight);
-
-	if (stats & 2)
-		part_round_stats_single(q, part2, now, inflight[1]);
-	if (stats & 1)
-		part_round_stats_single(q, part, now, inflight[0]);
-}
-EXPORT_SYMBOL_GPL(part_round_stats);
-
void blk_put_request(struct request *req)
{
	blk_mq_free_request(req);
		part_stat_lock();
		part = req->part;
+		update_io_ticks(part, jiffies);
		part_stat_inc(part, ios[sgrp]);
		part_stat_add(part, nsecs[sgrp], now - req->start_time_ns);
-		part_round_stats(req->q, part);
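+		/* time_in_queue is kept in jiffies; convert the ns duration. */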
+		part_stat_add(part, time_in_queue, nsecs_to_jiffies64(now - req->start_time_ns));
		part_dec_in_flight(req->q, part, rq_data_dir(req));
		hd_struct_put(part);
			part = &rq->rq_disk->part0;
			hd_struct_get(part);
		}
-		part_round_stats(rq->q, part);
		part_inc_in_flight(rq->q, part, rw);
		rq->part = part;
	}
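+	/*
+	 * Bump io_ticks at submission as well as at completion, so the jiffy
+	 * in which a request starts counts as busy even if it completes later.
+	 */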
+	update_io_ticks(part, jiffies);
+
	part_stat_unlock();
}
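
For reference, the per-jiffy accounting above can be modelled outside the kernel. The sketch below is illustrative only: stamp and io_ticks are hypothetical user-space stand-ins for the fields touched by update_io_ticks(), and C11 atomics play the role of READ_ONCE()/cmpxchg(). It shows that any number of callers within the same clock step add at most one tick.

#include <stdatomic.h>
#include <stdio.h>

/* Hypothetical stand-ins for part->stamp and the io_ticks counter. */
static _Atomic unsigned long stamp;
static _Atomic unsigned long io_ticks;

/* Minimal model of update_io_ticks(): charge one tick per clock step. */
static void model_update_io_ticks(unsigned long now)
{
	unsigned long old = atomic_load_explicit(&stamp, memory_order_relaxed);

	if (old != now && atomic_compare_exchange_strong(&stamp, &old, now))
		atomic_fetch_add(&io_ticks, 1); /* only the cmpxchg winner accounts */
}

int main(void)
{
	/* Three "requests" seen in the same jiffy add a single tick... */
	model_update_io_ticks(100);
	model_update_io_ticks(100);
	model_update_io_ticks(100);
	/* ...and a later jiffy adds one more. */
	model_update_io_ticks(101);

	printf("io_ticks = %lu\n", atomic_load(&io_ticks)); /* prints 2 */
	return 0;
}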