uint64_t latest = 0;
int i, j, nr;
- blk_stat_init(&dst[BLK_STAT_READ]);
- blk_stat_init(&dst[BLK_STAT_WRITE]);
+ blk_stat_init(&dst[READ]);
+ blk_stat_init(&dst[WRITE]);
nr = 0;
do {
queue_for_each_hw_ctx(q, hctx, i) {
hctx_for_each_ctx(hctx, ctx, j) {
- blk_stat_flush_batch(&ctx->stat[BLK_STAT_READ]);
- blk_stat_flush_batch(&ctx->stat[BLK_STAT_WRITE]);
+ blk_stat_flush_batch(&ctx->stat[READ]);
+ blk_stat_flush_batch(&ctx->stat[WRITE]);
- if (!ctx->stat[BLK_STAT_READ].nr_samples &&
- !ctx->stat[BLK_STAT_WRITE].nr_samples)
+ if (!ctx->stat[READ].nr_samples &&
+ !ctx->stat[WRITE].nr_samples)
continue;
- if (ctx->stat[BLK_STAT_READ].time > newest)
- newest = ctx->stat[BLK_STAT_READ].time;
- if (ctx->stat[BLK_STAT_WRITE].time > newest)
- newest = ctx->stat[BLK_STAT_WRITE].time;
+ if (ctx->stat[READ].time > newest)
+ newest = ctx->stat[READ].time;
+ if (ctx->stat[WRITE].time > newest)
+ newest = ctx->stat[WRITE].time;
}
}
queue_for_each_hw_ctx(q, hctx, i) {
hctx_for_each_ctx(hctx, ctx, j) {
- if (ctx->stat[BLK_STAT_READ].time == newest) {
- blk_stat_sum(&dst[BLK_STAT_READ],
- &ctx->stat[BLK_STAT_READ]);
+ if (ctx->stat[READ].time == newest) {
+ blk_stat_sum(&dst[READ],
+ &ctx->stat[READ]);
nr++;
}
- if (ctx->stat[BLK_STAT_WRITE].time == newest) {
- blk_stat_sum(&dst[BLK_STAT_WRITE],
- &ctx->stat[BLK_STAT_WRITE]);
+ if (ctx->stat[WRITE].time == newest) {
+ blk_stat_sum(&dst[WRITE],
+ &ctx->stat[WRITE]);
nr++;
}
}
*/
} while (!nr);
- dst[BLK_STAT_READ].time = dst[BLK_STAT_WRITE].time = latest;
+ dst[READ].time = dst[WRITE].time = latest;
}
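For readers less familiar with this aggregation, a minimal self-contained sketch of the same idea follows (struct sample and merge_newest are made-up names, not kernel code): pick the newest window timestamp across all per-CPU entries, merge only the entries stamped with that window, and loop again if nothing was merged. In the kernel the retry covers the rare race where a context's window rolls over between the two passes.

#include <stdint.h>

struct sample {
	uint64_t time;			/* window timestamp */
	unsigned int nr_samples;
	uint64_t sum;
};

static void merge_newest(const struct sample *entries, int n,
			 struct sample *dst)
{
	int i, nr = 0;

	do {
		uint64_t newest = 0;

		/* Pass 1: find the most recent window that has samples. */
		for (i = 0; i < n; i++) {
			if (!entries[i].nr_samples)
				continue;
			if (entries[i].time > newest)
				newest = entries[i].time;
		}

		if (!newest)
			break;

		/* Pass 2: merge only the entries from that window. */
		for (i = 0; i < n; i++) {
			if (entries[i].time == newest) {
				dst->nr_samples += entries[i].nr_samples;
				dst->sum += entries[i].sum;
				dst->time = newest;
				nr++;
			}
		}
	} while (!nr);
}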
void blk_queue_stat_get(struct request_queue *q, struct blk_rq_stat *dst)
if (q->mq_ops)
blk_mq_stat_get(q, dst);
else {
- blk_stat_flush_batch(&q->rq_stats[BLK_STAT_READ]);
- blk_stat_flush_batch(&q->rq_stats[BLK_STAT_WRITE]);
- memcpy(&dst[BLK_STAT_READ], &q->rq_stats[BLK_STAT_READ],
- sizeof(struct blk_rq_stat));
- memcpy(&dst[BLK_STAT_WRITE], &q->rq_stats[BLK_STAT_WRITE],
- sizeof(struct blk_rq_stat));
+ blk_stat_flush_batch(&q->rq_stats[READ]);
+ blk_stat_flush_batch(&q->rq_stats[WRITE]);
+ memcpy(&dst[READ], &q->rq_stats[READ],
+ sizeof(struct blk_rq_stat));
+ memcpy(&dst[WRITE], &q->rq_stats[WRITE],
+ sizeof(struct blk_rq_stat));
}
}
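Background on the rename itself (illustrative, not part of the patch): READ and WRITE are the kernel's generic 0/1 data-direction constants, so they can index a two-element per-direction stat array directly, which makes the blk-stat-private BLK_STAT_READ/BLK_STAT_WRITE copies unnecessary. A hypothetical helper, assuming the usual rq_data_dir() macro, might look like:

static inline struct blk_rq_stat *dir_stat(struct blk_rq_stat stat[2],
					   struct request *rq)
{
	/* rq_data_dir() evaluates to READ (0) or WRITE (1) */
	return &stat[rq_data_dir(rq)];
}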
uint64_t newest = 0;
hctx_for_each_ctx(hctx, ctx, i) {
- blk_stat_flush_batch(&ctx->stat[BLK_STAT_READ]);
- blk_stat_flush_batch(&ctx->stat[BLK_STAT_WRITE]);
+ blk_stat_flush_batch(&ctx->stat[READ]);
+ blk_stat_flush_batch(&ctx->stat[WRITE]);
- if (!ctx->stat[BLK_STAT_READ].nr_samples &&
- !ctx->stat[BLK_STAT_WRITE].nr_samples)
+ if (!ctx->stat[READ].nr_samples &&
+ !ctx->stat[WRITE].nr_samples)
continue;
- if (ctx->stat[BLK_STAT_READ].time > newest)
- newest = ctx->stat[BLK_STAT_READ].time;
- if (ctx->stat[BLK_STAT_WRITE].time > newest)
- newest = ctx->stat[BLK_STAT_WRITE].time;
+ if (ctx->stat[READ].time > newest)
+ newest = ctx->stat[READ].time;
+ if (ctx->stat[WRITE].time > newest)
+ newest = ctx->stat[WRITE].time;
}
if (!newest)
break;
hctx_for_each_ctx(hctx, ctx, i) {
- if (ctx->stat[BLK_STAT_READ].time == newest) {
- blk_stat_sum(&dst[BLK_STAT_READ],
- &ctx->stat[BLK_STAT_READ]);
+ if (ctx->stat[READ].time == newest) {
+ blk_stat_sum(&dst[READ], &ctx->stat[READ]);
nr++;
}
- if (ctx->stat[BLK_STAT_WRITE].time == newest) {
- blk_stat_sum(&dst[BLK_STAT_WRITE],
- &ctx->stat[BLK_STAT_WRITE]);
+ if (ctx->stat[WRITE].time == newest) {
+ blk_stat_sum(&dst[WRITE], &ctx->stat[WRITE]);
nr++;
}
}
queue_for_each_hw_ctx(q, hctx, i) {
hctx_for_each_ctx(hctx, ctx, j) {
- blk_stat_init(&ctx->stat[BLK_STAT_READ]);
- blk_stat_init(&ctx->stat[BLK_STAT_WRITE]);
+ blk_stat_init(&ctx->stat[READ]);
+ blk_stat_init(&ctx->stat[WRITE]);
}
}
} else {
- blk_stat_init(&q->rq_stats[BLK_STAT_READ]);
- blk_stat_init(&q->rq_stats[BLK_STAT_WRITE]);
+ blk_stat_init(&q->rq_stats[READ]);
+ blk_stat_init(&q->rq_stats[WRITE]);
}
}
* that it's writes impacting us, and not just some sole read on
* a device that is in a lower power state.
*/
- return stat[BLK_STAT_READ].nr_samples >= 1 &&
- stat[BLK_STAT_WRITE].nr_samples >= RWB_MIN_WRITE_SAMPLES;
+ return (stat[READ].nr_samples >= 1 &&
+ stat[WRITE].nr_samples >= RWB_MIN_WRITE_SAMPLES);
}
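As a worked illustration of the rule above (RWB_MIN_WRITE_SAMPLES is assumed to be 3 here purely for illustration; stat_sample_valid_example is a made-up name):

static void stat_sample_valid_example(void)
{
	struct blk_rq_stat stat[2] = {};

	stat[READ].nr_samples = 1;
	stat[WRITE].nr_samples = 1;
	/* stat_sample_valid(stat) is false: too few writes to blame them */

	stat[WRITE].nr_samples = 3;
	/* now true: at least one read plus RWB_MIN_WRITE_SAMPLES writes */
}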
static u64 rwb_sync_issue_lat(struct rq_wb *rwb)
*/
thislat = rwb_sync_issue_lat(rwb);
if (thislat > rwb->cur_win_nsec ||
- (thislat > rwb->min_lat_nsec && !stat[BLK_STAT_READ].nr_samples)) {
+ (thislat > rwb->min_lat_nsec && !stat[READ].nr_samples)) {
trace_wbt_lat(bdi, thislat);
return LAT_EXCEEDED;
}
* waited or still has writes in flight, consider us doing
* just writes as well.
*/
- if ((stat[BLK_STAT_WRITE].nr_samples && blk_stat_is_current(stat)) ||
+ if ((stat[WRITE].nr_samples && blk_stat_is_current(stat)) ||
wb_recent_wait(rwb) || wbt_inflight(rwb))
return LAT_UNKNOWN_WRITES;
return LAT_UNKNOWN;
/*
* If the 'min' latency exceeds our target, step down.
*/
- if (stat[BLK_STAT_READ].min > rwb->min_lat_nsec) {
- trace_wbt_lat(bdi, stat[BLK_STAT_READ].min);
+ if (stat[READ].min > rwb->min_lat_nsec) {
+ trace_wbt_lat(bdi, stat[READ].min);
trace_wbt_stat(bdi, stat);
return LAT_EXCEEDED;
}
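To summarize the checks as they appear above (the enclosing function also falls through to LAT_OK when none of them fire; that tail is not part of this hunk):

/*
 *	sync issue latency exceeds the window, or exceeds the target
 *	with no completed reads to judge by	-> LAT_EXCEEDED
 *	too few samples, but write activity	-> LAT_UNKNOWN_WRITES
 *	too few samples, no write activity	-> LAT_UNKNOWN
 *	minimum read latency above the target	-> LAT_EXCEEDED
 */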