blk-merge: compute bio->bi_seg_front_size efficiently
author Ming Lei <ming.lei@redhat.com>
Mon, 18 Dec 2017 12:22:14 +0000 (20:22 +0800)
committer Jens Axboe <axboe@kernel.dk>
Sat, 6 Jan 2018 16:18:00 +0000 (09:18 -0700)
It is enough to check and compute bio->bi_seg_front_size just
after the first segment is found, but the current code performs
that check for every bvec, which is inefficient.

This patch follows the approach taken in __blk_recalc_rq_segments()
for computing bio->bi_seg_front_size; it is more efficient, and the
code becomes more readable as well.

Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-merge.c

index f5dedd57dff6b40fb6e88faa532bb88a94fcde61..a476337a8ff449f747873f592b9adb615c074642 100644
@@ -146,22 +146,21 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
                        bvprvp = &bvprv;
                        sectors += bv.bv_len >> 9;
 
-                       if (nsegs == 1 && seg_size > front_seg_size)
-                               front_seg_size = seg_size;
                        continue;
                }
 new_segment:
                if (nsegs == queue_max_segments(q))
                        goto split;
 
+               if (nsegs == 1 && seg_size > front_seg_size)
+                       front_seg_size = seg_size;
+
                nsegs++;
                bvprv = bv;
                bvprvp = &bvprv;
                seg_size = bv.bv_len;
                sectors += bv.bv_len >> 9;
 
-               if (nsegs == 1 && seg_size > front_seg_size)
-                       front_seg_size = seg_size;
        }
 
        do_split = false;
@@ -174,6 +173,8 @@ split:
                        bio = new;
        }
 
+       if (nsegs == 1 && seg_size > front_seg_size)
+               front_seg_size = seg_size;
        bio->bi_seg_front_size = front_seg_size;
        if (seg_size > bio->bi_seg_back_size)
                bio->bi_seg_back_size = seg_size;
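
The pattern the patch adopts can be summarized in a minimal sketch
(struct seg_state and the helpers below are hypothetical and only model
the checks that blk_bio_segment_split() performs inline; per-bvec merge
details are elided):

/*
 * Minimal sketch of the front_seg_size bookkeeping after this patch.
 */
struct seg_state {
	unsigned int nsegs;          /* segments found so far */
	unsigned int seg_size;       /* size of the segment being built */
	unsigned int front_seg_size; /* size of the bio's first segment */
};

static void start_new_segment(struct seg_state *s, unsigned int bv_len)
{
	/*
	 * Only the first segment can define front_seg_size, so the
	 * check is needed once per segment boundary rather than once
	 * per bvec, as it was before the patch.
	 */
	if (s->nsegs == 1 && s->seg_size > s->front_seg_size)
		s->front_seg_size = s->seg_size;

	s->nsegs++;
	s->seg_size = bv_len;
}

static void finish_scan(struct seg_state *s)
{
	/*
	 * If the scan ends while still inside the first segment
	 * (including when the loop is left via the split label), no
	 * further segment boundary is reached, so the same check must
	 * run once more after the loop.
	 */
	if (s->nsegs == 1 && s->seg_size > s->front_seg_size)
		s->front_seg_size = s->seg_size;
}

With this shape the nsegs == 1 comparison runs once per segment boundary
plus once after the loop, instead of once per bvec, while every exit path
from the scan still records the first segment's size.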