int ret;
if (flags & BLKDEV_DISCARD_SECURE)
- type |= REQ_SECURE;
+ op_flags |= REQ_SECURE;
blk_start_plug(&plug);
- ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, type,
+ ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, op_flags,
&bio);
if (!ret && bio) {
- ret = submit_bio_wait(type, bio);
+ ret = submit_bio_wait(bio);
if (ret == -EOPNOTSUPP)
ret = 0;
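+ /* submit_bio_wait() does not free the bio; drop our reference */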
+ bio_put(bio);
}
blk_finish_plug(&plug);
}
}
- if (bio)
+ if (bio) {
- ret = submit_bio_wait(REQ_WRITE | REQ_WRITE_SAME, bio);
+ ret = submit_bio_wait(bio);
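+ /* the bio chain is still ours after the wait; put it */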
+ bio_put(bio);
+ }
return ret != -EOPNOTSUPP ? ret : 0;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
}
}
- if (bio)
- return submit_bio_wait(bio);
+ if (bio) {
- ret = submit_bio_wait(WRITE, bio);
++ ret = submit_bio_wait(bio);
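+ /* drop the last reference so the bio chain is freed */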
+ bio_put(bio);
+ return ret;
+ }
return 0;
}
dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename);
- if (shadow[j].request->cmd_flags &
- (REQ_FLUSH | REQ_FUA | REQ_DISCARD | REQ_SECURE)) {
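+ /*
+ * Save in-flight bios on info->bio_list and bio-less requests on
+ * info->requests so they can be resubmitted after reconnecting.
+ */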
+ bio_list_init(&info->bio_list);
+ INIT_LIST_HEAD(&info->requests);
+ for (i = 0; i < info->nr_rings; i++) {
+ struct blkfront_ring_info *rinfo = &info->rinfo[i];
+ struct bio_list merge_bio;
+ struct blk_shadow *shadow = rinfo->shadow;
+
+ for (j = 0; j < BLK_RING_SIZE(info); j++) {
+ /* Not in use? */
+ if (!shadow[j].request)
+ continue;
+
+ /*
+ * Get the bios in the request so we can re-queue them.
+ */
++ if (req_op(shadow[j].request) == REQ_OP_FLUSH ||
++ req_op(shadow[j].request) == REQ_OP_DISCARD ||
++ shadow[j].request->cmd_flags & (REQ_FUA | REQ_SECURE)) {
++
+ /*
+ * Flush operations don't contain bios, so
+ * we need to requeue the whole request
+ */
+ list_add(&shadow[j].request->queuelist, &info->requests);
+ continue;
+ }
+ merge_bio.head = shadow[j].request->bio;
+ merge_bio.tail = shadow[j].request->biotail;
+ bio_list_merge(&info->bio_list, &merge_bio);
+ shadow[j].request->bio = NULL;
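+ /* bios are saved on info->bio_list; complete the now bio-less request */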
+ blk_mq_end_request(shadow[j].request, 0);
+ }
+ }
+
blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);
err = negotiate_mq(info);