struct vmscsi_request *vm_srb;
int ret;
- DPRINT_DBG(BLKVSC_DRV, "blkvsc_submit_request() - "
- "req %p type %s start_sector %lu count %ld offset %d "
- "len %d\n", blkvsc_req,
- (blkvsc_req->write) ? "WRITE" : "READ",
- (unsigned long) blkvsc_req->sector_start,
- blkvsc_req->sector_count,
- blkvsc_req->request.data_buffer.offset,
- blkvsc_req->request.data_buffer.len);
storvsc_req = &blkvsc_req->request;
vm_srb = &storvsc_req->vstor_packet.vm_srb;
{
struct block_device_context *blkdev = bdev->bd_disk->private_data;
- DPRINT_DBG(BLKVSC_DRV, "- users %d disk %s\n", blkdev->users,
- blkdev->gd->disk_name);
mutex_lock(&blkvsc_mutex);
spin_lock(&blkdev->lock);
struct scsi_sense_hdr sense_hdr;
struct vmscsi_request *vm_srb;
- DPRINT_DBG(BLKVSC_DRV, "blkvsc_cmd_completion() - req %p\n",
- blkvsc_req);
vm_srb = &blkvsc_req->request.vstor_packet.vm_srb;
blkdev->num_outstanding_reqs--;
int ret = 0;
- DPRINT_DBG(BLKVSC_DRV, "blkvsc_cancel_pending_reqs()");
/* Flush the pending list first */
list_for_each_entry_safe(pend_req, tmp, &blkdev->pending_list,
list_for_each_entry_safe(comp_req, tmp2,
&pend_req->group->blkvsc_req_list,
req_entry) {
- DPRINT_DBG(BLKVSC_DRV, "completing blkvsc_req %p "
- "sect_start %lu sect_count %ld\n",
- comp_req,
- (unsigned long) comp_req->sector_start,
- comp_req->sector_count);
if (comp_req == pend_req)
break;
kmem_cache_free(blkdev->request_pool, comp_req);
}
- DPRINT_DBG(BLKVSC_DRV, "cancelling pending request - %p\n",
- pend_req);
-
list_del(&pend_req->pend_entry);
list_del(&pend_req->req_entry);
* All the sectors have been transferred, i.e. the
* request is done
*/
- DPRINT_DBG(BLKVSC_DRV,
- "blkvsc_cancel_pending_reqs() - "
- "req %p COMPLETED\n", pend_req->req);
kmem_cache_free(blkdev->request_pool,
pend_req->group);
}
unsigned long flags;
int ret;
- DPRINT_DBG(BLKVSC_DRV, "blkvsc_remove()\n");
if (!storvsc_drv_obj->base.dev_rm)
return -1;
if (!blkdev)
return;
- DPRINT_DBG(BLKVSC_DRV, "blkvsc_shutdown - users %d disk %s\n",
- blkdev->users, blkdev->gd->disk_name);
-
spin_lock_irqsave(&blkdev->lock, flags);
blkdev->shutting_down = 1;
{
struct block_device_context *blkdev = disk->private_data;
- DPRINT_DBG(BLKVSC_DRV, "- users %d disk %s\n", blkdev->users,
- blkdev->gd->disk_name);
-
mutex_lock(&blkvsc_mutex);
spin_lock(&blkdev->lock);
if (blkdev->users == 1) {
int pending = 0;
struct blkvsc_request_group *group = NULL;
- DPRINT_DBG(BLKVSC_DRV, "blkdev %p req %p sect %lu\n", blkdev, req,
- (unsigned long)blk_rq_pos(req));
-
/* Create a group to tie req to list of blkvsc_reqs */
group = kmem_cache_zalloc(blkdev->request_pool, GFP_ATOMIC);
if (!group)
* Map this bio into an existing or new storvsc request
*/
bio_for_each_segment(bvec, bio, seg_idx) {
- DPRINT_DBG(BLKVSC_DRV, "bio_for_each_segment() "
- "- req %p bio %p bvec %p seg_idx %d "
- "databuf_idx %d\n", req, bio, bvec,
- seg_idx, databuf_idx);
-
/* Get a new storvsc request */
/* 1st-time */
if ((!blkvsc_req) ||
/* Handle the last one */
if (blkvsc_req) {
- DPRINT_DBG(BLKVSC_DRV, "blkdev %p req %p group %p count %d\n",
- blkdev, req, blkvsc_req->group,
- blkvsc_req->group->outstanding);
-
blkvsc_req->sector_start = start_sector;
sector_div(blkvsc_req->sector_start,
(blkdev->sector_size >> 9));
list_for_each_entry(blkvsc_req, &group->blkvsc_req_list, req_entry) {
if (pending) {
- DPRINT_DBG(BLKVSC_DRV, "adding blkvsc_req to "
- "pending_list - blkvsc_req %p start_sect %lu"
- " sect_count %ld (%lu %ld)\n", blkvsc_req,
- (unsigned long)blkvsc_req->sector_start,
- blkvsc_req->sector_count,
- (unsigned long)start_sector,
- (unsigned long)num_sectors);
list_add_tail(&blkvsc_req->pend_entry,
&blkdev->pending_list);
&blkdev->pending_list);
}
- DPRINT_DBG(BLKVSC_DRV, "submitted blkvsc_req %p "
- "start_sect %lu sect_count %ld (%lu %ld) "
- "ret %d\n", blkvsc_req,
- (unsigned long)blkvsc_req->sector_start,
- blkvsc_req->sector_count,
- (unsigned long)start_sector,
- num_sectors, ret);
}
}
/* Flush the pending list first */
list_for_each_entry_safe(pend_req, tmp, &blkdev->pending_list,
pend_entry) {
- DPRINT_DBG(BLKVSC_DRV, "working off pending_list - %p\n",
- pend_req);
ret = blkvsc_submit_request(pend_req,
blkvsc_request_completion);
struct request *req;
int ret = 0;
- DPRINT_DBG(BLKVSC_DRV, "- enter\n");
while ((req = blk_peek_request(queue)) != NULL) {
- DPRINT_DBG(BLKVSC_DRV, "- req %p\n", req);
blkdev = req->rq_disk->private_data;
if (blkdev->shutting_down || req->cmd_type != REQ_TYPE_FS) {
ret = blkvsc_do_pending_reqs(blkdev);
if (ret != 0) {
- DPRINT_DBG(BLKVSC_DRV,
- "- stop queue - pending_list not empty\n");
blk_stop_queue(queue);
break;
}
ret = blkvsc_do_request(blkdev, req);
if (ret > 0) {
- DPRINT_DBG(BLKVSC_DRV, "- stop queue - no room\n");
blk_stop_queue(queue);
break;
} else if (ret < 0) {
- DPRINT_DBG(BLKVSC_DRV, "- stop queue - no mem\n");
blk_requeue_request(queue, req);
blk_stop_queue(queue);
break;
static int ide0_registered;
static int ide1_registered;
- DPRINT_DBG(BLKVSC_DRV, "blkvsc_probe - enter");
if (!storvsc_drv_obj->base.dev_add) {
DPRINT_ERR(BLKVSC_DRV, "OnDeviceAdd() not set");
struct vmscsi_request *vm_srb;
- DPRINT_DBG(BLKVSC_DRV, "blkdev %p blkvsc_req %p group %p type %s "
- "sect_start %lu sect_count %ld len %d group outstd %d "
- "total outstd %d\n",
- blkdev, blkvsc_req, blkvsc_req->group,
- (blkvsc_req->write) ? "WRITE" : "READ",
- (unsigned long)blkvsc_req->sector_start,
- blkvsc_req->sector_count,
- blkvsc_req->request.data_buffer.len,
- blkvsc_req->group->outstanding,
- blkdev->num_outstanding_reqs);
-
spin_lock_irqsave(&blkdev->lock, flags);
blkdev->num_outstanding_reqs--;
list_for_each_entry_safe(comp_req, tmp,
&blkvsc_req->group->blkvsc_req_list,
req_entry) {
- DPRINT_DBG(BLKVSC_DRV, "completing blkvsc_req %p "
- "sect_start %lu sect_count %ld\n",
- comp_req,
- (unsigned long)comp_req->sector_start,
- comp_req->sector_count);
list_del(&comp_req->req_entry);
* All the sectors have been transferred, i.e. the
* request is done
*/
- DPRINT_DBG(BLKVSC_DRV, "req %p COMPLETED\n",
- comp_req->req);
kmem_cache_free(blkdev->request_pool,
comp_req->group);
}