if (blk_init_free_list(q))
goto out_init;
+ /*
+ * if caller didn't supply a lock, they get per-queue locking with
+ * our embedded lock
+ */
+ if (!lock) {
+ spin_lock_init(&q->__queue_lock);
+ lock = &q->__queue_lock;
+ }
+
q->request_fn = rfn;
q->back_merge_fn = ll_back_merge_fn;
q->front_merge_fn = ll_front_merge_fn;
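
The block-layer half of the change: blk_init_queue() now treats its lock
argument as optional, and a caller that passes NULL gets the queue's embedded
__queue_lock, published through q->queue_lock exactly as an external lock
would be. A minimal sketch of the resulting calling convention, assuming the
blk_init_queue(request_fn_proc *, spinlock_t *) prototype of this era;
my_request_fn and my_driver_init are hypothetical names:

#include <linux/blkdev.h>
#include <linux/errno.h>

/* Hypothetical request function: dequeues and dispatches requests. */
static void my_request_fn(request_queue_t *q)
{
	/* ... */
}

static int my_driver_init(void)
{
	request_queue_t *q;

	/*
	 * Passing NULL asks the block layer to use the queue's embedded
	 * lock; a driver that needs to share one lock across several
	 * queues would pass its own spinlock_t * instead.
	 */
	q = blk_init_queue(my_request_fn, NULL);
	if (!q)
		return -ENOMEM;

	/* Either way, the lock is only ever taken via the pointer. */
	spin_lock_irq(q->queue_lock);
	/* ... manipulate queue state under the lock ... */
	spin_unlock_irq(q->queue_lock);
	return 0;
}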
shost->host_failed))
scsi_eh_wakeup(shost);
spin_unlock(shost->host_lock);
- spin_lock(&sdev->sdev_lock);
+ spin_lock(sdev->request_queue->queue_lock);
sdev->device_busy--;
- spin_unlock_irqrestore(&sdev->sdev_lock, flags);
+ spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
}
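
Worth noting in the hunk above: the flags were saved by a spin_lock_irqsave()
on host_lock just before the quoted context, the handoff to the queue lock
uses plain spin_lock()/spin_unlock() so interrupts stay disabled throughout,
and the final spin_unlock_irqrestore() returns the flags saved for host_lock.
A generic sketch of this handoff idiom; all names here are hypothetical:

#include <linux/spinlock.h>

/*
 * Lock handoff: interrupts are disabled once, the two locks are held
 * back to back (never simultaneously), and the saved flags travel from
 * the first lock to the second.
 */
static void handoff(spinlock_t *a, spinlock_t *b, int *ca, int *cb)
{
	unsigned long flags;

	spin_lock_irqsave(a, flags);		/* irqs off, a held */
	(*ca)--;
	spin_unlock(a);				/* a dropped, irqs still off */
	spin_lock(b);				/* b held, irqs still off */
	(*cb)--;
	spin_unlock_irqrestore(b, flags);	/* b dropped, irqs restored */
}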
struct Scsi_Host *shost = sdev->host;
struct request_queue *q;
- q = blk_init_queue(scsi_request_fn, &sdev->sdev_lock);
+ q = blk_init_queue(scsi_request_fn, NULL);
if (!q)
return NULL;
sdev->borken = 1;
- spin_lock_init(&sdev->sdev_lock);
sdev->request_queue = scsi_alloc_queue(sdev);
if (!sdev->request_queue) {
/* release fn is set up in scsi_sysfs_device_initialize, so
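
These two hunks are the SCSI consumer side: scsi_alloc_queue() passes NULL so
the block layer supplies the lock, and scsi_alloc_sdev() drops the
spin_lock_init() for the field removed below. Per-device state formerly
guarded by sdev_lock is reached through the queue's lock pointer from now on;
a sketch, with a hypothetical helper name:

#include <linux/blkdev.h>
#include <scsi/scsi_device.h>

/*
 * Sketch: how SCSI code reaches the lock after the conversion.
 * device_busy is the kind of field the patch itself moves under the
 * queue lock.
 */
static void my_busy_inc(struct scsi_device *sdev)
{
	unsigned long flags;

	spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
	sdev->device_busy++;
	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
}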
unsigned long queue_flags;
/*
- * protects queue structures from reentrancy
+ * protects queue structures from reentrancy. ->__queue_lock should
+ * _never_ be used directly, it is queue private. always use
+ * ->queue_lock.
*/
+ spinlock_t __queue_lock;
spinlock_t *queue_lock;
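
The embedded lock is deliberately private storage: when a caller hands
blk_init_queue() an external lock, __queue_lock sits unused, so code reaching
into it directly would be locking the wrong spinlock. A short sketch of the
discipline the comment demands; the function name is hypothetical:

#include <linux/blkdev.h>

static void quiesce_example(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);	/* correct: always via the pointer */
	/* ... */
	spin_unlock_irq(q->queue_lock);

	/*
	 * WRONG: spin_lock_irq(&q->__queue_lock);
	 * If the caller of blk_init_queue() supplied its own lock,
	 * __queue_lock is not the lock protecting this queue at all.
	 */
}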
struct list_head same_target_siblings; /* just the devices sharing same target id */
volatile unsigned short device_busy; /* commands actually active on low-level */
- spinlock_t sdev_lock; /* also the request queue_lock */
spinlock_t list_lock;
struct list_head cmd_list; /* queue of in use SCSI Command structures */
struct list_head starved_entry;
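
With the field gone from struct scsi_device, any leftover reference to
sdev->sdev_lock fails to compile, forcing every site through the same
conversion scsi_device_unbusy() got above. A hypothetical wrapper, sketch
only, shows the mechanical shape of that replacement:

#include <linux/blkdev.h>
#include <scsi/scsi_device.h>

/*
 * Hypothetical convenience wrapper: makes converted code read much like
 * the old &sdev->sdev_lock expression did.
 */
static inline spinlock_t *scsi_sdev_lock(struct scsi_device *sdev)
{
	return sdev->request_queue->queue_lock;
}

/* usage: spin_lock_irq(scsi_sdev_lock(sdev)); ... */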