"%s: isci_device = %p\n", __func__, idev);
if (reason == SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED)
- isci_remote_device_change_state(idev, isci_stopping);
+ set_bit(IDEV_GONE, &idev->flags);
else
/* device ready is actually a "not ready for io" state. */
isci_remote_device_change_state(idev, isci_ready);
/* cleanup requests that failed after starting on the port */
if (status != SCI_SUCCESS)
scic_sds_port_complete_io(sci_port, sci_dev, sci_req);
- else
+ else {
+ kref_get(&sci_dev_to_idev(sci_dev)->kref);
scic_sds_remote_device_increment_request_count(sci_dev);
+ }
}
enum sci_status scic_sds_remote_device_start_io(struct scic_sds_controller *scic,
"%s: Port:0x%p Device:0x%p Request:0x%p Status:0x%x "
"could not complete\n", __func__, sci_port,
sci_dev, sci_req, status);
+ else
+ isci_put_device(sci_dev_to_idev(sci_dev));
return status;
}
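
The two hunks above establish the per-request reference rule: the start path pins the isci_remote_device with kref_get() once the core accepts the request, and the completion path drops that pin with isci_put_device(). A minimal sketch of the intended pairing (illustration only, not part of the patch; example_io_lifetime() is a hypothetical caller):

/* Illustration of the reference rule established above, not driver code:
 * every successfully started request holds one reference on the device,
 * released when the core reports the request complete.
 */
static void example_io_lifetime(struct scic_sds_remote_device *sci_dev)
{
	struct isci_remote_device *idev = sci_dev_to_idev(sci_dev);

	kref_get(&idev->kref);	/* taken in the start_io/start_task path */

	/* ... request is processed by the hardware ... */

	isci_put_device(idev);	/* dropped in the complete_io path */
}
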
* here should go through isci_remote_device_nuke_requests.
* If we hit this condition, we will need a way to complete
* io requests in process */
- while (!list_empty(&idev->reqs_in_process)) {
-
- dev_err(&ihost->pdev->dev,
- "%s: ** request list not empty! **\n", __func__);
- BUG();
- }
+ BUG_ON(!list_empty(&idev->reqs_in_process));
scic_remote_device_destruct(&idev->sci);
- idev->domain_dev->lldd_dev = NULL;
- idev->domain_dev = NULL;
- idev->isci_port = NULL;
list_del_init(&idev->node);
-
- clear_bit(IDEV_START_PENDING, &idev->flags);
- clear_bit(IDEV_STOP_PENDING, &idev->flags);
- clear_bit(IDEV_EH, &idev->flags);
- wake_up(&ihost->eventq);
+ isci_put_device(idev);
}
/**
return idev;
}
+void isci_remote_device_release(struct kref *kref)
+{
+ struct isci_remote_device *idev = container_of(kref, typeof(*idev), kref);
+ struct isci_host *ihost = idev->isci_port->isci_host;
+
+ idev->domain_dev = NULL;
+ idev->isci_port = NULL;
+ clear_bit(IDEV_START_PENDING, &idev->flags);
+ clear_bit(IDEV_STOP_PENDING, &idev->flags);
+ clear_bit(IDEV_GONE, &idev->flags);
+ clear_bit(IDEV_EH, &idev->flags);
+ smp_mb__before_clear_bit();
+ clear_bit(IDEV_ALLOCATED, &idev->flags);
+ wake_up(&ihost->eventq);
+}
+
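
isci_remote_device_release() is the kref release callback; the lookup and put helpers it pairs with are not shown in these hunks (they presumably live in the remote device header). A sketch consistent with how the rest of the patch uses them, assuming lookups happen under ihost->scic_lock and that isci_put_device() must tolerate a NULL device:

/* Sketch only: assumed counterparts to isci_remote_device_release(), not
 * part of this patch. isci_lookup_device() takes a reference on the device
 * published via dev->lldd_dev, skipping devices already marked IDEV_GONE;
 * isci_put_device() drops a reference and is NULL-safe so error paths can
 * call it unconditionally.
 */
static inline struct isci_remote_device *
isci_lookup_device(struct domain_device *dev)
{
	struct isci_remote_device *idev = dev->lldd_dev;

	if (idev && !test_bit(IDEV_GONE, &idev->flags)) {
		kref_get(&idev->kref);
		return idev;
	}
	return NULL;
}

static inline void isci_put_device(struct isci_remote_device *idev)
{
	if (idev)
		kref_put(&idev->kref, isci_remote_device_release);
}
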
/**
* isci_remote_device_stop() - This function is called internally to stop the
* remote device.
dev_dbg(&ihost->pdev->dev,
"%s: isci_device = %p\n", __func__, idev);
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ idev->domain_dev->lldd_dev = NULL; /* disable new lookups */
+ set_bit(IDEV_GONE, &idev->flags);
isci_remote_device_change_state(idev, isci_stopping);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
/* Kill all outstanding requests. */
isci_remote_device_nuke_requests(ihost, idev);
spin_unlock_irqrestore(&ihost->scic_lock, flags);
/* Wait for the stop complete callback. */
- if (status == SCI_SUCCESS) {
+ if (WARN_ONCE(status != SCI_SUCCESS, "failed to stop device\n"))
+ /* nothing to wait for */;
+ else
wait_for_device_stop(ihost, idev);
- clear_bit(IDEV_ALLOCATED, &idev->flags);
- }
-
- dev_dbg(&ihost->pdev->dev,
- "%s: idev = %p - after completion wait\n",
- __func__, idev);
return status;
}
if (!isci_device)
return -ENODEV;
+ kref_init(&isci_device->kref);
INIT_LIST_HEAD(&isci_device->node);
- domain_dev->lldd_dev = isci_device;
+
+ spin_lock_irq(&isci_host->scic_lock);
isci_device->domain_dev = domain_dev;
isci_device->isci_port = isci_port;
isci_remote_device_change_state(isci_device, isci_starting);
-
-
- spin_lock_irq(&isci_host->scic_lock);
list_add_tail(&isci_device->node, &isci_port->remote_dev_list);
set_bit(IDEV_START_PENDING, &isci_device->flags);
status = isci_remote_device_construct(isci_port, isci_device);
- spin_unlock_irq(&isci_host->scic_lock);
dev_dbg(&isci_host->pdev->dev,
"%s: isci_device = %p\n",
__func__, isci_device);
- if (status != SCI_SUCCESS) {
-
- spin_lock_irq(&isci_host->scic_lock);
- isci_remote_device_deconstruct(
- isci_host,
- isci_device
- );
- spin_unlock_irq(&isci_host->scic_lock);
- return -ENODEV;
- }
+ if (status == SCI_SUCCESS) {
+ /* device came up, advertise it to the world */
+ domain_dev->lldd_dev = isci_device;
+ } else
+ isci_put_device(isci_device);
+ spin_unlock_irq(&isci_host->scic_lock);
/* wait for the device ready callback. */
wait_for_device_start(isci_host, isci_device);
- return 0;
+ return status == SCI_SUCCESS ? 0 : -ENODEV;
}
/**
* isci_device_is_reset_pending() - This function will check if there is any
* none.
*/
static void isci_request_handle_controller_specific_errors(
- struct isci_remote_device *isci_device,
+ struct isci_remote_device *idev,
struct isci_request *request,
struct sas_task *task,
enum service_response *response_ptr,
* that we ignore the quiesce state, since we are
* concerned about the actual device state.
*/
- if ((isci_device->status == isci_stopping) ||
- (isci_device->status == isci_stopped))
+ if (!idev)
*status_ptr = SAS_DEVICE_UNKNOWN;
else
*status_ptr = SAS_ABORTED_TASK;
/* Task in the target is not done. */
*response_ptr = SAS_TASK_UNDELIVERED;
- if ((isci_device->status == isci_stopping) ||
- (isci_device->status == isci_stopped))
+ if (!idev)
*status_ptr = SAS_DEVICE_UNKNOWN;
else
*status_ptr = SAM_STAT_TASK_ABORTED;
* that we ignore the quiesce state, since we are
* concerned about the actual device state.
*/
- if ((isci_device->status == isci_stopping) ||
- (isci_device->status == isci_stopped))
+ if (!idev)
*status_ptr = SAS_DEVICE_UNKNOWN;
else
*status_ptr = SAS_ABORTED_TASK;
struct ssp_response_iu *resp_iu;
void *resp_buf;
unsigned long task_flags;
- struct isci_remote_device *isci_device = request->isci_device;
+ struct isci_remote_device *idev = isci_lookup_device(task->dev);
enum service_response response = SAS_TASK_UNDELIVERED;
enum exec_status status = SAS_ABORTED_TASK;
enum isci_request_status request_status;
* that we ignore the quiesce state, since we are
* concerned about the actual device state.
*/
- if ((isci_device->status == isci_stopping)
- || (isci_device->status == isci_stopped)
- )
+ if (!idev)
status = SAS_DEVICE_UNKNOWN;
else
status = SAS_ABORTED_TASK;
request->complete_in_target = true;
response = SAS_TASK_UNDELIVERED;
- if ((isci_device->status == isci_stopping) ||
- (isci_device->status == isci_stopped))
+ if (!idev)
/* The device has been /is being stopped. Note that
* we ignore the quiesce state, since we are
* concerned about the actual device state.
* that we ignore the quiesce state, since we are
* concerned about the actual device state.
*/
- if ((isci_device->status == isci_stopping) ||
- (isci_device->status == isci_stopped))
+ if (!idev)
status = SAS_DEVICE_UNKNOWN;
else
status = SAS_ABORTED_TASK;
* that we ignore the quiesce state, since we are
* concerned about the actual device state.
*/
- if ((isci_device->status == isci_stopping) ||
- (isci_device->status == isci_stopped))
+ if (!idev)
status = SAS_DEVICE_UNKNOWN;
else
status = SAS_ABORTED_TASK;
case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR:
isci_request_handle_controller_specific_errors(
- isci_device, request, task, &response, &status,
+ idev, request, task, &response, &status,
&complete_to_host);
break;
/* Fail the I/O so it can be retried. */
response = SAS_TASK_UNDELIVERED;
- if ((isci_device->status == isci_stopping) ||
- (isci_device->status == isci_stopped))
+ if (!idev)
status = SAS_DEVICE_UNKNOWN;
else
status = SAS_ABORTED_TASK;
* that we ignore the quiesce state, since we are
* concerned about the actual device state.
*/
- if ((isci_device->status == isci_stopping) ||
- (isci_device->status == isci_stopped))
+ if (!idev)
status = SAS_DEVICE_UNKNOWN;
else
status = SAS_ABORTED_TASK;
/* complete the io request to the core. */
scic_controller_complete_io(&isci_host->sci,
- &isci_device->sci,
+ request->sci.target_device,
&request->sci);
+ isci_put_device(idev);
+
/* set terminated handle so it cannot be completed or
* terminated again, and to cause any calls into abort
* task to recognize the already completed case.
}
static struct isci_request *isci_request_alloc_core(struct isci_host *ihost,
- struct isci_remote_device *idev,
gfp_t gfp_flags)
{
dma_addr_t handle;
spin_lock_init(&ireq->state_lock);
ireq->request_daddr = handle;
ireq->isci_host = ihost;
- ireq->isci_device = idev;
ireq->io_request_completion = NULL;
ireq->terminated = false;
static struct isci_request *isci_request_alloc_io(struct isci_host *ihost,
struct sas_task *task,
- struct isci_remote_device *idev,
gfp_t gfp_flags)
{
struct isci_request *ireq;
- ireq = isci_request_alloc_core(ihost, idev, gfp_flags);
+ ireq = isci_request_alloc_core(ihost, gfp_flags);
if (ireq) {
ireq->ttype_ptr.io_task_ptr = task;
ireq->ttype = io_task;
struct isci_request *isci_request_alloc_tmf(struct isci_host *ihost,
struct isci_tmf *isci_tmf,
- struct isci_remote_device *idev,
gfp_t gfp_flags)
{
struct isci_request *ireq;
- ireq = isci_request_alloc_core(ihost, idev, gfp_flags);
+ ireq = isci_request_alloc_core(ihost, gfp_flags);
if (ireq) {
ireq->ttype_ptr.tmf_task_ptr = isci_tmf;
ireq->ttype = tmf_task;
return ireq;
}
-int isci_request_execute(struct isci_host *ihost, struct sas_task *task,
- gfp_t gfp_flags)
+int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev,
+ struct sas_task *task, gfp_t gfp_flags)
{
enum sci_status status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
- struct scic_sds_remote_device *sci_dev;
- struct isci_remote_device *idev;
struct isci_request *ireq;
unsigned long flags;
int ret = 0;
- idev = task->dev->lldd_dev;
- sci_dev = &idev->sci;
-
/* do common allocation and init of request object. */
- ireq = isci_request_alloc_io(ihost, task, idev, gfp_flags);
+ ireq = isci_request_alloc_io(ihost, task, gfp_flags);
if (!ireq)
goto out;
spin_lock_irqsave(&ihost->scic_lock, flags);
/* send the request, let the core assign the IO TAG. */
- status = scic_controller_start_io(&ihost->sci, sci_dev,
- &ireq->sci,
+ status = scic_controller_start_io(&ihost->sci, &idev->sci, &ireq->sci,
SCI_CONTROLLER_INVALID_IO_TAG);
if (status != SCI_SUCCESS &&
status != SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
{
struct isci_host *ihost = dev_to_ihost(task->dev);
- struct isci_remote_device *device;
+ struct isci_remote_device *idev;
unsigned long flags;
int ret;
enum sci_status status;
dev_dbg(&ihost->pdev->dev,
"task = %p, num = %d; dev = %p; cmd = %p\n",
task, num, task->dev, task->uldd_task);
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ idev = isci_lookup_device(task->dev);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
- device = task->dev->lldd_dev;
-
- if (device)
- device_status = device->status;
+ if (idev)
+ device_status = idev->status;
else
device_status = isci_freed;
__func__,
task,
isci_host_get_state(ihost),
- device,
+ idev,
device_status);
if (device_status == isci_ready) {
spin_unlock_irqrestore(&task->task_state_lock, flags);
/* build and send the request. */
- status = isci_request_execute(ihost, task, gfp_flags);
+ status = isci_request_execute(ihost, idev, task, gfp_flags);
if (status != SCI_SUCCESS) {
}
}
}
+ isci_put_device(idev);
}
return 0;
}
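
The submission path above also fixes the ownership contract for isci_request_execute(): the caller looks the device up under ihost->scic_lock, holds that reference across the call, and drops it afterwards whether or not the submit succeeded (the started request takes its own reference in the core, per the kref_get() in the start path). A hypothetical caller showing that contract:

/* Hypothetical caller, illustration only: the lookup reference belongs to
 * the caller for the duration of the submit; isci_put_device() is assumed
 * NULL-safe (see the earlier sketch), so the error path needs no check.
 */
static int example_submit(struct isci_host *ihost, struct sas_task *task,
			  gfp_t gfp_flags)
{
	struct isci_remote_device *idev;
	unsigned long flags;
	int status = SCI_FAILURE;

	spin_lock_irqsave(&ihost->scic_lock, flags);
	idev = isci_lookup_device(task->dev);
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	if (idev)
		status = isci_request_execute(ihost, idev, task, gfp_flags);

	isci_put_device(idev);
	return status;
}
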
static struct isci_request *isci_task_request_build(struct isci_host *ihost,
+ struct isci_remote_device *idev,
struct isci_tmf *isci_tmf)
{
- struct scic_sds_remote_device *sci_dev;
enum sci_status status = SCI_FAILURE;
struct isci_request *ireq = NULL;
- struct isci_remote_device *idev;
struct domain_device *dev;
dev_dbg(&ihost->pdev->dev,
"%s: isci_tmf = %p\n", __func__, isci_tmf);
- idev = isci_tmf->device;
- sci_dev = &idev->sci;
dev = idev->domain_dev;
/* do common allocation and init of request object. */
- ireq = isci_request_alloc_tmf(ihost, isci_tmf, idev, GFP_ATOMIC);
+ ireq = isci_request_alloc_tmf(ihost, isci_tmf, GFP_ATOMIC);
if (!ireq)
return NULL;
/* let the core do it's construct. */
- status = scic_task_request_construct(&ihost->sci, sci_dev,
+ status = scic_task_request_construct(&ihost->sci, &idev->sci,
SCI_CONTROLLER_INVALID_IO_TAG,
&ireq->sci);
return ireq;
}
-/**
- * isci_task_execute_tmf() - This function builds and sends a task request,
- * then waits for the completion.
- * @isci_host: This parameter specifies the ISCI host object
- * @tmf: This parameter is the pointer to the task management structure for
- * this request.
- * @timeout_ms: This parameter specifies the timeout period for the task
- * management request.
- *
- * TMF_RESP_FUNC_COMPLETE on successful completion of the TMF (this includes
- * error conditions reported in the IU status), or TMF_RESP_FUNC_FAILED.
- */
-int isci_task_execute_tmf(struct isci_host *ihost, struct isci_tmf *tmf,
- unsigned long timeout_ms)
+int isci_task_execute_tmf(struct isci_host *ihost,
+ struct isci_remote_device *isci_device,
+ struct isci_tmf *tmf, unsigned long timeout_ms)
{
DECLARE_COMPLETION_ONSTACK(completion);
enum sci_task_status status = SCI_TASK_FAILURE;
struct scic_sds_remote_device *sci_device;
- struct isci_remote_device *isci_device = tmf->device;
struct isci_request *ireq;
int ret = TMF_RESP_FUNC_FAILED;
unsigned long flags;
/* Assign the pointer to the TMF's completion kernel wait structure. */
tmf->complete = &completion;
- ireq = isci_task_request_build(ihost, tmf);
+ ireq = isci_task_request_build(ihost, isci_device, tmf);
if (!ireq) {
dev_warn(&ihost->pdev->dev,
"%s: isci_task_request_build failed\n",
if (tmf->cb_state_func != NULL)
tmf->cb_state_func(isci_tmf_timed_out, tmf, tmf->cb_data);
- status = scic_controller_terminate_request(
- &ireq->isci_host->sci,
- &ireq->isci_device->sci,
- &ireq->sci);
+ status = scic_controller_terminate_request(&ihost->sci,
+ &isci_device->sci,
+ &ireq->sci);
spin_unlock_irqrestore(&ihost->scic_lock, flags);
}
void isci_task_build_tmf(
struct isci_tmf *tmf,
- struct isci_remote_device *isci_device,
enum isci_tmf_function_codes code,
void (*tmf_sent_cb)(enum isci_tmf_cb_state,
struct isci_tmf *,
void *),
void *cb_data)
{
- dev_dbg(&isci_device->isci_port->isci_host->pdev->dev,
- "%s: isci_device = %p\n", __func__, isci_device);
-
memset(tmf, 0, sizeof(*tmf));
- tmf->device = isci_device;
tmf->tmf_code = code;
-
tmf->cb_state_func = tmf_sent_cb;
tmf->cb_data = cb_data;
}
static void isci_task_build_abort_task_tmf(
struct isci_tmf *tmf,
- struct isci_remote_device *isci_device,
enum isci_tmf_function_codes code,
void (*tmf_sent_cb)(enum isci_tmf_cb_state,
struct isci_tmf *,
void *),
struct isci_request *old_request)
{
- isci_task_build_tmf(tmf, isci_device, code, tmf_sent_cb,
+ isci_task_build_tmf(tmf, code, tmf_sent_cb,
(void *)old_request);
tmf->io_tag = old_request->io_tag;
}
-static struct isci_request *isci_task_get_request_from_task(
- struct sas_task *task,
- struct isci_remote_device **isci_device)
-{
-
- struct isci_request *request = NULL;
- unsigned long flags;
-
- spin_lock_irqsave(&task->task_state_lock, flags);
-
- request = task->lldd_task;
-
- /* If task is already done, the request isn't valid */
- if (!(task->task_state_flags & SAS_TASK_STATE_DONE) &&
- (task->task_state_flags & SAS_TASK_AT_INITIATOR) &&
- (request != NULL)) {
-
- if (isci_device != NULL)
- *isci_device = request->isci_device;
- }
-
- spin_unlock_irqrestore(&task->task_state_lock, flags);
-
- return request;
-}
-
/**
* isci_task_validate_request_to_abort() - This function checks the given I/O
* against the "started" state. If the request is still "started", it's
* value is "TMF_RESP_FUNC_COMPLETE", or the request timed-out (or
* was otherwise unable to be executed ("TMF_RESP_FUNC_FAILED").
*/
- isci_task_build_tmf(&tmf, isci_device, isci_tmf_ssp_lun_reset, NULL,
- NULL);
+ isci_task_build_tmf(&tmf, isci_tmf_ssp_lun_reset, NULL, NULL);
#define ISCI_LU_RESET_TIMEOUT_MS 2000 /* 2 second timeout. */
- ret = isci_task_execute_tmf(isci_host, &tmf, ISCI_LU_RESET_TIMEOUT_MS);
+ ret = isci_task_execute_tmf(isci_host, isci_device, &tmf, ISCI_LU_RESET_TIMEOUT_MS);
if (ret == TMF_RESP_FUNC_COMPLETE)
dev_dbg(&isci_host->pdev->dev,
int isci_task_lu_reset(struct domain_device *domain_device, u8 *lun)
{
struct isci_host *isci_host = dev_to_ihost(domain_device);
- struct isci_remote_device *isci_device = NULL;
+ struct isci_remote_device *isci_device;
+ unsigned long flags;
int ret;
- bool device_stopping = false;
- isci_device = domain_device->lldd_dev;
+ spin_lock_irqsave(&isci_host->scic_lock, flags);
+ isci_device = isci_lookup_device(domain_device);
+ spin_unlock_irqrestore(&isci_host->scic_lock, flags);
dev_dbg(&isci_host->pdev->dev,
"%s: domain_device=%p, isci_host=%p; isci_device=%p\n",
__func__, domain_device, isci_host, isci_device);
- if (isci_device != NULL) {
- device_stopping = (isci_device->status == isci_stopping)
- || (isci_device->status == isci_stopped);
+ if (isci_device)
set_bit(IDEV_EH, &isci_device->flags);
- }
/* If there is a device reset pending on any request in the
* device's list, fail this LUN reset request in order to
* escalate to the device reset.
*/
- if (!isci_device || device_stopping ||
+ if (!isci_device ||
isci_device_is_reset_pending(isci_host, isci_device)) {
dev_warn(&isci_host->pdev->dev,
"%s: No dev (%p), or "
"RESET PENDING: domain_device=%p\n",
__func__, isci_device, domain_device);
- return TMF_RESP_FUNC_FAILED;
+ ret = TMF_RESP_FUNC_FAILED;
+ goto out;
}
/* Send the task management part of the reset. */
isci_terminate_pending_requests(isci_host,
isci_device);
+ out:
+ isci_put_device(isci_device);
return ret;
}
int ret = TMF_RESP_FUNC_FAILED;
unsigned long flags;
bool any_dev_reset = false;
- bool device_stopping;
/* Get the isci_request reference from the task. Note that
* this check does not depend on the pending request list
* in the device, because tasks driving resets may land here
* after completion in the core.
*/
- old_request = isci_task_get_request_from_task(task, &isci_device);
+ spin_lock_irqsave(&isci_host->scic_lock, flags);
+ spin_lock(&task->task_state_lock);
+
+ old_request = task->lldd_task;
+
+ /* If task is already done, the request isn't valid */
+ if (!(task->task_state_flags & SAS_TASK_STATE_DONE) &&
+ (task->task_state_flags & SAS_TASK_AT_INITIATOR) &&
+ old_request)
+ isci_device = isci_lookup_device(task->dev);
+
+ spin_unlock(&task->task_state_lock);
+ spin_unlock_irqrestore(&isci_host->scic_lock, flags);
dev_dbg(&isci_host->pdev->dev,
"%s: task = %p\n", __func__, task);
- /* Check if the device has been / is currently being removed.
- * If so, no task management will be done, and the I/O will
- * be terminated.
- */
- device_stopping = (isci_device->status == isci_stopping)
- || (isci_device->status == isci_stopped);
+ if (!isci_device || !old_request)
+ goto out;
- /* XXX need to fix device lookup lifetime (needs to be done
- * under scic_lock, among other things...), but for now assume
- * the device is available like the above code
- */
set_bit(IDEV_EH, &isci_device->flags);
/* This version of the driver will fail abort requests for
* SATA/STP. Failing the abort request this way will cause the
* SCSI error handler thread to escalate to LUN reset
*/
- if (sas_protocol_ata(task->task_proto) && !device_stopping) {
+ if (sas_protocol_ata(task->task_proto)) {
dev_warn(&isci_host->pdev->dev,
" task %p is for a STP/SATA device;"
" returning TMF_RESP_FUNC_FAILED\n"
" to cause a LUN reset...\n", task);
- return TMF_RESP_FUNC_FAILED;
+ goto out;
}
dev_dbg(&isci_host->pdev->dev,
"%s: old_request == %p\n", __func__, old_request);
- if (!device_stopping)
- any_dev_reset = isci_device_is_reset_pending(isci_host,isci_device);
+ any_dev_reset = isci_device_is_reset_pending(isci_host, isci_device);
spin_lock_irqsave(&task->task_state_lock, flags);
- /* Don't do resets to stopping devices. */
- if (device_stopping) {
-
- task->task_state_flags &= ~SAS_TASK_NEED_DEV_RESET;
- any_dev_reset = false;
-
- } else /* See if there is a pending device reset for this device. */
- any_dev_reset = any_dev_reset
- || (task->task_state_flags & SAS_TASK_NEED_DEV_RESET);
+ any_dev_reset = any_dev_reset || (task->task_state_flags & SAS_TASK_NEED_DEV_RESET);
/* If the extraction of the request reference from the task
* failed, then the request has been completed (or if there is a
"%s: abort task not needed for %p\n",
__func__, task);
}
-
- return ret;
+ goto out;
}
else
spin_unlock_irqrestore(&task->task_state_lock, flags);
"%s: device = %p; old_request %p already being aborted\n",
__func__,
isci_device, old_request);
-
- return TMF_RESP_FUNC_COMPLETE;
+ ret = TMF_RESP_FUNC_COMPLETE;
+ goto out;
}
if ((task->task_proto == SAS_PROTOCOL_SMP)
- || device_stopping
|| old_request->complete_in_target
) {
dev_dbg(&isci_host->pdev->dev,
"%s: SMP request (%d)"
- " or device is stopping (%d)"
" or complete_in_target (%d), thus no TMF\n",
__func__, (task->task_proto == SAS_PROTOCOL_SMP),
- device_stopping, old_request->complete_in_target);
+ old_request->complete_in_target);
/* Set the state on the task. */
isci_task_all_done(task);
*/
} else {
/* Fill in the tmf structure */
- isci_task_build_abort_task_tmf(&tmf, isci_device,
- isci_tmf_ssp_task_abort,
+ isci_task_build_abort_task_tmf(&tmf, isci_tmf_ssp_task_abort,
isci_abort_task_process_cb,
old_request);
spin_unlock_irqrestore(&isci_host->scic_lock, flags);
#define ISCI_ABORT_TASK_TIMEOUT_MS 500 /* half second timeout. */
- ret = isci_task_execute_tmf(isci_host, &tmf,
+ ret = isci_task_execute_tmf(isci_host, isci_device, &tmf,
ISCI_ABORT_TASK_TIMEOUT_MS);
if (ret != TMF_RESP_FUNC_COMPLETE)
/* Make sure we do not leave a reference to aborted_io_completion */
old_request->io_request_completion = NULL;
+ out:
+ isci_put_device(isci_device);
return ret;
}
struct isci_request *ireq,
enum sci_task_status completion_status)
{
- struct isci_remote_device *idev = ireq->isci_device;
struct isci_tmf *tmf = isci_request_access_tmf(ireq);
struct completion *tmf_complete;
struct scic_sds_request *sci_req = &ireq->sci;
/* PRINT_TMF( ((struct isci_tmf *)request->task)); */
tmf_complete = tmf->complete;
- scic_controller_complete_io(&ihost->sci, &idev->sci, &ireq->sci);
+ scic_controller_complete_io(&ihost->sci, ireq->sci.target_device, &ireq->sci);
/* set the 'terminated' flag handle to make sure it cannot be terminated
* or completed again.
*/
dev_dbg(&ihost->pdev->dev, "%s: done\n", __func__);
}
-static int isci_reset_device(struct domain_device *dev, int hard_reset)
+static int isci_reset_device(struct isci_host *ihost,
+ struct isci_remote_device *idev, int hard_reset)
{
- struct isci_remote_device *idev = dev->lldd_dev;
- struct sas_phy *phy = sas_find_local_phy(dev);
- struct isci_host *ihost = dev_to_ihost(dev);
+ struct sas_phy *phy = sas_find_local_phy(idev->domain_dev);
struct isci_port *iport = idev->isci_port;
enum sci_status status;
unsigned long flags;
dev_dbg(&ihost->pdev->dev, "%s: idev %p\n", __func__, idev);
- if (!idev) {
- dev_warn(&ihost->pdev->dev,
- "%s: idev is GONE!\n",
- __func__);
-
- return TMF_RESP_FUNC_COMPLETE; /* Nothing to reset. */
- }
-
spin_lock_irqsave(&ihost->scic_lock, flags);
status = scic_remote_device_reset(&idev->sci);
if (status != SCI_SUCCESS) {
int isci_task_I_T_nexus_reset(struct domain_device *dev)
{
struct isci_host *ihost = dev_to_ihost(dev);
- int ret = TMF_RESP_FUNC_FAILED, hard_reset = 1;
struct isci_remote_device *idev;
+ int ret, hard_reset = 1;
unsigned long flags;
- /* XXX mvsas is not protecting against ->lldd_dev_gone(), are we
- * being too paranoid, or is mvsas busted?!
- */
spin_lock_irqsave(&ihost->scic_lock, flags);
- idev = dev->lldd_dev;
- if (!idev || !test_bit(IDEV_EH, &idev->flags))
- ret = TMF_RESP_FUNC_COMPLETE;
+ idev = isci_lookup_device(dev);
spin_unlock_irqrestore(&ihost->scic_lock, flags);
- if (ret == TMF_RESP_FUNC_COMPLETE)
- return ret;
+ if (!idev || !test_bit(IDEV_EH, &idev->flags)) {
+ ret = TMF_RESP_FUNC_COMPLETE;
+ goto out;
+ }
if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))
hard_reset = 0;
- return isci_reset_device(dev, hard_reset);
+ ret = isci_reset_device(ihost, idev, hard_reset);
+ out:
+ isci_put_device(idev);
+ return ret;
}
int isci_bus_reset_handler(struct scsi_cmnd *cmd)
{
struct domain_device *dev = sdev_to_domain_dev(cmd->device);
- int hard_reset = 1;
+ struct isci_host *ihost = dev_to_ihost(dev);
+ struct isci_remote_device *idev;
+ int ret, hard_reset = 1;
+ unsigned long flags;
if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))
hard_reset = 0;
- return isci_reset_device(dev, hard_reset);
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ idev = isci_lookup_device(dev);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ if (!idev) {
+ ret = TMF_RESP_FUNC_COMPLETE;
+ goto out;
+ }
+
+ ret = isci_reset_device(ihost, idev, hard_reset);
+ out:
+ isci_put_device(idev);
+ return ret;
}