author    | Jeff Skirvin <jeffrey.d.skirvin@intel.com> | 2011-03-09 02:22:07 (GMT)
committer | Dan Williams <dan.j.williams@intel.com>    | 2011-07-03 10:55:30 (GMT)
commit    | f0846c68912545d70da16b2fbedded37ea4394d8
tree      | 76bb21f978793eaa161390307c8834b87cb644a9
parent    | 1fad9e934a43407c1ba397b1b6b8882aa8a2cafd
isci: Cleaning up task execute path.
Made sure the device ready check accounts for all states.
Moved the aborted-task check into the loop that pulls task requests
off of the submitted list.
Signed-off-by: Jeff Skirvin <jeffrey.d.skirvin@intel.com>
Signed-off-by: Jacek Danecki <Jacek.Danecki@intel.com>
[remove host and device starting state checks]
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
-rw-r--r-- | drivers/scsi/isci/task.c | 141 |
1 files changed, 71 insertions, 70 deletions
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
index e9bfc22..3dc9ef3 100644
--- a/drivers/scsi/isci/task.c
+++ b/drivers/scsi/isci/task.c
@@ -83,21 +83,10 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
 	unsigned long flags;
 	int ret;
 	enum sci_status status;
-
+	enum isci_status device_status;
 
 	dev_dbg(task->dev->port->ha->dev, "%s: num=%d\n", __func__, num);
 
-	if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
-
-		isci_task_complete_for_upper_layer(
-			task,
-			SAS_TASK_UNDELIVERED,
-			SAM_STAT_TASK_ABORTED,
-			isci_perform_normal_io_completion
-			);
-
-		return 0; /* The I/O was accepted (and failed). */
-	}
 	if ((task->dev == NULL) || (task->dev->port == NULL)) {
 
 		/* Indicate SAS_TASK_UNDELIVERED, so that the scsi midlayer
@@ -143,93 +132,105 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
 			/* We don't have a valid host reference, so we
 			 * can't control the host queueing condition.
 			 */
-			continue;
+			goto next_task;
 		}
 
 		device = isci_dev_from_domain_dev(task->dev);
 		isci_host = isci_host_from_sas_ha(task->dev->port->ha);
 
-		if (device && device->status == isci_ready) {
+		if (device)
+			device_status = device->status;
+		else
+			device_status = isci_freed;
+
+		/* From this point onward, any process that needs to guarantee
+		 * that there is no kernel I/O being started will have to wait
+		 * for the quiesce spinlock.
+		 */
+
+		if (device_status != isci_ready_for_io) {
 
			/* Forces a retry from scsi mid layer. */
 			dev_warn(task->dev->port->ha->dev,
 				 "%s: task %p: isci_host->status = %d, "
-				 "device = %p\n",
+				 "device = %p; device_status = 0x%x\n\n",
 				 __func__,
 				 task,
 				 isci_host_get_state(isci_host),
-				 device);
-
-			if (device)
-				dev_dbg(task->dev->port->ha->dev,
-					"%s: device->status = 0x%x\n",
-					__func__, device->status);
+				 device, device_status);
 
-			/* Indicate QUEUE_FULL so that the scsi midlayer
-			 * retries.
-			 */
-			isci_task_complete_for_upper_layer(
-				task,
-				SAS_TASK_COMPLETE,
-				SAS_QUEUE_FULL,
-				isci_perform_normal_io_completion
-				);
+			if (device_status == isci_ready) {
+				/* Indicate QUEUE_FULL so that the scsi midlayer
+				 * retries.
+				 */
+				isci_task_complete_for_upper_layer(
+					task,
+					SAS_TASK_COMPLETE,
+					SAS_QUEUE_FULL,
+					isci_perform_normal_io_completion
+					);
+			} else {
+				/* Else, the device is going down. */
+				isci_task_complete_for_upper_layer(
+					task,
+					SAS_TASK_UNDELIVERED,
+					SAS_DEVICE_UNKNOWN,
+					isci_perform_normal_io_completion
+					);
+			}
 			isci_host_can_dequeue(isci_host, 1);
-		}
-		/* the device is going down... */
-		else if (!device || device->status != isci_ready_for_io) {
+		} else {
+			/* There is a device and it's ready for I/O. */
+			spin_lock_irqsave(&task->task_state_lock, flags);
 
-			dev_dbg(task->dev->port->ha->dev,
-				"%s: task %p: isci_host->status = %d, "
-				"device = %p\n",
-				__func__,
-				task,
-				isci_host_get_state(isci_host),
-				device);
+			if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
 
-			if (device)
-				dev_dbg(task->dev->port->ha->dev,
-					"%s: device->status = 0x%x\n",
-					__func__, device->status);
+				spin_unlock_irqrestore(&task->task_state_lock,
						       flags);
 
-			/* Indicate SAS_TASK_UNDELIVERED, so that the scsi
-			 * midlayer removes the target.
-			 */
-			isci_task_complete_for_upper_layer(
-				task,
-				SAS_TASK_UNDELIVERED,
-				SAS_DEVICE_UNKNOWN,
-				isci_perform_normal_io_completion
-				);
-			isci_host_can_dequeue(isci_host, 1);
+				isci_task_complete_for_upper_layer(
+					task,
+					SAS_TASK_UNDELIVERED,
+					SAM_STAT_TASK_ABORTED,
+					isci_perform_normal_io_completion
+					);
 
-		} else {
-			/* build and send the request. */
-			status = isci_request_execute(isci_host, task, &request,
-						      gfp_flags);
+				/* The I/O was aborted. */
 
-			if (status == SCI_SUCCESS) {
-				spin_lock_irqsave(&task->task_state_lock, flags);
+			} else {
 				task->task_state_flags |= SAS_TASK_AT_INITIATOR;
 				spin_unlock_irqrestore(&task->task_state_lock, flags);
-			} else {
-				/* Indicate QUEUE_FULL so that the scsi
-				 * midlayer retries. if the request
-				 * failed for remote device reasons,
-				 * it gets returned as
-				 * SAS_TASK_UNDELIVERED next time
-				 * through.
-				 */
-				isci_task_complete_for_upper_layer(
+
+				/* build and send the request. */
+				status = isci_request_execute(isci_host, task, &request,
+							      gfp_flags);
+
+				if (status != SCI_SUCCESS) {
+
+					spin_lock_irqsave(&task->task_state_lock, flags);
+					/* Did not really start this command. */
+					task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+					spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+					/* Indicate QUEUE_FULL so that the scsi
+					 * midlayer retries. if the request
+					 * failed for remote device reasons,
+					 * it gets returned as
+					 * SAS_TASK_UNDELIVERED next time
+					 * through.
+					 */
+					isci_task_complete_for_upper_layer(
 						task,
 						SAS_TASK_COMPLETE,
 						SAS_QUEUE_FULL,
 						isci_perform_normal_io_completion
 						);
-				isci_host_can_dequeue(isci_host, 1);
+					isci_host_can_dequeue(isci_host, 1);
+				}
 			}
 		}
 
+next_task:
 		task = list_entry(task->list.next, struct sas_task, list);
 	} while (--num > 0);
 	return 0;
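
For quick orientation, the outline below summarizes the per-task flow that results from this change: the device-ready check now distinguishes a transiently busy device (retried via SAS_QUEUE_FULL) from one that is going away (SAS_DEVICE_UNKNOWN), and the aborted-task check runs inside the loop under task_state_lock. This is an illustrative sketch only, not the driver source; it reuses the helper calls visible in the diff above and condenses the declarations, the invalid-host-reference path, and the next_task bookkeeping.

	/* Illustrative sketch of the reworked loop body (see the diff above
	 * for the authoritative code).
	 */
	do {
		device = isci_dev_from_domain_dev(task->dev);
		isci_host = isci_host_from_sas_ha(task->dev->port->ha);
		device_status = device ? device->status : isci_freed;

		if (device_status != isci_ready_for_io) {
			if (device_status == isci_ready)
				/* Transient condition: have the midlayer retry. */
				isci_task_complete_for_upper_layer(task,
					SAS_TASK_COMPLETE, SAS_QUEUE_FULL,
					isci_perform_normal_io_completion);
			else
				/* The device is going down. */
				isci_task_complete_for_upper_layer(task,
					SAS_TASK_UNDELIVERED, SAS_DEVICE_UNKNOWN,
					isci_perform_normal_io_completion);
			isci_host_can_dequeue(isci_host, 1);
		} else {
			spin_lock_irqsave(&task->task_state_lock, flags);
			if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
				/* Aborted-task check, now inside the loop. */
				spin_unlock_irqrestore(&task->task_state_lock, flags);
				isci_task_complete_for_upper_layer(task,
					SAS_TASK_UNDELIVERED, SAM_STAT_TASK_ABORTED,
					isci_perform_normal_io_completion);
			} else {
				task->task_state_flags |= SAS_TASK_AT_INITIATOR;
				spin_unlock_irqrestore(&task->task_state_lock, flags);

				if (isci_request_execute(isci_host, task, &request,
							 gfp_flags) != SCI_SUCCESS) {
					/* Did not really start: roll back and retry. */
					spin_lock_irqsave(&task->task_state_lock, flags);
					task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
					spin_unlock_irqrestore(&task->task_state_lock, flags);
					isci_task_complete_for_upper_layer(task,
						SAS_TASK_COMPLETE, SAS_QUEUE_FULL,
						isci_perform_normal_io_completion);
					isci_host_can_dequeue(isci_host, 1);
				}
			}
		}
		task = list_entry(task->list.next, struct sas_task, list);
	} while (--num > 0);

Note how a failed isci_request_execute() now clears SAS_TASK_AT_INITIATOR before reporting SAS_QUEUE_FULL, whereas previously the flag was only set after a successful submit.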