author    James Smart <james.smart@emulex.com>	2012-08-03 16:36:13 (GMT)
committer James Bottomley <JBottomley@Parallels.com>	2012-09-14 13:41:19 (GMT)
commit    67d1273385d454a3f1b083b807f2cdda95e995ec (patch)
tree      93f6ff11c119b3e9460e6f302e09eaedf1844259 /drivers/scsi/lpfc/lpfc_init.c
parent    aa6fbb757ab6fce4647bafd28f9a49e5b0fa07db (diff)
download  linux-67d1273385d454a3f1b083b807f2cdda95e995ec.tar.xz
[SCSI] lpfc 8.3.33: Tie parallel I/O queues into separate MSIX vectors
Add fcp_io_channel module attribute to control the number of parallel I/O queues.

Signed-off-by: James Smart <james.smart@emulex.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
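For context, the new knob is exposed as a module parameter defined in lpfc_attr.c, which is outside this diff. A minimal, hypothetical sketch of such a parameter follows; the parameter name matches the commit, but the default value and permission bits are illustrative assumptions rather than values taken from this patch (the real driver creates its attributes through macros in lpfc_attr.c):

/*
 * Hypothetical sketch only: the actual fcp_io_channel attribute is
 * defined in lpfc_attr.c via the driver's attribute macros.  The
 * default shown here is an assumption for illustration.
 */
#include <linux/module.h>
#include <linux/stat.h>

static int lpfc_fcp_io_channel = 4;	/* assumed default: 4 I/O channels */
module_param(lpfc_fcp_io_channel, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_fcp_io_channel,
		 "Number of parallel FCP I/O channels (EQ/CQ/WQ sets), one per MSI-X vector");

Each I/O channel gets its own event queue, completion queue, and work queue, and is bound to its own MSI-X vector by the request_irq() loop added in lpfc_sli4_enable_msix() in the diff below.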
Diffstat (limited to 'drivers/scsi/lpfc/lpfc_init.c')
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c  |  752
1 file changed, 329 insertions(+), 423 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 176302f..164aa87 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -4702,6 +4702,10 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
/* Get all the module params for configuring this host */
lpfc_get_cfgparam(phba);
phba->max_vpi = LPFC_MAX_VPI;
+
+ /* Eventually cfg_fcp_eq_count / cfg_fcp_wq_count will be deprecated */
+ phba->cfg_fcp_io_channel = phba->cfg_fcp_eq_count;
+
/* This will be set to correct value after the read_config mbox */
phba->max_vports = 0;
@@ -4722,7 +4726,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
*/
if (!phba->sli.ring)
phba->sli.ring = kzalloc(
- (LPFC_SLI3_MAX_RING + phba->cfg_fcp_eq_count) *
+ (LPFC_SLI3_MAX_RING + phba->cfg_fcp_io_channel) *
sizeof(struct lpfc_sli_ring), GFP_KERNEL);
if (!phba->sli.ring)
return -ENOMEM;
@@ -4931,21 +4935,15 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
goto out_remove_rpi_hdrs;
}
- /*
- * The cfg_fcp_eq_count can be zero whenever there is exactly one
- * interrupt vector. This is not an error
- */
- if (phba->cfg_fcp_eq_count) {
- phba->sli4_hba.fcp_eq_hdl =
- kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
- phba->cfg_fcp_eq_count), GFP_KERNEL);
- if (!phba->sli4_hba.fcp_eq_hdl) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2572 Failed allocate memory for "
- "fast-path per-EQ handle array\n");
- rc = -ENOMEM;
- goto out_free_fcf_rr_bmask;
- }
+ phba->sli4_hba.fcp_eq_hdl =
+ kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
+ phba->cfg_fcp_io_channel), GFP_KERNEL);
+ if (!phba->sli4_hba.fcp_eq_hdl) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2572 Failed allocate memory for "
+ "fast-path per-EQ handle array\n");
+ rc = -ENOMEM;
+ goto out_free_fcf_rr_bmask;
}
phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
@@ -6538,53 +6536,26 @@ lpfc_setup_endian_order(struct lpfc_hba *phba)
static int
lpfc_sli4_queue_verify(struct lpfc_hba *phba)
{
- int cfg_fcp_wq_count;
- int cfg_fcp_eq_count;
+ int cfg_fcp_io_channel;
/*
- * Sanity check for confiugred queue parameters against the run-time
+ * Sanity check for configured queue parameters against the run-time
* device parameters
*/
- /* Sanity check on FCP fast-path WQ parameters */
- cfg_fcp_wq_count = phba->cfg_fcp_wq_count;
- if (cfg_fcp_wq_count >
- (phba->sli4_hba.max_cfg_param.max_wq - LPFC_SP_WQN_DEF)) {
- cfg_fcp_wq_count = phba->sli4_hba.max_cfg_param.max_wq -
- LPFC_SP_WQN_DEF;
- if (cfg_fcp_wq_count < LPFC_FP_WQN_MIN) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2581 Not enough WQs (%d) from "
- "the pci function for supporting "
- "FCP WQs (%d)\n",
- phba->sli4_hba.max_cfg_param.max_wq,
- phba->cfg_fcp_wq_count);
- goto out_error;
- }
- lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
- "2582 Not enough WQs (%d) from the pci "
- "function for supporting the requested "
- "FCP WQs (%d), the actual FCP WQs can "
- "be supported: %d\n",
- phba->sli4_hba.max_cfg_param.max_wq,
- phba->cfg_fcp_wq_count, cfg_fcp_wq_count);
- }
- /* The actual number of FCP work queues adopted */
- phba->cfg_fcp_wq_count = cfg_fcp_wq_count;
-
- /* Sanity check on FCP fast-path EQ parameters */
- cfg_fcp_eq_count = phba->cfg_fcp_eq_count;
- if (cfg_fcp_eq_count >
- (phba->sli4_hba.max_cfg_param.max_eq - LPFC_SP_EQN_DEF)) {
- cfg_fcp_eq_count = phba->sli4_hba.max_cfg_param.max_eq -
- LPFC_SP_EQN_DEF;
- if (cfg_fcp_eq_count < LPFC_FP_EQN_MIN) {
+ /* Sanity check on HBA EQ parameters */
+ cfg_fcp_io_channel = phba->cfg_fcp_io_channel;
+
+ if (cfg_fcp_io_channel >
+ phba->sli4_hba.max_cfg_param.max_eq) {
+ cfg_fcp_io_channel = phba->sli4_hba.max_cfg_param.max_eq;
+ if (cfg_fcp_io_channel < LPFC_FCP_IO_CHAN_MIN) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2574 Not enough EQs (%d) from the "
"pci function for supporting FCP "
"EQs (%d)\n",
phba->sli4_hba.max_cfg_param.max_eq,
- phba->cfg_fcp_eq_count);
+ phba->cfg_fcp_io_channel);
goto out_error;
}
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
@@ -6593,22 +6564,16 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
"FCP EQs (%d), the actual FCP EQs can "
"be supported: %d\n",
phba->sli4_hba.max_cfg_param.max_eq,
- phba->cfg_fcp_eq_count, cfg_fcp_eq_count);
- }
- /* It does not make sense to have more EQs than WQs */
- if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
- "2593 The FCP EQ count(%d) cannot be greater "
- "than the FCP WQ count(%d), limiting the "
- "FCP EQ count to %d\n", cfg_fcp_eq_count,
- phba->cfg_fcp_wq_count,
- phba->cfg_fcp_wq_count);
- cfg_fcp_eq_count = phba->cfg_fcp_wq_count;
+ phba->cfg_fcp_io_channel, cfg_fcp_io_channel);
}
+
+ /* Eventually cfg_fcp_eq_count / cfg_fcp_wq_count will be deprecated */
+
/* The actual number of FCP event queues adopted */
- phba->cfg_fcp_eq_count = cfg_fcp_eq_count;
- /* The overall number of event queues used */
- phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF;
+ phba->cfg_fcp_eq_count = cfg_fcp_io_channel;
+ phba->cfg_fcp_wq_count = cfg_fcp_io_channel;
+ phba->cfg_fcp_io_channel = cfg_fcp_io_channel;
+ phba->sli4_hba.cfg_eqn = cfg_fcp_io_channel;
/* Get EQ depth from module parameter, fake the default for now */
phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
@@ -6641,50 +6606,104 @@ int
lpfc_sli4_queue_create(struct lpfc_hba *phba)
{
struct lpfc_queue *qdesc;
- int fcp_eqidx, fcp_cqidx, fcp_wqidx;
+ int idx;
/*
- * Create Event Queues (EQs)
+ * Create HBA Record arrays.
*/
+ if (!phba->cfg_fcp_io_channel)
+ return -ERANGE;
- /* Create slow path event queue */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
- phba->sli4_hba.eq_ecount);
- if (!qdesc) {
+ phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
+ phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
+ phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
+ phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
+ phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
+ phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
+
+ phba->sli4_hba.hba_eq = kzalloc((sizeof(struct lpfc_queue *) *
+ phba->cfg_fcp_io_channel), GFP_KERNEL);
+ if (!phba->sli4_hba.hba_eq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2576 Failed allocate memory for "
+ "fast-path EQ record array\n");
+ goto out_error;
+ }
+
+ phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
+ phba->cfg_fcp_io_channel), GFP_KERNEL);
+ if (!phba->sli4_hba.fcp_cq) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0496 Failed allocate slow-path EQ\n");
+ "2577 Failed allocate memory for fast-path "
+ "CQ record array\n");
+ goto out_error;
+ }
+
+ phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) *
+ phba->cfg_fcp_io_channel), GFP_KERNEL);
+ if (!phba->sli4_hba.fcp_wq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2578 Failed allocate memory for fast-path "
+ "WQ record array\n");
goto out_error;
}
- phba->sli4_hba.sp_eq = qdesc;
/*
- * Create fast-path FCP Event Queue(s). The cfg_fcp_eq_count can be
- * zero whenever there is exactly one interrupt vector. This is not
- * an error.
+ * Since the first EQ can have multiple CQs associated with it,
+ * this array is used to quickly see if we have a FCP fast-path
+ * CQ match.
*/
- if (phba->cfg_fcp_eq_count) {
- phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) *
- phba->cfg_fcp_eq_count), GFP_KERNEL);
- if (!phba->sli4_hba.fp_eq) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2576 Failed allocate memory for "
- "fast-path EQ record array\n");
- goto out_free_sp_eq;
- }
+ phba->sli4_hba.fcp_cq_map = kzalloc((sizeof(uint16_t) *
+ phba->cfg_fcp_io_channel), GFP_KERNEL);
+ if (!phba->sli4_hba.fcp_cq_map) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2545 Failed allocate memory for fast-path "
+ "CQ map\n");
+ goto out_error;
}
- for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
+
+ /*
+ * Create HBA Event Queues (EQs). The cfg_fcp_io_channel specifies
+ * how many EQs to create.
+ */
+ for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
+
+ /* Create EQs */
qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
phba->sli4_hba.eq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0497 Failed allocate fast-path EQ\n");
- goto out_free_fp_eq;
+ "0497 Failed allocate EQ (%d)\n", idx);
+ goto out_error;
}
- phba->sli4_hba.fp_eq[fcp_eqidx] = qdesc;
+ phba->sli4_hba.hba_eq[idx] = qdesc;
+
+ /* Create Fast Path FCP CQs */
+ qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+ phba->sli4_hba.cq_ecount);
+ if (!qdesc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0499 Failed allocate fast-path FCP "
+ "CQ (%d)\n", idx);
+ goto out_error;
+ }
+ phba->sli4_hba.fcp_cq[idx] = qdesc;
+
+ /* Create Fast Path FCP WQs */
+ qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
+ phba->sli4_hba.wq_ecount);
+ if (!qdesc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0503 Failed allocate fast-path FCP "
+ "WQ (%d)\n", idx);
+ goto out_error;
+ }
+ phba->sli4_hba.fcp_wq[idx] = qdesc;
}
+
/*
- * Create Complete Queues (CQs)
+ * Create Slow Path Completion Queues (CQs)
*/
/* Create slow-path Mailbox Command Complete Queue */
@@ -6693,7 +6712,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0500 Failed allocate slow-path mailbox CQ\n");
- goto out_free_fp_eq;
+ goto out_error;
}
phba->sli4_hba.mbx_cq = qdesc;
@@ -6703,59 +6722,29 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0501 Failed allocate slow-path ELS CQ\n");
- goto out_free_mbx_cq;
+ goto out_error;
}
phba->sli4_hba.els_cq = qdesc;
/*
- * Create fast-path FCP Completion Queue(s), one-to-one with FCP EQs.
- * If there are no FCP EQs then create exactly one FCP CQ.
+ * Create Slow Path Work Queues (WQs)
*/
- if (phba->cfg_fcp_eq_count)
- phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
- phba->cfg_fcp_eq_count),
- GFP_KERNEL);
- else
- phba->sli4_hba.fcp_cq = kzalloc(sizeof(struct lpfc_queue *),
- GFP_KERNEL);
- if (!phba->sli4_hba.fcp_cq) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2577 Failed allocate memory for fast-path "
- "CQ record array\n");
- goto out_free_els_cq;
- }
- fcp_cqidx = 0;
- do {
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
- phba->sli4_hba.cq_ecount);
- if (!qdesc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0499 Failed allocate fast-path FCP "
- "CQ (%d)\n", fcp_cqidx);
- goto out_free_fcp_cq;
- }
- phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc;
- } while (++fcp_cqidx < phba->cfg_fcp_eq_count);
/* Create Mailbox Command Queue */
- phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
- phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
phba->sli4_hba.mq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0505 Failed allocate slow-path MQ\n");
- goto out_free_fcp_cq;
+ goto out_error;
}
phba->sli4_hba.mbx_wq = qdesc;
/*
- * Create all the Work Queues (WQs)
+ * Create ELS Work Queues
*/
- phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
- phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
/* Create slow-path ELS Work Queue */
qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
@@ -6763,36 +6752,13 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0504 Failed allocate slow-path ELS WQ\n");
- goto out_free_mbx_wq;
+ goto out_error;
}
phba->sli4_hba.els_wq = qdesc;
- /* Create fast-path FCP Work Queue(s) */
- phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) *
- phba->cfg_fcp_wq_count), GFP_KERNEL);
- if (!phba->sli4_hba.fcp_wq) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2578 Failed allocate memory for fast-path "
- "WQ record array\n");
- goto out_free_els_wq;
- }
- for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
- phba->sli4_hba.wq_ecount);
- if (!qdesc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0503 Failed allocate fast-path FCP "
- "WQ (%d)\n", fcp_wqidx);
- goto out_free_fcp_wq;
- }
- phba->sli4_hba.fcp_wq[fcp_wqidx] = qdesc;
- }
-
/*
* Create Receive Queue (RQ)
*/
- phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
- phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
/* Create Receive Queue for header */
qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
@@ -6800,7 +6766,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0506 Failed allocate receive HRQ\n");
- goto out_free_fcp_wq;
+ goto out_error;
}
phba->sli4_hba.hdr_rq = qdesc;
@@ -6810,52 +6776,14 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0507 Failed allocate receive DRQ\n");
- goto out_free_hdr_rq;
+ goto out_error;
}
phba->sli4_hba.dat_rq = qdesc;
return 0;
-out_free_hdr_rq:
- lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
- phba->sli4_hba.hdr_rq = NULL;
-out_free_fcp_wq:
- for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) {
- lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_wqidx]);
- phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL;
- }
- kfree(phba->sli4_hba.fcp_wq);
- phba->sli4_hba.fcp_wq = NULL;
-out_free_els_wq:
- lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
- phba->sli4_hba.els_wq = NULL;
-out_free_mbx_wq:
- lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
- phba->sli4_hba.mbx_wq = NULL;
-out_free_fcp_cq:
- for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) {
- lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_cqidx]);
- phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
- }
- kfree(phba->sli4_hba.fcp_cq);
- phba->sli4_hba.fcp_cq = NULL;
-out_free_els_cq:
- lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
- phba->sli4_hba.els_cq = NULL;
-out_free_mbx_cq:
- lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
- phba->sli4_hba.mbx_cq = NULL;
-out_free_fp_eq:
- for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) {
- lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_eqidx]);
- phba->sli4_hba.fp_eq[fcp_eqidx] = NULL;
- }
- kfree(phba->sli4_hba.fp_eq);
- phba->sli4_hba.fp_eq = NULL;
-out_free_sp_eq:
- lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
- phba->sli4_hba.sp_eq = NULL;
out_error:
+ lpfc_sli4_queue_destroy(phba);
return -ENOMEM;
}
@@ -6874,58 +6802,86 @@ out_error:
void
lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
{
- int fcp_qidx;
+ int idx;
+
+ if (phba->sli4_hba.hba_eq != NULL) {
+ /* Release HBA event queue */
+ for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
+ if (phba->sli4_hba.hba_eq[idx] != NULL) {
+ lpfc_sli4_queue_free(
+ phba->sli4_hba.hba_eq[idx]);
+ phba->sli4_hba.hba_eq[idx] = NULL;
+ }
+ }
+ kfree(phba->sli4_hba.hba_eq);
+ phba->sli4_hba.hba_eq = NULL;
+ }
+
+ if (phba->sli4_hba.fcp_cq != NULL) {
+ /* Release FCP completion queue */
+ for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
+ if (phba->sli4_hba.fcp_cq[idx] != NULL) {
+ lpfc_sli4_queue_free(
+ phba->sli4_hba.fcp_cq[idx]);
+ phba->sli4_hba.fcp_cq[idx] = NULL;
+ }
+ }
+ kfree(phba->sli4_hba.fcp_cq);
+ phba->sli4_hba.fcp_cq = NULL;
+ }
+
+ if (phba->sli4_hba.fcp_wq != NULL) {
+ /* Release FCP work queue */
+ for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
+ if (phba->sli4_hba.fcp_wq[idx] != NULL) {
+ lpfc_sli4_queue_free(
+ phba->sli4_hba.fcp_wq[idx]);
+ phba->sli4_hba.fcp_wq[idx] = NULL;
+ }
+ }
+ kfree(phba->sli4_hba.fcp_wq);
+ phba->sli4_hba.fcp_wq = NULL;
+ }
+
+ /* Release FCP CQ mapping array */
+ if (phba->sli4_hba.fcp_cq_map != NULL) {
+ kfree(phba->sli4_hba.fcp_cq_map);
+ phba->sli4_hba.fcp_cq_map = NULL;
+ }
/* Release mailbox command work queue */
- lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
- phba->sli4_hba.mbx_wq = NULL;
+ if (phba->sli4_hba.mbx_wq != NULL) {
+ lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
+ phba->sli4_hba.mbx_wq = NULL;
+ }
/* Release ELS work queue */
- lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
- phba->sli4_hba.els_wq = NULL;
-
- /* Release FCP work queue */
- if (phba->sli4_hba.fcp_wq != NULL)
- for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count;
- fcp_qidx++)
- lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
- kfree(phba->sli4_hba.fcp_wq);
- phba->sli4_hba.fcp_wq = NULL;
+ if (phba->sli4_hba.els_wq != NULL) {
+ lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
+ phba->sli4_hba.els_wq = NULL;
+ }
/* Release unsolicited receive queue */
- lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
- phba->sli4_hba.hdr_rq = NULL;
- lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
- phba->sli4_hba.dat_rq = NULL;
+ if (phba->sli4_hba.hdr_rq != NULL) {
+ lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
+ phba->sli4_hba.hdr_rq = NULL;
+ }
+ if (phba->sli4_hba.dat_rq != NULL) {
+ lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
+ phba->sli4_hba.dat_rq = NULL;
+ }
/* Release ELS complete queue */
- lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
- phba->sli4_hba.els_cq = NULL;
+ if (phba->sli4_hba.els_cq != NULL) {
+ lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
+ phba->sli4_hba.els_cq = NULL;
+ }
/* Release mailbox command complete queue */
- lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
- phba->sli4_hba.mbx_cq = NULL;
-
- /* Release FCP response complete queue */
- fcp_qidx = 0;
- if (phba->sli4_hba.fcp_cq != NULL)
- do
- lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
- while (++fcp_qidx < phba->cfg_fcp_eq_count);
- kfree(phba->sli4_hba.fcp_cq);
- phba->sli4_hba.fcp_cq = NULL;
-
- /* Release fast-path event queue */
- if (phba->sli4_hba.fp_eq != NULL)
- for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count;
- fcp_qidx++)
- lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
- kfree(phba->sli4_hba.fp_eq);
- phba->sli4_hba.fp_eq = NULL;
-
- /* Release slow-path event queue */
- lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
- phba->sli4_hba.sp_eq = NULL;
+ if (phba->sli4_hba.mbx_cq != NULL) {
+ lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
+ phba->sli4_hba.mbx_cq = NULL;
+ }
return;
}
@@ -6952,56 +6908,117 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
int fcp_cq_index = 0;
/*
- * Set up Event Queues (EQs)
+ * Set up HBA Event Queues (EQs)
*/
- /* Set up slow-path event queue */
- if (!phba->sli4_hba.sp_eq) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0520 Slow-path EQ not allocated\n");
- goto out_error;
- }
- rc = lpfc_eq_create(phba, phba->sli4_hba.sp_eq,
- LPFC_SP_DEF_IMAX);
- if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0521 Failed setup of slow-path EQ: "
- "rc = 0x%x\n", rc);
- goto out_error;
- }
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "2583 Slow-path EQ setup: queue-id=%d\n",
- phba->sli4_hba.sp_eq->queue_id);
-
- /* Set up fast-path event queue */
- if (phba->cfg_fcp_eq_count && !phba->sli4_hba.fp_eq) {
+ /* Set up HBA event queue */
+ if (phba->cfg_fcp_io_channel && !phba->sli4_hba.hba_eq) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3147 Fast-path EQs not allocated\n");
rc = -ENOMEM;
- goto out_destroy_sp_eq;
+ goto out_error;
}
- for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
- if (!phba->sli4_hba.fp_eq[fcp_eqidx]) {
+ for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; fcp_eqidx++) {
+ if (!phba->sli4_hba.hba_eq[fcp_eqidx]) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0522 Fast-path EQ (%d) not "
"allocated\n", fcp_eqidx);
rc = -ENOMEM;
- goto out_destroy_fp_eq;
+ goto out_destroy_hba_eq;
}
- rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx],
+ rc = lpfc_eq_create(phba, phba->sli4_hba.hba_eq[fcp_eqidx],
phba->cfg_fcp_imax);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0523 Failed setup of fast-path EQ "
"(%d), rc = 0x%x\n", fcp_eqidx, rc);
- goto out_destroy_fp_eq;
+ goto out_destroy_hba_eq;
}
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "2584 Fast-path EQ setup: "
+ "2584 HBA EQ setup: "
"queue[%d]-id=%d\n", fcp_eqidx,
- phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id);
+ phba->sli4_hba.hba_eq[fcp_eqidx]->queue_id);
}
+ /* Set up fast-path FCP Response Complete Queue */
+ if (!phba->sli4_hba.fcp_cq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3148 Fast-path FCP CQ array not "
+ "allocated\n");
+ rc = -ENOMEM;
+ goto out_destroy_hba_eq;
+ }
+
+ for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_io_channel; fcp_cqidx++) {
+ if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0526 Fast-path FCP CQ (%d) not "
+ "allocated\n", fcp_cqidx);
+ rc = -ENOMEM;
+ goto out_destroy_fcp_cq;
+ }
+ rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx],
+ phba->sli4_hba.hba_eq[fcp_cqidx], LPFC_WCQ, LPFC_FCP);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0527 Failed setup of fast-path FCP "
+ "CQ (%d), rc = 0x%x\n", fcp_cqidx, rc);
+ goto out_destroy_fcp_cq;
+ }
+
+ /* Setup fcp_cq_map for fast lookup */
+ phba->sli4_hba.fcp_cq_map[fcp_cqidx] =
+ phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "2588 FCP CQ setup: cq[%d]-id=%d, "
+ "parent seq[%d]-id=%d\n",
+ fcp_cqidx,
+ phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
+ fcp_cqidx,
+ phba->sli4_hba.hba_eq[fcp_cqidx]->queue_id);
+ }
+
+ /* Set up fast-path FCP Work Queue */
+ if (!phba->sli4_hba.fcp_wq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3149 Fast-path FCP WQ array not "
+ "allocated\n");
+ rc = -ENOMEM;
+ goto out_destroy_fcp_cq;
+ }
+
+ for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_io_channel; fcp_wqidx++) {
+ if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0534 Fast-path FCP WQ (%d) not "
+ "allocated\n", fcp_wqidx);
+ rc = -ENOMEM;
+ goto out_destroy_fcp_wq;
+ }
+ rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
+ phba->sli4_hba.fcp_cq[fcp_wqidx],
+ LPFC_FCP);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0535 Failed setup of fast-path FCP "
+ "WQ (%d), rc = 0x%x\n", fcp_wqidx, rc);
+ goto out_destroy_fcp_wq;
+ }
+
+ /* Bind this WQ to the next FCP ring */
+ pring = &psli->ring[MAX_SLI3_CONFIGURED_RINGS + fcp_wqidx];
+ pring->sli.sli4.wqp = (void *)phba->sli4_hba.fcp_wq[fcp_wqidx];
+ phba->sli4_hba.fcp_cq[fcp_wqidx]->pring = pring;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "2591 FCP WQ setup: wq[%d]-id=%d, "
+ "parent cq[%d]-id=%d\n",
+ fcp_wqidx,
+ phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
+ fcp_cq_index,
+ phba->sli4_hba.fcp_cq[fcp_wqidx]->queue_id);
+ }
/*
* Set up Complete Queues (CQs)
*/
@@ -7011,20 +7028,20 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0528 Mailbox CQ not allocated\n");
rc = -ENOMEM;
- goto out_destroy_fp_eq;
+ goto out_destroy_fcp_wq;
}
- rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq,
- LPFC_MCQ, LPFC_MBOX);
+ rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq,
+ phba->sli4_hba.hba_eq[0], LPFC_MCQ, LPFC_MBOX);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0529 Failed setup of slow-path mailbox CQ: "
"rc = 0x%x\n", rc);
- goto out_destroy_fp_eq;
+ goto out_destroy_fcp_wq;
}
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n",
phba->sli4_hba.mbx_cq->queue_id,
- phba->sli4_hba.sp_eq->queue_id);
+ phba->sli4_hba.hba_eq[0]->queue_id);
/* Set up slow-path ELS Complete Queue */
if (!phba->sli4_hba.els_cq) {
@@ -7033,8 +7050,8 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
rc = -ENOMEM;
goto out_destroy_mbx_cq;
}
- rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq,
- LPFC_WCQ, LPFC_ELS);
+ rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq,
+ phba->sli4_hba.hba_eq[0], LPFC_WCQ, LPFC_ELS);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0531 Failed setup of slow-path ELS CQ: "
@@ -7044,52 +7061,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n",
phba->sli4_hba.els_cq->queue_id,
- phba->sli4_hba.sp_eq->queue_id);
-
- /* Set up fast-path FCP Response Complete Queue */
- if (!phba->sli4_hba.fcp_cq) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "3148 Fast-path FCP CQ array not "
- "allocated\n");
- rc = -ENOMEM;
- goto out_destroy_els_cq;
- }
- fcp_cqidx = 0;
- do {
- if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0526 Fast-path FCP CQ (%d) not "
- "allocated\n", fcp_cqidx);
- rc = -ENOMEM;
- goto out_destroy_fcp_cq;
- }
- if (phba->cfg_fcp_eq_count)
- rc = lpfc_cq_create(phba,
- phba->sli4_hba.fcp_cq[fcp_cqidx],
- phba->sli4_hba.fp_eq[fcp_cqidx],
- LPFC_WCQ, LPFC_FCP);
- else
- rc = lpfc_cq_create(phba,
- phba->sli4_hba.fcp_cq[fcp_cqidx],
- phba->sli4_hba.sp_eq,
- LPFC_WCQ, LPFC_FCP);
- if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0527 Failed setup of fast-path FCP "
- "CQ (%d), rc = 0x%x\n", fcp_cqidx, rc);
- goto out_destroy_fcp_cq;
- }
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "2588 FCP CQ setup: cq[%d]-id=%d, "
- "parent %seq[%d]-id=%d\n",
- fcp_cqidx,
- phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
- (phba->cfg_fcp_eq_count) ? "" : "sp_",
- fcp_cqidx,
- (phba->cfg_fcp_eq_count) ?
- phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id :
- phba->sli4_hba.sp_eq->queue_id);
- } while (++fcp_cqidx < phba->cfg_fcp_eq_count);
+ phba->sli4_hba.hba_eq[0]->queue_id);
/*
* Set up all the Work Queues (WQs)
@@ -7100,7 +7072,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0538 Slow-path MQ not allocated\n");
rc = -ENOMEM;
- goto out_destroy_fcp_cq;
+ goto out_destroy_els_cq;
}
rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
phba->sli4_hba.mbx_cq, LPFC_MBOX);
@@ -7108,7 +7080,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0539 Failed setup of slow-path MQ: "
"rc = 0x%x\n", rc);
- goto out_destroy_fcp_cq;
+ goto out_destroy_els_cq;
}
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
@@ -7141,50 +7113,6 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
phba->sli4_hba.els_wq->queue_id,
phba->sli4_hba.els_cq->queue_id);
- /* Set up fast-path FCP Work Queue */
- if (!phba->sli4_hba.fcp_wq) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "3149 Fast-path FCP WQ array not "
- "allocated\n");
- rc = -ENOMEM;
- goto out_destroy_els_wq;
- }
- for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
- if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0534 Fast-path FCP WQ (%d) not "
- "allocated\n", fcp_wqidx);
- rc = -ENOMEM;
- goto out_destroy_fcp_wq;
- }
- rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
- phba->sli4_hba.fcp_cq[fcp_cq_index],
- LPFC_FCP);
- if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0535 Failed setup of fast-path FCP "
- "WQ (%d), rc = 0x%x\n", fcp_wqidx, rc);
- goto out_destroy_fcp_wq;
- }
-
- /* Bind this WQ to the next FCP ring */
- pring = &psli->ring[MAX_SLI3_CONFIGURED_RINGS + fcp_wqidx];
- pring->sli.sli4.wqp = (void *)phba->sli4_hba.fcp_wq[fcp_wqidx];
- phba->sli4_hba.fcp_cq[fcp_cq_index]->pring = pring;
-
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "2591 FCP WQ setup: wq[%d]-id=%d, "
- "parent cq[%d]-id=%d\n",
- fcp_wqidx,
- phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
- fcp_cq_index,
- phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id);
- /* Round robin FCP Work Queue's Completion Queue assignment */
- if (phba->cfg_fcp_eq_count)
- fcp_cq_index = ((fcp_cq_index + 1) %
- phba->cfg_fcp_eq_count);
- }
-
/*
* Create Receive Queue (RQ)
*/
@@ -7192,7 +7120,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0540 Receive Queue not allocated\n");
rc = -ENOMEM;
- goto out_destroy_fcp_wq;
+ goto out_destroy_els_wq;
}
lpfc_rq_adjust_repost(phba, phba->sli4_hba.hdr_rq, LPFC_ELS_HBQ);
@@ -7215,25 +7143,23 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
phba->sli4_hba.els_cq->queue_id);
return 0;
-out_destroy_fcp_wq:
- for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
- lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
out_destroy_els_wq:
lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
out_destroy_mbx_wq:
lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
-out_destroy_fcp_cq:
- for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
- lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
out_destroy_els_cq:
lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
out_destroy_mbx_cq:
lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
-out_destroy_fp_eq:
+out_destroy_fcp_wq:
+ for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
+ lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
+out_destroy_fcp_cq:
+ for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
+ lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
+out_destroy_hba_eq:
for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
- lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]);
-out_destroy_sp_eq:
- lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
+ lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[fcp_eqidx]);
out_error:
return rc;
}
@@ -7262,27 +7188,27 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
/* Unset unsolicited receive queue */
lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
/* Unset FCP work queue */
- for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
- lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]);
+ if (phba->sli4_hba.fcp_wq) {
+ for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel;
+ fcp_qidx++)
+ lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]);
+ }
/* Unset mailbox command complete queue */
lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
/* Unset ELS complete queue */
lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
/* Unset FCP response complete queue */
if (phba->sli4_hba.fcp_cq) {
- fcp_qidx = 0;
- do {
+ for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel;
+ fcp_qidx++)
lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
- } while (++fcp_qidx < phba->cfg_fcp_eq_count);
}
/* Unset fast-path event queue */
- if (phba->sli4_hba.fp_eq) {
- for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count;
+ if (phba->sli4_hba.hba_eq) {
+ for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel;
fcp_qidx++)
- lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
+ lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[fcp_qidx]);
}
- /* Unset slow-path event queue */
- lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
}
/**
@@ -8174,33 +8100,17 @@ enable_msix_vectors:
"message=%d\n", index,
phba->sli4_hba.msix_entries[index].vector,
phba->sli4_hba.msix_entries[index].entry);
+
/*
* Assign MSI-X vectors to interrupt handlers
*/
- if (vectors > 1)
- rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
- &lpfc_sli4_sp_intr_handler, IRQF_SHARED,
- LPFC_SP_DRIVER_HANDLER_NAME, phba);
- else
- /* All Interrupts need to be handled by one EQ */
- rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
- &lpfc_sli4_intr_handler, IRQF_SHARED,
- LPFC_DRIVER_NAME, phba);
- if (rc) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
- "0485 MSI-X slow-path request_irq failed "
- "(%d)\n", rc);
- goto msi_fail_out;
- }
-
- /* The rest of the vector(s) are associated to fast-path handler(s) */
- for (index = 1; index < vectors; index++) {
- phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1;
- phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba;
+ for (index = 0; index < vectors; index++) {
+ phba->sli4_hba.fcp_eq_hdl[index].idx = index;
+ phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
rc = request_irq(phba->sli4_hba.msix_entries[index].vector,
- &lpfc_sli4_fp_intr_handler, IRQF_SHARED,
+ &lpfc_sli4_hba_intr_handler, IRQF_SHARED,
LPFC_FP_DRIVER_HANDLER_NAME,
- &phba->sli4_hba.fcp_eq_hdl[index - 1]);
+ &phba->sli4_hba.fcp_eq_hdl[index]);
if (rc) {
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"0486 MSI-X fast-path (%d) "
@@ -8214,12 +8124,9 @@ enable_msix_vectors:
cfg_fail_out:
/* free the irq already requested */
- for (--index; index >= 1; index--)
- free_irq(phba->sli4_hba.msix_entries[index - 1].vector,
- &phba->sli4_hba.fcp_eq_hdl[index - 1]);
-
- /* free the irq already requested */
- free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
+ for (--index; index >= 0; index--)
+ free_irq(phba->sli4_hba.msix_entries[index].vector,
+ &phba->sli4_hba.fcp_eq_hdl[index]);
msi_fail_out:
/* Unconfigure MSI-X capability structure */
@@ -8240,11 +8147,9 @@ lpfc_sli4_disable_msix(struct lpfc_hba *phba)
int index;
/* Free up MSI-X multi-message vectors */
- free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
-
- for (index = 1; index < phba->sli4_hba.msix_vec_nr; index++)
+ for (index = 0; index < phba->sli4_hba.msix_vec_nr; index++)
free_irq(phba->sli4_hba.msix_entries[index].vector,
- &phba->sli4_hba.fcp_eq_hdl[index - 1]);
+ &phba->sli4_hba.fcp_eq_hdl[index]);
/* Disable MSI-X */
pci_disable_msix(phba->pcidev);
@@ -8290,7 +8195,7 @@ lpfc_sli4_enable_msi(struct lpfc_hba *phba)
return rc;
}
- for (index = 0; index < phba->cfg_fcp_eq_count; index++) {
+ for (index = 0; index < phba->cfg_fcp_io_channel; index++) {
phba->sli4_hba.fcp_eq_hdl[index].idx = index;
phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
}
@@ -8370,7 +8275,7 @@ lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
/* Indicate initialization to INTx mode */
phba->intr_type = INTx;
intr_mode = 0;
- for (index = 0; index < phba->cfg_fcp_eq_count;
+ for (index = 0; index < phba->cfg_fcp_io_channel;
index++) {
phba->sli4_hba.fcp_eq_hdl[index].idx = index;
phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
@@ -9490,7 +9395,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
int error;
uint32_t cfg_mode, intr_mode;
int mcnt;
- int adjusted_fcp_eq_count;
+ int adjusted_fcp_io_channel;
const struct firmware *fw;
uint8_t file_name[16];
@@ -9593,13 +9498,13 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
}
/* Default to single EQ for non-MSI-X */
if (phba->intr_type != MSIX)
- adjusted_fcp_eq_count = 0;
+ adjusted_fcp_io_channel = 0;
else if (phba->sli4_hba.msix_vec_nr <
- phba->cfg_fcp_eq_count + 1)
- adjusted_fcp_eq_count = phba->sli4_hba.msix_vec_nr - 1;
+ phba->cfg_fcp_io_channel)
+ adjusted_fcp_io_channel = phba->sli4_hba.msix_vec_nr;
else
- adjusted_fcp_eq_count = phba->cfg_fcp_eq_count;
- phba->cfg_fcp_eq_count = adjusted_fcp_eq_count;
+ adjusted_fcp_io_channel = phba->cfg_fcp_io_channel;
+ phba->cfg_fcp_io_channel = adjusted_fcp_io_channel;
/* Set up SLI-4 HBA */
if (lpfc_sli4_hba_setup(phba)) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -9735,6 +9640,7 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev)
* buffers are released to their corresponding pools here.
*/
lpfc_scsi_free(phba);
+
lpfc_sli4_driver_resource_unset(phba);
/* Unmap adapter Control and Doorbell registers */