lpfc: Add support for using block multi-queue
author    James Smart <james.smart@avagotech.com>       Fri, 22 May 2015 14:42:38 +0000 (10:42 -0400)
committer James Bottomley <JBottomley@Odin.com>         Sat, 13 Jun 2015 15:20:59 +0000 (08:20 -0700)
With blk-mq support in the mid-layer, lpfc can steer I/O based on the
hardware queue information encoded in the request tag.  This patch allows
lpfc to use blk-mq when it is enabled; if it is not enabled, the driver
falls back to the Emulex-internal affinity mappings.

This feature can be turned on at build time via CONFIG_SCSI_MQ_DEFAULT or
at boot time by passing scsi_mod.use_blk_mq=Y on the kernel command line.
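
For context, the steering relies on the block-layer helpers blk_mq_unique_tag()
and blk_mq_unique_tag_to_hwq(), used in the lpfc_scsi.c hunk below.  The
fragment below is an illustrative sketch, not part of this patch, of how a SCSI
low-level driver can derive the hardware queue index from a command's request
tag; the function name example_scmd_to_hwq is hypothetical.

    /* Illustrative only -- mirrors the block-layer calls used by this patch. */
    #include <linux/blk-mq.h>
    #include <scsi/scsi_cmnd.h>
    #include <scsi/scsi_device.h>
    #include <scsi/scsi_host.h>

    static u16 example_scmd_to_hwq(struct scsi_cmnd *cmnd)
    {
            u32 tag;

            if (shost_use_blk_mq(cmnd->device->host)) {
                    /* The unique tag encodes the owning hardware queue. */
                    tag = blk_mq_unique_tag(cmnd->request);
                    return blk_mq_unique_tag_to_hwq(tag);
            }

            /* blk-mq disabled: fall back to a driver-internal mapping. */
            return 0;
    }

With shost->nr_hw_queues set to the number of FCP I/O channels (as done in the
lpfc_init.c hunk), the block layer spreads commands across those hardware
queues itself, so no driver-side affinity setup is needed in blk-mq mode.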

Signed-off-by: Dick Kennedy <dick.kennedy@avagotech.com>
Signed-off-by: James Smart <james.smart@avagotech.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: James Bottomley <JBottomley@Odin.com>
drivers/scsi/lpfc/lpfc_init.c
drivers/scsi/lpfc/lpfc_scsi.c
drivers/scsi/lpfc/lpfc_scsi.h
drivers/scsi/lpfc/lpfc_sli.c

diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 14424e66b5615b7a7ac0d149239f176bcbcb6392..f962118da8eda9b70d7771771ad8a6537dc41f0f 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -3303,6 +3303,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
        shost->max_lun = vport->cfg_max_luns;
        shost->this_id = -1;
        shost->max_cmd_len = 16;
+       shost->nr_hw_queues = phba->cfg_fcp_io_channel;
        if (phba->sli_rev == LPFC_SLI_REV4) {
                shost->dma_boundary =
                        phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
@@ -8980,7 +8981,8 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
                phba->cfg_fcp_io_channel = vectors;
        }
 
-       lpfc_sli4_set_affinity(phba, vectors);
+       if (!shost_use_blk_mq(lpfc_shost_from_vport(phba->pport)))
+               lpfc_sli4_set_affinity(phba, vectors);
        return rc;
 
 cfg_fail_out:
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 116df9c57870c7829ec3c3658e7cd251640c024b..4a2a81875cfe1c207fa3825755d6b469294ae867 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -3845,6 +3845,49 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
        lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
 }
 
+/**
+ * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution
+ * @phba: Pointer to HBA context object.
+ *
+ * This routine performs a roundrobin SCSI command to SLI4 FCP WQ index
+ * distribution.  This is called by __lpfc_sli_issue_iocb_s4() with the hbalock
+ * held.
+ * If scsi-mq is enabled, get the default block layer mapping of software queues
+ * to hardware queues; this mapping is encoded in the command's request tag.
+ *
+ * Return: index into the SLI4 fast-path FCP work queue.
+ **/
+int lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba,
+                                 struct lpfc_scsi_buf *lpfc_cmd)
+{
+       struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
+       struct lpfc_vector_map_info *cpup;
+       int chann, cpu;
+       uint32_t tag;
+       uint16_t hwq;
+
+       if (shost_use_blk_mq(cmnd->device->host)) {
+               tag = blk_mq_unique_tag(cmnd->request);
+               hwq = blk_mq_unique_tag_to_hwq(tag);
+
+               return hwq;
+       }
+
+       if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU
+           && phba->cfg_fcp_io_channel > 1) {
+               cpu = smp_processor_id();
+               if (cpu < phba->sli4_hba.num_present_cpu) {
+                       cpup = phba->sli4_hba.cpu_map;
+                       cpup += cpu;
+                       return cpup->channel_id;
+               }
+       }
+       chann = atomic_add_return(1, &phba->fcp_qidx);
+       chann = (chann % phba->cfg_fcp_io_channel);
+       return chann;
+}
+
+
 /**
  * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine
  * @phba: The Hba for which this call is being executed.
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index 474e30cdee6e37d48b78fdcbfb462c6fc1e93b56..18b9260ccfac2f19e0eccaac866dffc785d37195 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -184,3 +184,6 @@ struct lpfc_scsi_buf {
 #define FIND_FIRST_OAS_LUN              0
 #define NO_MORE_OAS_LUN                        -1
 #define NOT_OAS_ENABLED_LUN            NO_MORE_OAS_LUN
+
+int lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba,
+                                 struct lpfc_scsi_buf *lpfc_cmd);
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 41d337060f0a0a1781f895969960b0a93f0cdb49..07df296d9a20c9faf1610edf32cb0b88fed0c98f 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -8137,36 +8137,6 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
        return sglq->sli4_xritag;
 }
 
-/**
- * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution
- * @phba: Pointer to HBA context object.
- *
- * This routine performs a roundrobin SCSI command to SLI4 FCP WQ index
- * distribution.  This is called by __lpfc_sli_issue_iocb_s4() with the hbalock
- * held.
- *
- * Return: index into SLI4 fast-path FCP queue index.
- **/
-static inline int
-lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba)
-{
-       struct lpfc_vector_map_info *cpup;
-       int chann, cpu;
-
-       if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU
-           && phba->cfg_fcp_io_channel > 1) {
-               cpu = smp_processor_id();
-               if (cpu < phba->sli4_hba.num_present_cpu) {
-                       cpup = phba->sli4_hba.cpu_map;
-                       cpup += cpu;
-                       return cpup->channel_id;
-               }
-       }
-       chann = atomic_add_return(1, &phba->fcp_qidx);
-       chann = (chann % phba->cfg_fcp_io_channel);
-       return chann;
-}
-
 /**
  * lpfc_sli_iocb2wqe - Convert the IOCB to a work queue entry.
  * @phba: Pointer to HBA context object.
@@ -8807,27 +8777,29 @@ int
 lpfc_sli_calc_ring(struct lpfc_hba *phba, uint32_t ring_number,
                    struct lpfc_iocbq *piocb)
 {
-       if (phba->sli_rev == LPFC_SLI_REV4) {
-               if (piocb->iocb_flag &  (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
-                       if (!(phba->cfg_fof) ||
-                           (!(piocb->iocb_flag & LPFC_IO_FOF))) {
-                               if (unlikely(!phba->sli4_hba.fcp_wq))
-                                       return LPFC_HBA_ERROR;
-                               /*
-                                * for abort iocb fcp_wqidx should already
-                                * be setup based on what work queue we used.
-                                */
-                               if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX))
-                                       piocb->fcp_wqidx =
-                                           lpfc_sli4_scmd_to_wqidx_distr(phba);
-                               ring_number = MAX_SLI3_CONFIGURED_RINGS +
-                                               piocb->fcp_wqidx;
-                       } else {
-                               if (unlikely(!phba->sli4_hba.oas_wq))
-                                       return LPFC_HBA_ERROR;
-                               piocb->fcp_wqidx = 0;
-                               ring_number =  LPFC_FCP_OAS_RING;
-                       }
+       if (phba->sli_rev < LPFC_SLI_REV4)
+               return ring_number;
+
+       if (piocb->iocb_flag &  (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
+               if (!(phba->cfg_fof) ||
+                               (!(piocb->iocb_flag & LPFC_IO_FOF))) {
+                       if (unlikely(!phba->sli4_hba.fcp_wq))
+                               return LPFC_HBA_ERROR;
+                       /*
+                        * for abort iocb fcp_wqidx should already
+                        * be setup based on what work queue we used.
+                        */
+                       if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX))
+                               piocb->fcp_wqidx =
+                                       lpfc_sli4_scmd_to_wqidx_distr(phba,
+                                                             piocb->context1);
+                       ring_number = MAX_SLI3_CONFIGURED_RINGS +
+                               piocb->fcp_wqidx;
+               } else {
+                       if (unlikely(!phba->sli4_hba.oas_wq))
+                               return LPFC_HBA_ERROR;
+                       piocb->fcp_wqidx = 0;
+                       ring_number =  LPFC_FCP_OAS_RING;
                }
        }
        return ring_number;