Merge remote-tracking branch 'scsi/for-next'
author	Stephen Rothwell <sfr@canb.auug.org.au>
	Tue, 13 Sep 2016 03:02:05 +0000 (13:02 +1000)
committer	Stephen Rothwell <sfr@canb.auug.org.au>
	Tue, 13 Sep 2016 03:02:05 +0000 (13:02 +1000)
67 files changed:
Documentation/powerpc/cxlflash.txt
Documentation/scsi/smartpqi.txt [new file with mode: 0644]
MAINTAINERS
drivers/message/fusion/mptbase.c
drivers/message/fusion/mptfc.c
drivers/s390/scsi/zfcp_dbf.c
drivers/s390/scsi/zfcp_dbf.h
drivers/s390/scsi/zfcp_erp.c
drivers/s390/scsi/zfcp_ext.h
drivers/s390/scsi/zfcp_fsf.c
drivers/s390/scsi/zfcp_fsf.h
drivers/s390/scsi/zfcp_scsi.c
drivers/scsi/Kconfig
drivers/scsi/Makefile
drivers/scsi/NCR5380.c
drivers/scsi/NCR5380.h
drivers/scsi/aic94xx/aic94xx_hwi.c
drivers/scsi/be2iscsi/be.h
drivers/scsi/be2iscsi/be_cmds.c
drivers/scsi/be2iscsi/be_cmds.h
drivers/scsi/be2iscsi/be_iscsi.c
drivers/scsi/be2iscsi/be_iscsi.h
drivers/scsi/be2iscsi/be_main.c
drivers/scsi/be2iscsi/be_main.h
drivers/scsi/be2iscsi/be_mgmt.c
drivers/scsi/be2iscsi/be_mgmt.h
drivers/scsi/bfa/bfa_fcs_lport.c
drivers/scsi/cxlflash/superpipe.c
drivers/scsi/cxlflash/superpipe.h
drivers/scsi/cxlflash/vlun.c
drivers/scsi/esas2r/esas2r_init.c
drivers/scsi/esas2r/esas2r_main.c
drivers/scsi/fcoe/fcoe_transport.c
drivers/scsi/hisi_sas/hisi_sas.h
drivers/scsi/hisi_sas/hisi_sas_main.c
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
drivers/scsi/hosts.c
drivers/scsi/ibmvscsi/ibmvfc.c
drivers/scsi/ibmvscsi/ibmvfc.h
drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
drivers/scsi/ipr.c
drivers/scsi/ipr.h
drivers/scsi/libfc/fc_exch.c
drivers/scsi/libfc/fc_rport.c
drivers/scsi/megaraid/megaraid_sas_base.c
drivers/scsi/mpt3sas/mpt3sas_base.c
drivers/scsi/mpt3sas/mpt3sas_base.h
drivers/scsi/mpt3sas/mpt3sas_config.c
drivers/scsi/mpt3sas/mpt3sas_ctl.c
drivers/scsi/mpt3sas/mpt3sas_scsih.c
drivers/scsi/mpt3sas/mpt3sas_transport.c
drivers/scsi/pmcraid.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/qla4xxx/ql4_nx.c
drivers/scsi/scsi_priv.h
drivers/scsi/scsi_scan.c
drivers/scsi/sg.c
drivers/scsi/smartpqi/Kconfig [new file with mode: 0644]
drivers/scsi/smartpqi/Makefile [new file with mode: 0644]
drivers/scsi/smartpqi/smartpqi.h [new file with mode: 0644]
drivers/scsi/smartpqi/smartpqi_init.c [new file with mode: 0644]
drivers/scsi/smartpqi/smartpqi_sas_transport.c [new file with mode: 0644]
drivers/scsi/smartpqi/smartpqi_sis.c [new file with mode: 0644]
drivers/scsi/smartpqi/smartpqi_sis.h [new file with mode: 0644]
drivers/scsi/sr.c
drivers/scsi/ufs/tc-dwc-g210.c
include/uapi/scsi/cxlflash_ioctl.h

index 4202d1bc583c57fc6ca98271688c414b743896f8..6d9a2ed32cad2848ad9d5c5890444b461febb84b 100644 (file)
@@ -121,7 +121,7 @@ Block library API
     below.
 
     The block library can be found on GitHub:
-    http://www.github.com/mikehollinger/ibmcapikv
+    http://github.com/open-power/capiflash
 
 
 CXL Flash Driver IOCTLs
@@ -171,11 +171,30 @@ DK_CXLFLASH_ATTACH
           destroyed, the tokens are to be considered stale and subsequent
           usage will result in errors.
 
+       - A valid adapter file descriptor (fd2 >= 0) is only returned on
+         the initial attach for a context. Subsequent attaches to an
+         existing context (DK_CXLFLASH_ATTACH_REUSE_CONTEXT flag present)
+         do not provide the adapter file descriptor as it was previously
+         made known to the application.
+
         - When a context is no longer needed, the user shall detach from
-          the context via the DK_CXLFLASH_DETACH ioctl.
+          the context via the DK_CXLFLASH_DETACH ioctl. When this ioctl
+         returns with a valid adapter file descriptor and the return flag
+         DK_CXLFLASH_APP_CLOSE_ADAP_FD is present, the application _must_
+         close the adapter file descriptor following a successful detach.
+
+       - When this ioctl returns with a valid fd2 and the return flag
+         DK_CXLFLASH_APP_CLOSE_ADAP_FD is present, the application _must_
+         close fd2 in the following circumstances:
+
+               + Following a successful detach of the last user of the context
+               + Following a successful recovery on the context's original fd2
+               + In the child process of a fork(), following a clone ioctl,
+                 on the fd2 associated with the source context
 
-        - A close on fd2 will invalidate the tokens. This operation is not
-          required by the user.
+        - At any time, a close on fd2 will invalidate the tokens. Applications
+         should exercise caution to only close fd2 when appropriate (outlined
+         in the previous bullet) to avoid premature loss of I/O.
 
 DK_CXLFLASH_USER_DIRECT
 -----------------------
@@ -254,6 +273,10 @@ DK_CXLFLASH_DETACH
     success, all "tokens" which had been provided to the user from the
     DK_CXLFLASH_ATTACH onward are no longer valid.
 
+    When the DK_CXLFLASH_APP_CLOSE_ADAP_FD flag was returned on a successful
+    attach, the application _must_ close the fd2 associated with the context
+    following the detach of the final user of the context.
+
 DK_CXLFLASH_VLUN_CLONE
 ----------------------
     This ioctl is responsible for cloning a previously created
@@ -261,7 +284,7 @@ DK_CXLFLASH_VLUN_CLONE
     support maintaining user space access to storage after a process
     forks. Upon success, the child process (which invoked the ioctl)
     will have access to the same LUNs via the same resource handle(s)
-    and fd2 as the parent, but under a different context.
+    as the parent, but under a different context.
 
     Context sharing across processes is not supported with CXL and
     therefore each fork must be met with establishing a new context
@@ -275,6 +298,12 @@ DK_CXLFLASH_VLUN_CLONE
     translation tables are copied from the parent context to the child's
     and then synced with the AFU.
 
+    When the DK_CXLFLASH_APP_CLOSE_ADAP_FD flag was returned on a successful
+    attach, the application _must_ close the fd2 associated with the source
+    context (still resident/accessible in the parent process) following the
+    clone. This is to avoid a stale entry in the file descriptor table of the
+    child process.
+
 DK_CXLFLASH_VERIFY
 ------------------
     This ioctl is used to detect various changes such as the capacity of
@@ -309,6 +338,11 @@ DK_CXLFLASH_RECOVER_AFU
     at which time the context/resources they held will be freed as part of
     the release fop.
 
+    When the DK_CXLFLASH_APP_CLOSE_ADAP_FD flag was returned on a successful
+    attach, the application _must_ unmap and close the fd2 associated with the
+    original context following this ioctl returning success and indicating that
+    the context was recovered (DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET).
+
 DK_CXLFLASH_MANAGE_LUN
 ----------------------
     This ioctl is used to switch a LUN from a mode where it is available
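
To make the fd2 rules above concrete, here is a minimal user-space sketch
(not part of this series) of an attach/detach cycle honoring the new flag.
It assumes the hdr.return_flags field and DK_CXLFLASH_APP_CLOSE_ADAP_FD
flag from the updated include/uapi/scsi/cxlflash_ioctl.h; error handling
and hdr/version setup are elided.

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <scsi/cxlflash_ioctl.h>

    static void attach_then_detach(const char *dev)
    {
            struct dk_cxlflash_attach attach = { 0 };
            struct dk_cxlflash_detach detach = { 0 };
            int fd = open(dev, O_RDWR);
            int fd2;

            ioctl(fd, DK_CXLFLASH_ATTACH, &attach);
            fd2 = (int)attach.adap_fd;  /* valid only on the initial attach */

            /* ... provision and perform I/O via other DK_CXLFLASH_* ioctls ... */

            detach.context_id = attach.context_id;
            ioctl(fd, DK_CXLFLASH_DETACH, &detach);

            /* New semantics: the application owns the close of fd2. */
            if (attach.hdr.return_flags & DK_CXLFLASH_APP_CLOSE_ADAP_FD)
                    close(fd2);         /* last user of the context */

            close(fd);
    }

The same ownership rule drives the clone and recovery cases documented
above: after DK_CXLFLASH_VLUN_CLONE the child closes the source context's
fd2, and after a recovery reporting DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET
the application unmaps and closes the original fd2.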
diff --git a/Documentation/scsi/smartpqi.txt b/Documentation/scsi/smartpqi.txt
new file mode 100644 (file)
index 0000000..ab377d9
--- /dev/null
+++ b/Documentation/scsi/smartpqi.txt
@@ -0,0 +1,80 @@
+
+SMARTPQI - Microsemi Smart PQI Driver
+-------------------------------------
+
+This file describes the smartpqi SCSI driver for Microsemi
+(http://www.microsemi.com) PQI controllers. The smartpqi driver
+is the next-generation SCSI driver for Microsemi Corp. and the first
+SCSI driver to implement the PQI queuing model.
+
+The smartpqi driver will replace the aacraid driver for Adaptec Series 9
+controllers. Customers running an older kernel (pre-4.9) using an Adaptec
+Series 9 controller will have to configure the smartpqi driver, or their
+volumes will not be added to the OS.
+
+For Microsemi smartpqi controller support, enable the smartpqi driver
+when configuring the kernel.
+
+For more information on the PQI Queuing Interface, please see:
+http://www.t10.org/drafts.htm
+http://www.t10.org/members/w_pqi2.htm
+
+Supported devices:
+------------------
+<Controller names to be added as they become publicly available.>
+
+smartpqi specific entries in /sys
+---------------------------------
+
+  smartpqi host attributes:
+  -------------------------
+  /sys/class/scsi_host/host*/rescan
+  /sys/class/scsi_host/host*/version
+
+  The host rescan attribute is write-only. Writing to it triggers the
+  driver to scan for new, changed, or removed devices and to notify the
+  SCSI mid-layer of any changes detected.
+
+  The version attribute is read-only and will return the driver version
+  and the controller firmware version.
+  For example:
+              driver: 0.9.13-370
+              firmware: 0.01-522
+
+  smartpqi sas device attributes
+  ------------------------------
+  HBA devices are added to the SAS transport layer, which creates these
+  attributes automatically.
+
+  /sys/class/sas_device/end_device-X:X/sas_address
+  /sys/class/sas_device/end_device-X:X/enclosure_identifier
+  /sys/class/sas_device/end_device-X:X/scsi_target_id
+
+smartpqi specific ioctls:
+-------------------------
+
+  These are provided for compatibility with applications written for the cciss protocol.
+
+  CCISS_DEREGDISK
+  CCISS_REGNEWDISK
+  CCISS_REGNEWD
+
+  The above three ioctls all do the same thing: they cause the driver to
+  rescan for new devices, exactly as writing to the smartpqi-specific host
+  "rescan" attribute does.
+
+  CCISS_GETPCIINFO
+
+       Returns the PCI domain, bus, device, and function, along with the "board ID" (PCI subsystem ID).
+
+  CCISS_GETDRIVVER
+
+       Returns the driver version as a 32-bit value encoded as:
+       (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) | (DRIVER_RELEASE << 16) | DRIVER_REVISION;
+
+  CCISS_PASSTHRU
+
+       Allows "BMIC" and "CISS" commands to be passed through to the Smart Storage Array.
+       These are used extensively by the SSA Array Configuration Utility, SNMP storage
+       agents, etc.
+
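
As a worked example of the encoding above (a sketch, not part of the
driver), user space could unpack the value returned by CCISS_GETDRIVVER
like this:

    #include <stdint.h>
    #include <stdio.h>

    /* Decode the 32-bit version word returned by CCISS_GETDRIVVER. */
    static void print_driver_version(uint32_t v)
    {
            unsigned major    = (v >> 28) & 0xf;
            unsigned minor    = (v >> 24) & 0xf;
            unsigned release  = (v >> 16) & 0xff;
            unsigned revision = v & 0xffff;

            printf("driver: %u.%u.%u-%u\n", major, minor, release, revision);
    }

The three rescan ioctls above are equivalent to writing any value to the
/sys/class/scsi_host/host*/rescan attribute described earlier.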
index 993f47292dbcf21172020011944588dd119780c8..87daa811ab94f0cb85a6529b941cc1c48bae2519 100644 (file)
@@ -7917,6 +7917,18 @@ W:       http://www.melexis.com
 S:     Supported
 F:     drivers/iio/temperature/mlx90614.c
 
+MICROSEMI SMART ARRAY SMARTPQI DRIVER (smartpqi)
+M:     Don Brace <don.brace@microsemi.com>
+L:     esc.storagedev@microsemi.com
+L:     linux-scsi@vger.kernel.org
+S:     Supported
+F:     drivers/scsi/smartpqi/smartpqi*.[ch]
+F:     drivers/scsi/smartpqi/Kconfig
+F:     drivers/scsi/smartpqi/Makefile
+F:     include/linux/cciss*.h
+F:     include/uapi/linux/cciss*.h
+F:     Documentation/scsi/smartpqi.txt
+
 MN88472 MEDIA DRIVER
 M:     Antti Palosaari <crope@iki.fi>
 L:     linux-media@vger.kernel.org
@@ -10666,12 +10678,12 @@ S:    Maintained
 F:     drivers/misc/phantom.c
 F:     include/uapi/linux/phantom.h
 
-SERVER ENGINES 10Gbps iSCSI - BladeEngine 2 DRIVER
-M:     Jayamohan Kallickal <jayamohan.kallickal@avagotech.com>
-M:     Ketan Mukadam <ketan.mukadam@avagotech.com>
-M:     John Soni Jose <sony.john@avagotech.com>
+Emulex 10Gbps iSCSI - OneConnect DRIVER
+M:     Subbu Seetharaman <subbu.seetharaman@broadcom.com>
+M:     Ketan Mukadam <ketan.mukadam@broadcom.com>
+M:     Jitendra Bhivare <jitendra.bhivare@broadcom.com>
 L:     linux-scsi@vger.kernel.org
-W:     http://www.avagotech.com
+W:     http://www.broadcom.com
 S:     Supported
 F:     drivers/scsi/be2iscsi/
 
index 5537f8df85121cca06b5ba9369b43ab1e64d7826..89c7ed16b4df13057fcadb712ee369316c514ac6 100644 (file)
@@ -1865,8 +1865,8 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
 
        snprintf(ioc->reset_work_q_name, MPT_KOBJ_NAME_LEN,
                 "mpt_poll_%d", ioc->id);
-       ioc->reset_work_q =
-               create_singlethread_workqueue(ioc->reset_work_q_name);
+       ioc->reset_work_q = alloc_workqueue(ioc->reset_work_q_name,
+                                           WQ_MEM_RECLAIM, 0);
        if (!ioc->reset_work_q) {
                printk(MYIOC_s_ERR_FMT "Insufficient memory to add adapter!\n",
                    ioc->name);
@@ -1992,7 +1992,8 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
        INIT_LIST_HEAD(&ioc->fw_event_list);
        spin_lock_init(&ioc->fw_event_lock);
        snprintf(ioc->fw_event_q_name, MPT_KOBJ_NAME_LEN, "mpt/%d", ioc->id);
-       ioc->fw_event_q = create_singlethread_workqueue(ioc->fw_event_q_name);
+       ioc->fw_event_q = alloc_workqueue(ioc->fw_event_q_name,
+                                         WQ_MEM_RECLAIM, 0);
        if (!ioc->fw_event_q) {
                printk(MYIOC_s_ERR_FMT "Insufficient memory to add adapter!\n",
                    ioc->name);
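
Both fusion conversions in this merge (mptbase above, mptfc below) drop
create_singlethread_workqueue() for the modern allocators. A hedged sketch
of the idiom, with illustrative names:

    #include <linux/workqueue.h>

    /*
     * WQ_MEM_RECLAIM attaches a rescuer thread so queued work can still
     * make forward progress under memory pressure -- needed for queues in
     * the I/O or error-recovery path.  alloc_ordered_workqueue() also
     * preserves the strict one-work-at-a-time execution that
     * create_singlethread_workqueue() used to provide.
     */
    static struct workqueue_struct *example_alloc_wq(int id)
    {
            return alloc_ordered_workqueue("example_wq_%d", WQ_MEM_RECLAIM, id);
    }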
index d8bf84aef602d0f959641aea2e501f213837fca3..129e132268ff9239c9d6b9cf83c73c2691790848 100644 (file)
@@ -1324,7 +1324,8 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        snprintf(ioc->fc_rescan_work_q_name, sizeof(ioc->fc_rescan_work_q_name),
                 "mptfc_wq_%d", sh->host_no);
        ioc->fc_rescan_work_q =
-               create_singlethread_workqueue(ioc->fc_rescan_work_q_name);
+               alloc_ordered_workqueue(ioc->fc_rescan_work_q_name,
+                                       WQ_MEM_RECLAIM);
        if (!ioc->fc_rescan_work_q)
                goto out_mptfc_probe;
 
index 5d7fbe4e907e37e464c63e8f3278c78edcb838cf..637cf8973c9e1c55d87577815c782d6409b60b5b 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Debug traces for zfcp.
  *
- * Copyright IBM Corp. 2002, 2013
+ * Copyright IBM Corp. 2002, 2016
  */
 
 #define KMSG_COMPONENT "zfcp"
@@ -65,7 +65,7 @@ void zfcp_dbf_pl_write(struct zfcp_dbf *dbf, void *data, u16 length, char *area,
  * @tag: tag indicating which kind of unsolicited status has been received
  * @req: request for which a response was received
  */
-void zfcp_dbf_hba_fsf_res(char *tag, struct zfcp_fsf_req *req)
+void zfcp_dbf_hba_fsf_res(char *tag, int level, struct zfcp_fsf_req *req)
 {
        struct zfcp_dbf *dbf = req->adapter->dbf;
        struct fsf_qtcb_prefix *q_pref = &req->qtcb->prefix;
@@ -85,6 +85,8 @@ void zfcp_dbf_hba_fsf_res(char *tag, struct zfcp_fsf_req *req)
        rec->u.res.req_issued = req->issued;
        rec->u.res.prot_status = q_pref->prot_status;
        rec->u.res.fsf_status = q_head->fsf_status;
+       rec->u.res.port_handle = q_head->port_handle;
+       rec->u.res.lun_handle = q_head->lun_handle;
 
        memcpy(rec->u.res.prot_status_qual, &q_pref->prot_status_qual,
               FSF_PROT_STATUS_QUAL_SIZE);
@@ -97,7 +99,7 @@ void zfcp_dbf_hba_fsf_res(char *tag, struct zfcp_fsf_req *req)
                                  rec->pl_len, "fsf_res", req->req_id);
        }
 
-       debug_event(dbf->hba, 1, rec, sizeof(*rec));
+       debug_event(dbf->hba, level, rec, sizeof(*rec));
        spin_unlock_irqrestore(&dbf->hba_lock, flags);
 }
 
@@ -241,7 +243,8 @@ static void zfcp_dbf_set_common(struct zfcp_dbf_rec *rec,
        if (sdev) {
                rec->lun_status = atomic_read(&sdev_to_zfcp(sdev)->status);
                rec->lun = zfcp_scsi_dev_lun(sdev);
-       }
+       } else
+               rec->lun = ZFCP_DBF_INVALID_LUN;
 }
 
 /**
@@ -320,13 +323,48 @@ void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp)
        spin_unlock_irqrestore(&dbf->rec_lock, flags);
 }
 
+/**
+ * zfcp_dbf_rec_run_wka - trace wka port event with info like running recovery
+ * @tag: identifier for event
+ * @wka_port: well known address port
+ * @req_id: request ID to correlate with potential HBA trace record
+ */
+void zfcp_dbf_rec_run_wka(char *tag, struct zfcp_fc_wka_port *wka_port,
+                         u64 req_id)
+{
+       struct zfcp_dbf *dbf = wka_port->adapter->dbf;
+       struct zfcp_dbf_rec *rec = &dbf->rec_buf;
+       unsigned long flags;
+
+       spin_lock_irqsave(&dbf->rec_lock, flags);
+       memset(rec, 0, sizeof(*rec));
+
+       rec->id = ZFCP_DBF_REC_RUN;
+       memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
+       rec->port_status = wka_port->status;
+       rec->d_id = wka_port->d_id;
+       rec->lun = ZFCP_DBF_INVALID_LUN;
+
+       rec->u.run.fsf_req_id = req_id;
+       rec->u.run.rec_status = ~0;
+       rec->u.run.rec_step = ~0;
+       rec->u.run.rec_action = ~0;
+       rec->u.run.rec_count = ~0;
+
+       debug_event(dbf->rec, 1, rec, sizeof(*rec));
+       spin_unlock_irqrestore(&dbf->rec_lock, flags);
+}
+
 static inline
-void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf, void *data, u8 id, u16 len,
-                 u64 req_id, u32 d_id)
+void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf,
+                 char *paytag, struct scatterlist *sg, u8 id, u16 len,
+                 u64 req_id, u32 d_id, u16 cap_len)
 {
        struct zfcp_dbf_san *rec = &dbf->san_buf;
        u16 rec_len;
        unsigned long flags;
+       struct zfcp_dbf_pay *payload = &dbf->pay_buf;
+       u16 pay_sum = 0;
 
        spin_lock_irqsave(&dbf->san_lock, flags);
        memset(rec, 0, sizeof(*rec));
@@ -334,10 +372,41 @@ void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf, void *data, u8 id, u16 len,
        rec->id = id;
        rec->fsf_req_id = req_id;
        rec->d_id = d_id;
-       rec_len = min(len, (u16)ZFCP_DBF_SAN_MAX_PAYLOAD);
-       memcpy(rec->payload, data, rec_len);
        memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
+       rec->pl_len = len; /* full length even if we cap pay below */
+       if (!sg)
+               goto out;
+       rec_len = min_t(unsigned int, sg->length, ZFCP_DBF_SAN_MAX_PAYLOAD);
+       memcpy(rec->payload, sg_virt(sg), rec_len); /* part of 1st sg entry */
+       if (len <= rec_len)
+               goto out; /* skip pay record if full content in rec->payload */
+
+       /* if (len > rec_len):
+        * dump data up to cap_len ignoring small duplicate in rec->payload
+        */
+       spin_lock_irqsave(&dbf->pay_lock, flags);
+       memset(payload, 0, sizeof(*payload));
+       memcpy(payload->area, paytag, ZFCP_DBF_TAG_LEN);
+       payload->fsf_req_id = req_id;
+       payload->counter = 0;
+       for (; sg && pay_sum < cap_len; sg = sg_next(sg)) {
+               u16 pay_len, offset = 0;
+
+               while (offset < sg->length && pay_sum < cap_len) {
+                       pay_len = min((u16)ZFCP_DBF_PAY_MAX_REC,
+                                     (u16)(sg->length - offset));
+                       /* cap_len <= pay_sum < cap_len+ZFCP_DBF_PAY_MAX_REC */
+                       memcpy(payload->data, sg_virt(sg) + offset, pay_len);
+                       debug_event(dbf->pay, 1, payload,
+                                   zfcp_dbf_plen(pay_len));
+                       payload->counter++;
+                       offset += pay_len;
+                       pay_sum += pay_len;
+               }
+       }
+       spin_unlock(&dbf->pay_lock);
 
+out:
        debug_event(dbf->san, 1, rec, sizeof(*rec));
        spin_unlock_irqrestore(&dbf->san_lock, flags);
 }
@@ -354,9 +423,62 @@ void zfcp_dbf_san_req(char *tag, struct zfcp_fsf_req *fsf, u32 d_id)
        struct zfcp_fsf_ct_els *ct_els = fsf->data;
        u16 length;
 
-       length = (u16)(ct_els->req->length + FC_CT_HDR_LEN);
-       zfcp_dbf_san(tag, dbf, sg_virt(ct_els->req), ZFCP_DBF_SAN_REQ, length,
-                    fsf->req_id, d_id);
+       length = (u16)zfcp_qdio_real_bytes(ct_els->req);
+       zfcp_dbf_san(tag, dbf, "san_req", ct_els->req, ZFCP_DBF_SAN_REQ,
+                    length, fsf->req_id, d_id, length);
+}
+
+static u16 zfcp_dbf_san_res_cap_len_if_gpn_ft(char *tag,
+                                             struct zfcp_fsf_req *fsf,
+                                             u16 len)
+{
+       struct zfcp_fsf_ct_els *ct_els = fsf->data;
+       struct fc_ct_hdr *reqh = sg_virt(ct_els->req);
+       struct fc_ns_gid_ft *reqn = (struct fc_ns_gid_ft *)(reqh + 1);
+       struct scatterlist *resp_entry = ct_els->resp;
+       struct fc_gpn_ft_resp *acc;
+       int max_entries, x, last = 0;
+
+       if (!(memcmp(tag, "fsscth2", 7) == 0
+             && ct_els->d_id == FC_FID_DIR_SERV
+             && reqh->ct_rev == FC_CT_REV
+             && reqh->ct_in_id[0] == 0
+             && reqh->ct_in_id[1] == 0
+             && reqh->ct_in_id[2] == 0
+             && reqh->ct_fs_type == FC_FST_DIR
+             && reqh->ct_fs_subtype == FC_NS_SUBTYPE
+             && reqh->ct_options == 0
+             && reqh->_ct_resvd1 == 0
+             && reqh->ct_cmd == FC_NS_GPN_FT
+             /* reqh->ct_mr_size can vary so do not match but read below */
+             && reqh->_ct_resvd2 == 0
+             && reqh->ct_reason == 0
+             && reqh->ct_explan == 0
+             && reqh->ct_vendor == 0
+             && reqn->fn_resvd == 0
+             && reqn->fn_domain_id_scope == 0
+             && reqn->fn_area_id_scope == 0
+             && reqn->fn_fc4_type == FC_TYPE_FCP))
+               return len; /* not GPN_FT response so do not cap */
+
+       acc = sg_virt(resp_entry);
+       max_entries = (reqh->ct_mr_size * 4 / sizeof(struct fc_gpn_ft_resp))
+               + 1 /* zfcp_fc_scan_ports: bytes correct, entries off-by-one
+                    * to account for header as 1st pseudo "entry" */;
+
+       /* the basic CT_IU preamble is the same size as one entry in the GPN_FT
+        * response, allowing us to skip special handling for it - just skip it
+        */
+       for (x = 1; x < max_entries && !last; x++) {
+               if (x % (ZFCP_FC_GPN_FT_ENT_PAGE + 1))
+                       acc++;
+               else
+                       acc = sg_virt(++resp_entry);
+
+               last = acc->fp_flags & FC_NS_FID_LAST;
+       }
+       len = min(len, (u16)(x * sizeof(struct fc_gpn_ft_resp)));
+       return len; /* cap after last entry */
 }
 
 /**
@@ -370,9 +492,10 @@ void zfcp_dbf_san_res(char *tag, struct zfcp_fsf_req *fsf)
        struct zfcp_fsf_ct_els *ct_els = fsf->data;
        u16 length;
 
-       length = (u16)(ct_els->resp->length + FC_CT_HDR_LEN);
-       zfcp_dbf_san(tag, dbf, sg_virt(ct_els->resp), ZFCP_DBF_SAN_RES, length,
-                    fsf->req_id, 0);
+       length = (u16)zfcp_qdio_real_bytes(ct_els->resp);
+       zfcp_dbf_san(tag, dbf, "san_res", ct_els->resp, ZFCP_DBF_SAN_RES,
+                    length, fsf->req_id, ct_els->d_id,
+                    zfcp_dbf_san_res_cap_len_if_gpn_ft(tag, fsf, length));
 }
 
 /**
@@ -386,11 +509,13 @@ void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf)
        struct fsf_status_read_buffer *srb =
                (struct fsf_status_read_buffer *) fsf->data;
        u16 length;
+       struct scatterlist sg;
 
        length = (u16)(srb->length -
                        offsetof(struct fsf_status_read_buffer, payload));
-       zfcp_dbf_san(tag, dbf, srb->payload.data, ZFCP_DBF_SAN_ELS, length,
-                    fsf->req_id, ntoh24(srb->d_id));
+       sg_init_one(&sg, srb->payload.data, length);
+       zfcp_dbf_san(tag, dbf, "san_els", &sg, ZFCP_DBF_SAN_ELS, length,
+                    fsf->req_id, ntoh24(srb->d_id), length);
 }
 
 /**
@@ -399,7 +524,8 @@ void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf)
  * @sc: pointer to struct scsi_cmnd
  * @fsf: pointer to struct zfcp_fsf_req
  */
-void zfcp_dbf_scsi(char *tag, struct scsi_cmnd *sc, struct zfcp_fsf_req *fsf)
+void zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *sc,
+                  struct zfcp_fsf_req *fsf)
 {
        struct zfcp_adapter *adapter =
                (struct zfcp_adapter *) sc->device->host->hostdata[0];
@@ -442,7 +568,7 @@ void zfcp_dbf_scsi(char *tag, struct scsi_cmnd *sc, struct zfcp_fsf_req *fsf)
                }
        }
 
-       debug_event(dbf->scsi, 1, rec, sizeof(*rec));
+       debug_event(dbf->scsi, level, rec, sizeof(*rec));
        spin_unlock_irqrestore(&dbf->scsi_lock, flags);
 }
 
index 0be3d48681aead94466a71ab7b34c6ae7ca098ff..36d07584271d569d27ec2eeb3706235d6459e026 100644 (file)
@@ -2,7 +2,7 @@
  * zfcp device driver
  * debug feature declarations
  *
- * Copyright IBM Corp. 2008, 2010
+ * Copyright IBM Corp. 2008, 2015
  */
 
 #ifndef ZFCP_DBF_H
 
 #define ZFCP_DBF_INVALID_LUN   0xFFFFFFFFFFFFFFFFull
 
+enum zfcp_dbf_pseudo_erp_act_type {
+       ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD = 0xff,
+       ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL = 0xfe,
+};
+
 /**
  * struct zfcp_dbf_rec_trigger - trace record for triggered recovery action
  * @ready: number of ready recovery actions
@@ -110,6 +115,7 @@ struct zfcp_dbf_san {
        u32 d_id;
 #define ZFCP_DBF_SAN_MAX_PAYLOAD (FC_CT_HDR_LEN + 32)
        char payload[ZFCP_DBF_SAN_MAX_PAYLOAD];
+       u16 pl_len;
 } __packed;
 
 /**
@@ -126,6 +132,8 @@ struct zfcp_dbf_hba_res {
        u8  prot_status_qual[FSF_PROT_STATUS_QUAL_SIZE];
        u32 fsf_status;
        u8  fsf_status_qual[FSF_STATUS_QUALIFIER_SIZE];
+       u32 port_handle;
+       u32 lun_handle;
 } __packed;
 
 /**
@@ -279,7 +287,7 @@ static inline
 void zfcp_dbf_hba_fsf_resp(char *tag, int level, struct zfcp_fsf_req *req)
 {
        if (debug_level_enabled(req->adapter->dbf->hba, level))
-               zfcp_dbf_hba_fsf_res(tag, req);
+               zfcp_dbf_hba_fsf_res(tag, level, req);
 }
 
 /**
@@ -318,7 +326,7 @@ void _zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *scmd,
                                        scmd->device->host->hostdata[0];
 
        if (debug_level_enabled(adapter->dbf->scsi, level))
-               zfcp_dbf_scsi(tag, scmd, req);
+               zfcp_dbf_scsi(tag, level, scmd, req);
 }
 
 /**
index 3fb410977014f81821e5dd16f65eebcd7f43c4d7..a59d678125bd0e0ad0bd1ca74b0d42985abb25d8 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Error Recovery Procedures (ERP).
  *
- * Copyright IBM Corp. 2002, 2010
+ * Copyright IBM Corp. 2002, 2015
  */
 
 #define KMSG_COMPONENT "zfcp"
@@ -1217,8 +1217,14 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
                break;
 
        case ZFCP_ERP_ACTION_REOPEN_PORT:
-               if (result == ZFCP_ERP_SUCCEEDED)
-                       zfcp_scsi_schedule_rport_register(port);
+               /* This switch case might also happen after a forced reopen
+                * was successfully done and thus overwritten with a new
+                * non-forced reopen at `ersfs_2'. In this case, we must not
+                * do the clean-up of the non-forced version.
+                */
+               if (act->step != ZFCP_ERP_STEP_UNINITIALIZED)
+                       if (result == ZFCP_ERP_SUCCEEDED)
+                               zfcp_scsi_schedule_rport_register(port);
                /* fall through */
        case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
                put_device(&port->dev);
index 5b500652572b7c1395cc801c146f41535d310f31..c8fed9fa1cca3680015913162ce3ddfee50c85b1 100644 (file)
@@ -3,7 +3,7 @@
  *
  * External function declarations.
  *
- * Copyright IBM Corp. 2002, 2010
+ * Copyright IBM Corp. 2002, 2015
  */
 
 #ifndef ZFCP_EXT_H
@@ -35,8 +35,9 @@ extern void zfcp_dbf_adapter_unregister(struct zfcp_adapter *);
 extern void zfcp_dbf_rec_trig(char *, struct zfcp_adapter *,
                              struct zfcp_port *, struct scsi_device *, u8, u8);
 extern void zfcp_dbf_rec_run(char *, struct zfcp_erp_action *);
+extern void zfcp_dbf_rec_run_wka(char *, struct zfcp_fc_wka_port *, u64);
 extern void zfcp_dbf_hba_fsf_uss(char *, struct zfcp_fsf_req *);
-extern void zfcp_dbf_hba_fsf_res(char *, struct zfcp_fsf_req *);
+extern void zfcp_dbf_hba_fsf_res(char *, int, struct zfcp_fsf_req *);
 extern void zfcp_dbf_hba_bit_err(char *, struct zfcp_fsf_req *);
 extern void zfcp_dbf_hba_berr(struct zfcp_dbf *, struct zfcp_fsf_req *);
 extern void zfcp_dbf_hba_def_err(struct zfcp_adapter *, u64, u16, void **);
@@ -44,7 +45,8 @@ extern void zfcp_dbf_hba_basic(char *, struct zfcp_adapter *);
 extern void zfcp_dbf_san_req(char *, struct zfcp_fsf_req *, u32);
 extern void zfcp_dbf_san_res(char *, struct zfcp_fsf_req *);
 extern void zfcp_dbf_san_in_els(char *, struct zfcp_fsf_req *);
-extern void zfcp_dbf_scsi(char *, struct scsi_cmnd *, struct zfcp_fsf_req *);
+extern void zfcp_dbf_scsi(char *, int, struct scsi_cmnd *,
+                         struct zfcp_fsf_req *);
 
 /* zfcp_erp.c */
 extern void zfcp_erp_set_adapter_status(struct zfcp_adapter *, u32);
index 522a633c866a8b1e464ec857f179365e68bb6530..75f820ca17b79b0574e3afd91df18998a3438c30 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Implementation of FSF commands.
  *
- * Copyright IBM Corp. 2002, 2013
+ * Copyright IBM Corp. 2002, 2015
  */
 
 #define KMSG_COMPONENT "zfcp"
@@ -508,7 +508,10 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
                fc_host_port_type(shost) = FC_PORTTYPE_PTP;
                break;
        case FSF_TOPO_FABRIC:
-               fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
+               if (bottom->connection_features & FSF_FEATURE_NPIV_MODE)
+                       fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
+               else
+                       fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
                break;
        case FSF_TOPO_AL:
                fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
@@ -613,7 +616,6 @@ static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
 
        if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) {
                fc_host_permanent_port_name(shost) = bottom->wwpn;
-               fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
        } else
                fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
        fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
@@ -982,8 +984,12 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
        if (zfcp_adapter_multi_buffer_active(adapter)) {
                if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req))
                        return -EIO;
+               qtcb->bottom.support.req_buf_length =
+                       zfcp_qdio_real_bytes(sg_req);
                if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp))
                        return -EIO;
+               qtcb->bottom.support.resp_buf_length =
+                       zfcp_qdio_real_bytes(sg_resp);
 
                zfcp_qdio_set_data_div(qdio, &req->qdio_req,
                                        zfcp_qdio_sbale_count(sg_req));
@@ -1073,6 +1079,7 @@ int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
 
        req->handler = zfcp_fsf_send_ct_handler;
        req->qtcb->header.port_handle = wka_port->handle;
+       ct->d_id = wka_port->d_id;
        req->data = ct;
 
        zfcp_dbf_san_req("fssct_1", req, wka_port->d_id);
@@ -1169,6 +1176,7 @@ int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
 
        hton24(req->qtcb->bottom.support.d_id, d_id);
        req->handler = zfcp_fsf_send_els_handler;
+       els->d_id = d_id;
        req->data = els;
 
        zfcp_dbf_san_req("fssels1", req, d_id);
@@ -1575,7 +1583,7 @@ out:
 int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
 {
        struct zfcp_qdio *qdio = wka_port->adapter->qdio;
-       struct zfcp_fsf_req *req;
+       struct zfcp_fsf_req *req = NULL;
        int retval = -EIO;
 
        spin_lock_irq(&qdio->req_q_lock);
@@ -1604,6 +1612,8 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
                zfcp_fsf_req_free(req);
 out:
        spin_unlock_irq(&qdio->req_q_lock);
+       if (req && !IS_ERR(req))
+               zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req->req_id);
        return retval;
 }
 
@@ -1628,7 +1638,7 @@ static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
 int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
 {
        struct zfcp_qdio *qdio = wka_port->adapter->qdio;
-       struct zfcp_fsf_req *req;
+       struct zfcp_fsf_req *req = NULL;
        int retval = -EIO;
 
        spin_lock_irq(&qdio->req_q_lock);
@@ -1657,6 +1667,8 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
                zfcp_fsf_req_free(req);
 out:
        spin_unlock_irq(&qdio->req_q_lock);
+       if (req && !IS_ERR(req))
+               zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req->req_id);
        return retval;
 }
 
index 57ae3ae1046d126d6dfe6769e2885bc8f210a7a9..be1c04b334c51f678d643e4c488173f8fd6be0ee 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Interface to the FSF support functions.
  *
- * Copyright IBM Corp. 2002, 2010
+ * Copyright IBM Corp. 2002, 2015
  */
 
 #ifndef FSF_H
@@ -436,6 +436,7 @@ struct zfcp_blk_drv_data {
  * @handler_data: data passed to handler function
  * @port: Optional pointer to port for zfcp internal ELS (only test link ADISC)
  * @status: used to pass error status to calling function
+ * @d_id: Destination ID of either open WKA port for CT or of D_ID for ELS
  */
 struct zfcp_fsf_ct_els {
        struct scatterlist *req;
@@ -444,6 +445,7 @@ struct zfcp_fsf_ct_els {
        void *handler_data;
        struct zfcp_port *port;
        int status;
+       u32 d_id;
 };
 
 #endif                         /* FSF_H */
index b3c6ff49103b851f6467da7ff1fcb361865e2698..9069f98a18172e754c943010654de65e91c2fb7c 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Interface to Linux SCSI midlayer.
  *
- * Copyright IBM Corp. 2002, 2013
+ * Copyright IBM Corp. 2002, 2015
  */
 
 #define KMSG_COMPONENT "zfcp"
@@ -556,6 +556,9 @@ static void zfcp_scsi_rport_register(struct zfcp_port *port)
        ids.port_id = port->d_id;
        ids.roles = FC_RPORT_ROLE_FCP_TARGET;
 
+       zfcp_dbf_rec_trig("scpaddy", port->adapter, port, NULL,
+                         ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD,
+                         ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD);
        rport = fc_remote_port_add(port->adapter->scsi_host, 0, &ids);
        if (!rport) {
                dev_err(&port->adapter->ccw_device->dev,
@@ -577,6 +580,9 @@ static void zfcp_scsi_rport_block(struct zfcp_port *port)
        struct fc_rport *rport = port->rport;
 
        if (rport) {
+               zfcp_dbf_rec_trig("scpdely", port->adapter, port, NULL,
+                                 ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL,
+                                 ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL);
                fc_remote_port_delete(rport);
                port->rport = NULL;
        }
index 7d1b4317eccc1ddea1b380db3900052b731377f7..75de1dc725a17572dd45552d8de68ac10c02e8aa 100644 (file)
@@ -540,6 +540,7 @@ config SCSI_ARCMSR
 source "drivers/scsi/esas2r/Kconfig"
 source "drivers/scsi/megaraid/Kconfig.megaraid"
 source "drivers/scsi/mpt3sas/Kconfig"
+source "drivers/scsi/smartpqi/Kconfig"
 source "drivers/scsi/ufs/Kconfig"
 
 config SCSI_HPTIOP
index d5397987e731b7affcc507ece87f0360bb70327b..fc0d9b8f2bdf29ac496803ea5ff7e876b26fa371 100644 (file)
@@ -94,6 +94,7 @@ obj-$(CONFIG_SCSI_PAS16)      += pas16.o
 obj-$(CONFIG_SCSI_T128)                += t128.o
 obj-$(CONFIG_SCSI_DMX3191D)    += dmx3191d.o
 obj-$(CONFIG_SCSI_HPSA)                += hpsa.o
+obj-$(CONFIG_SCSI_SMARTPQI)    += smartpqi/
 obj-$(CONFIG_SCSI_DTC3280)     += dtc.o
 obj-$(CONFIG_SCSI_SYM53C8XX_2) += sym53c8xx_2/
 obj-$(CONFIG_SCSI_ZALON)       += zalon7xx.o
index 43908bbb3b23649fe383841f0a9d599c7cd8b0ae..b58c6a38fc32c6ab8fac3f96d00cafb2f257bc20 100644 (file)
@@ -230,13 +230,6 @@ static int NCR5380_poll_politely2(struct Scsi_Host *instance,
        return -ETIMEDOUT;
 }
 
-static inline int NCR5380_poll_politely(struct Scsi_Host *instance,
-                                        int reg, int bit, int val, int wait)
-{
-       return NCR5380_poll_politely2(instance, reg, bit, val,
-                                               reg, bit, val, wait);
-}
-
 #if NDEBUG
 static struct {
        unsigned char mask;
index c60728785d892a2d07f53f155811b89ce3e623ad..2ed61b5d40e604245d8509032dddd6d0005ce317 100644 (file)
@@ -292,8 +292,14 @@ static void NCR5380_reselect(struct Scsi_Host *instance);
 static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *, struct scsi_cmnd *);
 static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data);
 static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data);
-static int NCR5380_poll_politely(struct Scsi_Host *, int, int, int, int);
 static int NCR5380_poll_politely2(struct Scsi_Host *, int, int, int, int, int, int, int);
 
+static inline int NCR5380_poll_politely(struct Scsi_Host *instance,
+                                       int reg, int bit, int val, int wait)
+{
+       return NCR5380_poll_politely2(instance, reg, bit, val,
+                                               reg, bit, val, wait);
+}
+
 #endif                         /* __KERNEL__ */
 #endif                         /* NCR5380_H */
index 0fdc98bc23388d0da9fb299fab528a20bd58b9e2..7c713f797535315b9c10e760c1226b84c9a6f34e 100644 (file)
@@ -632,7 +632,7 @@ int asd_init_hw(struct asd_ha_struct *asd_ha)
                           pci_name(asd_ha->pcidev));
                return err;
        }
-       pci_write_config_dword(asd_ha->pcidev, PCIC_HSTPCIX_CNTRL,
+       err = pci_write_config_dword(asd_ha->pcidev, PCIC_HSTPCIX_CNTRL,
                                        v | SC_TMR_DIS);
        if (err) {
                asd_printk("couldn't disable split completion timer of %s\n",
index ee5ace87353580b9005341e9442d5797a2fb4f18..b1d0fdc5d5e106dee779bb5c676142dfddb80e20 100644 (file)
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2015 Emulex
+ * Copyright (C) 2005 - 2016 Broadcom
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -8,7 +8,7 @@
  * Public License is included in this distribution in the file called COPYING.
  *
  * Contact Information:
- * linux-drivers@avagotech.com
+ * linux-drivers@broadcom.com
  *
  * Emulex
  * 3333 Susan Street
@@ -89,7 +89,7 @@ struct be_aic_obj {           /* Adaptive interrupt coalescing (AIC) info */
        u32 max_eqd;            /* in usecs */
        u32 prev_eqd;           /* in usecs */
        u32 et_eqd;             /* configured val when aic is off */
-       ulong jiffs;
+       ulong jiffies;
        u64 eq_prev;            /* Used to calculate eqe */
 };
 
@@ -100,7 +100,7 @@ struct be_eq_obj {
        struct be_queue_info q;
        struct beiscsi_hba *phba;
        struct be_queue_info *cq;
-       struct work_struct work_cqs; /* Work Item */
+       struct work_struct mcc_work; /* Work Item */
        struct irq_poll iopoll;
 };
 
@@ -111,8 +111,11 @@ struct be_mcc_obj {
 
 struct beiscsi_mcc_tag_state {
        unsigned long tag_state;
-#define MCC_TAG_STATE_RUNNING  1
-#define MCC_TAG_STATE_TIMEOUT  2
+#define MCC_TAG_STATE_RUNNING  0
+#define MCC_TAG_STATE_TIMEOUT  1
+#define MCC_TAG_STATE_ASYNC    2
+#define MCC_TAG_STATE_IGNORE   3
+       void (*cbfn)(struct beiscsi_hba *, unsigned int);
        struct be_dma_mem tag_mem_state;
 };
 
index a55eaeea37e72cf42e592356191fad0900cdc741..be65da2988fbca99d3926f30a6ff4f03619d1336 100644 (file)
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2015 Emulex
+ * Copyright (C) 2005 - 2016 Broadcom
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -8,7 +8,7 @@
  * Public License is included in this distribution in the file called COPYING.
  *
  * Contact Information:
- * linux-drivers@avagotech.com
+ * linux-drivers@broadcom.com
  *
  * Emulex
  * 3333 Susan Street
 #include "be.h"
 #include "be_mgmt.h"
 
-int beiscsi_pci_soft_reset(struct beiscsi_hba *phba)
-{
-       u32 sreset;
-       u8 *pci_reset_offset = 0;
-       u8 *pci_online0_offset = 0;
-       u8 *pci_online1_offset = 0;
-       u32 pconline0 = 0;
-       u32 pconline1 = 0;
-       u32 i;
-
-       pci_reset_offset = (u8 *)phba->pci_va + BE2_SOFT_RESET;
-       pci_online0_offset = (u8 *)phba->pci_va + BE2_PCI_ONLINE0;
-       pci_online1_offset = (u8 *)phba->pci_va + BE2_PCI_ONLINE1;
-       sreset = readl((void *)pci_reset_offset);
-       sreset |= BE2_SET_RESET;
-       writel(sreset, (void *)pci_reset_offset);
-
-       i = 0;
-       while (sreset & BE2_SET_RESET) {
-               if (i > 64)
-                       break;
-               msleep(100);
-               sreset = readl((void *)pci_reset_offset);
-               i++;
-       }
-
-       if (sreset & BE2_SET_RESET) {
-               printk(KERN_ERR DRV_NAME
-                      " Soft Reset  did not deassert\n");
-               return -EIO;
-       }
-       pconline1 = BE2_MPU_IRAM_ONLINE;
-       writel(pconline0, (void *)pci_online0_offset);
-       writel(pconline1, (void *)pci_online1_offset);
-
-       sreset |= BE2_SET_RESET;
-       writel(sreset, (void *)pci_reset_offset);
-
-       i = 0;
-       while (sreset & BE2_SET_RESET) {
-               if (i > 64)
-                       break;
-               msleep(1);
-               sreset = readl((void *)pci_reset_offset);
-               i++;
-       }
-       if (sreset & BE2_SET_RESET) {
-               printk(KERN_ERR DRV_NAME
-                      " MPU Online Soft Reset did not deassert\n");
-               return -EIO;
-       }
-       return 0;
-}
-
-int be_chk_reset_complete(struct beiscsi_hba *phba)
-{
-       unsigned int num_loop;
-       u8 *mpu_sem = 0;
-       u32 status;
-
-       num_loop = 1000;
-       mpu_sem = (u8 *)phba->csr_va + MPU_EP_SEMAPHORE;
-       msleep(5000);
-
-       while (num_loop) {
-               status = readl((void *)mpu_sem);
-
-               if ((status & 0x80000000) || (status & 0x0000FFFF) == 0xC000)
-                       break;
-               msleep(60);
-               num_loop--;
-       }
-
-       if ((status & 0x80000000) || (!num_loop)) {
-               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-                           "BC_%d : Failed in be_chk_reset_complete"
-                           "status = 0x%x\n", status);
-               return -EIO;
-       }
-
-       return 0;
-}
-
-unsigned int alloc_mcc_tag(struct beiscsi_hba *phba)
-{
-       unsigned int tag = 0;
+/* UE Status Low CSR */
+static const char * const desc_ue_status_low[] = {
+       "CEV",
+       "CTX",
+       "DBUF",
+       "ERX",
+       "Host",
+       "MPU",
+       "NDMA",
+       "PTC ",
+       "RDMA ",
+       "RXF ",
+       "RXIPS ",
+       "RXULP0 ",
+       "RXULP1 ",
+       "RXULP2 ",
+       "TIM ",
+       "TPOST ",
+       "TPRE ",
+       "TXIPS ",
+       "TXULP0 ",
+       "TXULP1 ",
+       "UC ",
+       "WDMA ",
+       "TXULP2 ",
+       "HOST1 ",
+       "P0_OB_LINK ",
+       "P1_OB_LINK ",
+       "HOST_GPIO ",
+       "MBOX ",
+       "AXGMAC0",
+       "AXGMAC1",
+       "JTAG",
+       "MPU_INTPEND"
+};
 
-       spin_lock(&phba->ctrl.mcc_lock);
-       if (phba->ctrl.mcc_tag_available) {
-               tag = phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index];
-               phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index] = 0;
-               phba->ctrl.mcc_tag_status[tag] = 0;
-               phba->ctrl.ptag_state[tag].tag_state = 0;
-       }
-       if (tag) {
-               phba->ctrl.mcc_tag_available--;
-               if (phba->ctrl.mcc_alloc_index == (MAX_MCC_CMD - 1))
-                       phba->ctrl.mcc_alloc_index = 0;
-               else
-                       phba->ctrl.mcc_alloc_index++;
-       }
-       spin_unlock(&phba->ctrl.mcc_lock);
-       return tag;
-}
+/* UE Status High CSR */
+static const char * const desc_ue_status_hi[] = {
+       "LPCMEMHOST",
+       "MGMT_MAC",
+       "PCS0ONLINE",
+       "MPU_IRAM",
+       "PCS1ONLINE",
+       "PCTL0",
+       "PCTL1",
+       "PMEM",
+       "RR",
+       "TXPB",
+       "RXPP",
+       "XAUI",
+       "TXP",
+       "ARM",
+       "IPC",
+       "HOST2",
+       "HOST3",
+       "HOST4",
+       "HOST5",
+       "HOST6",
+       "HOST7",
+       "HOST8",
+       "HOST9",
+       "NETC",
+       "Unknown",
+       "Unknown",
+       "Unknown",
+       "Unknown",
+       "Unknown",
+       "Unknown",
+       "Unknown",
+       "Unknown"
+};
 
 struct be_mcc_wrb *alloc_mcc_wrb(struct beiscsi_hba *phba,
                                 unsigned int *ref_tag)
@@ -133,7 +100,7 @@ struct be_mcc_wrb *alloc_mcc_wrb(struct beiscsi_hba *phba,
        struct be_mcc_wrb *wrb = NULL;
        unsigned int tag;
 
-       spin_lock_bh(&phba->ctrl.mcc_lock);
+       spin_lock(&phba->ctrl.mcc_lock);
        if (mccq->used == mccq->len) {
                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT |
                            BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
@@ -160,6 +127,7 @@ struct be_mcc_wrb *alloc_mcc_wrb(struct beiscsi_hba *phba,
        phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index] = 0;
        phba->ctrl.mcc_tag_status[tag] = 0;
        phba->ctrl.ptag_state[tag].tag_state = 0;
+       phba->ctrl.ptag_state[tag].cbfn = NULL;
        phba->ctrl.mcc_tag_available--;
        if (phba->ctrl.mcc_alloc_index == (MAX_MCC_CMD - 1))
                phba->ctrl.mcc_alloc_index = 0;
@@ -174,7 +142,7 @@ struct be_mcc_wrb *alloc_mcc_wrb(struct beiscsi_hba *phba,
        mccq->used++;
 
 alloc_failed:
-       spin_unlock_bh(&phba->ctrl.mcc_lock);
+       spin_unlock(&phba->ctrl.mcc_lock);
        return wrb;
 }
 
@@ -182,7 +150,7 @@ void free_mcc_wrb(struct be_ctrl_info *ctrl, unsigned int tag)
 {
        struct be_queue_info *mccq = &ctrl->mcc_obj.q;
 
-       spin_lock_bh(&ctrl->mcc_lock);
+       spin_lock(&ctrl->mcc_lock);
        tag = tag & MCC_Q_CMD_TAG_MASK;
        ctrl->mcc_tag[ctrl->mcc_free_index] = tag;
        if (ctrl->mcc_free_index == (MAX_MCC_CMD - 1))
@@ -191,16 +159,71 @@ void free_mcc_wrb(struct be_ctrl_info *ctrl, unsigned int tag)
                ctrl->mcc_free_index++;
        ctrl->mcc_tag_available++;
        mccq->used--;
-       spin_unlock_bh(&ctrl->mcc_lock);
+       spin_unlock(&ctrl->mcc_lock);
 }
 
-/**
- * beiscsi_fail_session(): Closing session with appropriate error
- * @cls_session: ptr to session
- **/
-void beiscsi_fail_session(struct iscsi_cls_session *cls_session)
+/*
+ * beiscsi_mcc_compl_status - Return the status of MCC completion
+ * @phba: Driver private structure
+ * @tag: Tag for the MBX Command
+ * @wrb: the WRB used for the MBX Command
+ * @mbx_cmd_mem: ptr to memory allocated for MBX Cmd
+ *
+ * return
+ * Success: 0
+ * Failure: Non-Zero
+ */
+int __beiscsi_mcc_compl_status(struct beiscsi_hba *phba,
+                              unsigned int tag,
+                              struct be_mcc_wrb **wrb,
+                              struct be_dma_mem *mbx_cmd_mem)
 {
-       iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
+       struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
+       uint16_t status = 0, addl_status = 0, wrb_num = 0;
+       struct be_cmd_resp_hdr *mbx_resp_hdr;
+       struct be_cmd_req_hdr *mbx_hdr;
+       struct be_mcc_wrb *temp_wrb;
+       uint32_t mcc_tag_status;
+       int rc = 0;
+
+       mcc_tag_status = phba->ctrl.mcc_tag_status[tag];
+       status = (mcc_tag_status & CQE_STATUS_MASK);
+       addl_status = ((mcc_tag_status & CQE_STATUS_ADDL_MASK) >>
+                       CQE_STATUS_ADDL_SHIFT);
+
+       if (mbx_cmd_mem) {
+               mbx_hdr = (struct be_cmd_req_hdr *)mbx_cmd_mem->va;
+       } else {
+               wrb_num = (mcc_tag_status & CQE_STATUS_WRB_MASK) >>
+                         CQE_STATUS_WRB_SHIFT;
+               temp_wrb = (struct be_mcc_wrb *)queue_get_wrb(mccq, wrb_num);
+               mbx_hdr = embedded_payload(temp_wrb);
+
+               if (wrb)
+                       *wrb = temp_wrb;
+       }
+
+       if (status || addl_status) {
+               beiscsi_log(phba, KERN_WARNING,
+                           BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
+                           BEISCSI_LOG_CONFIG,
+                           "BC_%d : MBX Cmd Failed for Subsys : %d Opcode : %d with Status : %d and Extd_Status : %d\n",
+                           mbx_hdr->subsystem, mbx_hdr->opcode,
+                           status, addl_status);
+               rc = -EIO;
+               if (status == MCC_STATUS_INSUFFICIENT_BUFFER) {
+                       mbx_resp_hdr = (struct be_cmd_resp_hdr *)mbx_hdr;
+                       beiscsi_log(phba, KERN_WARNING,
+                                   BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
+                                   BEISCSI_LOG_CONFIG,
+                                   "BC_%d : Insufficient Buffer Error Resp_Len : %d Actual_Resp_Len : %d\n",
+                                   mbx_resp_hdr->response_length,
+                                   mbx_resp_hdr->actual_resp_len);
+                       rc = -EAGAIN;
+               }
+       }
+
+       return rc;
 }
 
 /*
@@ -217,26 +240,34 @@ void beiscsi_fail_session(struct iscsi_cls_session *cls_session)
  * Failure: Non-Zero
  **/
 int beiscsi_mccq_compl_wait(struct beiscsi_hba *phba,
-                           uint32_t tag, struct be_mcc_wrb **wrb,
+                           unsigned int tag,
+                           struct be_mcc_wrb **wrb,
                            struct be_dma_mem *mbx_cmd_mem)
 {
        int rc = 0;
-       uint32_t mcc_tag_status;
-       uint16_t status = 0, addl_status = 0, wrb_num = 0;
-       struct be_mcc_wrb *temp_wrb;
-       struct be_cmd_req_hdr *mbx_hdr;
-       struct be_cmd_resp_hdr *mbx_resp_hdr;
-       struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
 
-       if (beiscsi_error(phba))
-               return -EPERM;
+       if (beiscsi_hba_in_error(phba)) {
+               clear_bit(MCC_TAG_STATE_RUNNING,
+                         &phba->ctrl.ptag_state[tag].tag_state);
+               return -EIO;
+       }
 
        /* wait for the mccq completion */
-       rc = wait_event_interruptible_timeout(
-                               phba->ctrl.mcc_wait[tag],
-                               phba->ctrl.mcc_tag_status[tag],
-                               msecs_to_jiffies(
-                               BEISCSI_HOST_MBX_TIMEOUT));
+       rc = wait_event_interruptible_timeout(phba->ctrl.mcc_wait[tag],
+                                             phba->ctrl.mcc_tag_status[tag],
+                                             msecs_to_jiffies(
+                                               BEISCSI_HOST_MBX_TIMEOUT));
+       /**
+        * Return EIO if port is being disabled. Associated DMA memory, if any,
+        * is freed by the caller. When port goes offline, MCCQ is cleaned up
+        * so does WRB.
+        */
+       if (!test_bit(BEISCSI_HBA_ONLINE, &phba->state)) {
+               clear_bit(MCC_TAG_STATE_RUNNING,
+                         &phba->ctrl.ptag_state[tag].tag_state);
+               return -EIO;
+       }
+
        /**
         * If MBOX cmd timeout expired, tag and resource allocated
         * for cmd is not freed until FW returns completion.
@@ -270,47 +301,7 @@ int beiscsi_mccq_compl_wait(struct beiscsi_hba *phba,
                return -EBUSY;
        }
 
-       rc = 0;
-       mcc_tag_status = phba->ctrl.mcc_tag_status[tag];
-       status = (mcc_tag_status & CQE_STATUS_MASK);
-       addl_status = ((mcc_tag_status & CQE_STATUS_ADDL_MASK) >>
-                       CQE_STATUS_ADDL_SHIFT);
-
-       if (mbx_cmd_mem) {
-               mbx_hdr = (struct be_cmd_req_hdr *)mbx_cmd_mem->va;
-       } else {
-               wrb_num = (mcc_tag_status & CQE_STATUS_WRB_MASK) >>
-                          CQE_STATUS_WRB_SHIFT;
-               temp_wrb = (struct be_mcc_wrb *)queue_get_wrb(mccq, wrb_num);
-               mbx_hdr = embedded_payload(temp_wrb);
-
-               if (wrb)
-                       *wrb = temp_wrb;
-       }
-
-       if (status || addl_status) {
-               beiscsi_log(phba, KERN_WARNING,
-                           BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
-                           BEISCSI_LOG_CONFIG,
-                           "BC_%d : MBX Cmd Failed for "
-                           "Subsys : %d Opcode : %d with "
-                           "Status : %d and Extd_Status : %d\n",
-                           mbx_hdr->subsystem,
-                           mbx_hdr->opcode,
-                           status, addl_status);
-               rc = -EIO;
-               if (status == MCC_STATUS_INSUFFICIENT_BUFFER) {
-                       mbx_resp_hdr = (struct be_cmd_resp_hdr *) mbx_hdr;
-                       beiscsi_log(phba, KERN_WARNING,
-                                   BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
-                                   BEISCSI_LOG_CONFIG,
-                                   "BC_%d : Insufficient Buffer Error "
-                                   "Resp_Len : %d Actual_Resp_Len : %d\n",
-                                   mbx_resp_hdr->response_length,
-                                   mbx_resp_hdr->actual_resp_len);
-                       rc = -EAGAIN;
-               }
-       }
+       rc = __beiscsi_mcc_compl_status(phba, tag, wrb, mbx_cmd_mem);
 
        free_mcc_wrb(&phba->ctrl, tag);
        return rc;
@@ -330,11 +321,10 @@ int beiscsi_mccq_compl_wait(struct beiscsi_hba *phba,
 static int beiscsi_process_mbox_compl(struct be_ctrl_info *ctrl,
                                      struct be_mcc_compl *compl)
 {
-       u16 compl_status, extd_status;
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
        struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
        struct be_cmd_req_hdr *hdr = embedded_payload(wrb);
-       struct be_cmd_resp_hdr *resp_hdr;
+       u16 compl_status, extd_status;
 
        /**
         * To check if valid bit is set, check the entire word as we don't know
@@ -368,14 +358,7 @@ static int beiscsi_process_mbox_compl(struct be_ctrl_info *ctrl,
        beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
                    "BC_%d : error in cmd completion: Subsystem : %d Opcode : %d status(compl/extd)=%d/%d\n",
                    hdr->subsystem, hdr->opcode, compl_status, extd_status);
-
-       if (compl_status == MCC_STATUS_INSUFFICIENT_BUFFER) {
-               /* if status is insufficient buffer, check the length */
-               resp_hdr = (struct be_cmd_resp_hdr *) hdr;
-               if (resp_hdr->response_length)
-                       return 0;
-       }
-       return -EINVAL;
+       return compl_status;
 }
 
 static void beiscsi_process_async_link(struct beiscsi_hba *phba,
@@ -391,18 +374,19 @@ static void beiscsi_process_async_link(struct beiscsi_hba *phba,
         * This has been newly introduced in SKH-R Firmware 10.0.338.45.
         **/
        if (evt->port_link_status & BE_ASYNC_LINK_UP_MASK) {
-               phba->state = BE_ADAPTER_LINK_UP | BE_ADAPTER_CHECK_BOOT;
-               phba->get_boot = BE_GET_BOOT_RETRIES;
+               set_bit(BEISCSI_HBA_LINK_UP, &phba->state);
+               if (test_bit(BEISCSI_HBA_BOOT_FOUND, &phba->state))
+                       beiscsi_start_boot_work(phba, BE_BOOT_INVALID_SHANDLE);
                __beiscsi_log(phba, KERN_ERR,
                              "BC_%d : Link Up on Port %d tag 0x%x\n",
                              evt->physical_port, evt->event_tag);
        } else {
-               phba->state = BE_ADAPTER_LINK_DOWN;
+               clear_bit(BEISCSI_HBA_LINK_UP, &phba->state);
                __beiscsi_log(phba, KERN_ERR,
                              "BC_%d : Link Down on Port %d tag 0x%x\n",
                              evt->physical_port, evt->event_tag);
                iscsi_host_for_each_session(phba->shost,
-                                           beiscsi_fail_session);
+                                           beiscsi_session_fail);
        }
 }
 
@@ -482,8 +466,8 @@ void beiscsi_process_async_event(struct beiscsi_hba *phba,
                beiscsi_process_async_link(phba, compl);
                break;
        case ASYNC_EVENT_CODE_ISCSI:
-               phba->state |= BE_ADAPTER_CHECK_BOOT;
-               phba->get_boot = BE_GET_BOOT_RETRIES;
+               if (test_bit(BEISCSI_HBA_BOOT_FOUND, &phba->state))
+                       beiscsi_start_boot_work(phba, BE_BOOT_INVALID_SHANDLE);
                sev = KERN_ERR;
                break;
        case ASYNC_EVENT_CODE_SLI:
@@ -519,6 +503,9 @@ int beiscsi_process_mcc_compl(struct be_ctrl_info *ctrl,
                return 0;
        }
 
+       /* end MCC with this tag */
+       clear_bit(MCC_TAG_STATE_RUNNING, &ctrl->ptag_state[tag].tag_state);
+
        if (test_bit(MCC_TAG_STATE_TIMEOUT, &ctrl->ptag_state[tag].tag_state)) {
                beiscsi_log(phba, KERN_WARNING,
                            BEISCSI_LOG_MBOX | BEISCSI_LOG_INIT |
@@ -529,9 +516,11 @@ int beiscsi_process_mcc_compl(struct be_ctrl_info *ctrl,
                 * Only for non-embedded cmd, PCI resource is allocated.
                 **/
                tag_mem = &ctrl->ptag_state[tag].tag_mem_state;
-               if (tag_mem->size)
+               if (tag_mem->size) {
                        pci_free_consistent(ctrl->pdev, tag_mem->size,
                                        tag_mem->va, tag_mem->dma);
+                       tag_mem->size = 0;
+               }
                free_mcc_wrb(ctrl, tag);
                return 0;
        }
@@ -550,57 +539,25 @@ int beiscsi_process_mcc_compl(struct be_ctrl_info *ctrl,
                                     CQE_STATUS_ADDL_MASK;
        ctrl->mcc_tag_status[tag] |= (compl_status & CQE_STATUS_MASK);
 
-       /* write ordering forced in wake_up_interruptible */
-       clear_bit(MCC_TAG_STATE_RUNNING, &ctrl->ptag_state[tag].tag_state);
-       wake_up_interruptible(&ctrl->mcc_wait[tag]);
-       return 0;
-}
-
-/*
- * be_mcc_compl_poll()- Wait for MBX completion
- * @phba: driver private structure
- *
- * Wait till no more pending mcc requests are present
- *
- * return
- * Success: 0
- * Failure: Non-Zero
- *
- **/
-int be_mcc_compl_poll(struct beiscsi_hba *phba, unsigned int tag)
-{
-       struct be_ctrl_info *ctrl = &phba->ctrl;
-       int i;
-
-       if (!test_bit(MCC_TAG_STATE_RUNNING,
-                     &ctrl->ptag_state[tag].tag_state)) {
-               beiscsi_log(phba, KERN_ERR,
-                           BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
-                           "BC_%d: tag %u state not running\n", tag);
+       if (test_bit(MCC_TAG_STATE_ASYNC, &ctrl->ptag_state[tag].tag_state)) {
+               if (ctrl->ptag_state[tag].cbfn)
+                       ctrl->ptag_state[tag].cbfn(phba, tag);
+               else
+                       __beiscsi_log(phba, KERN_ERR,
+                                     "BC_%d : MBX ASYNC command with no callback\n");
+               free_mcc_wrb(ctrl, tag);
                return 0;
        }
-       for (i = 0; i < mcc_timeout; i++) {
-               if (beiscsi_error(phba))
-                       return -EIO;
 
-               beiscsi_process_mcc_cq(phba);
-               /* after polling, wrb and tag need to be released */
-               if (!test_bit(MCC_TAG_STATE_RUNNING,
-                             &ctrl->ptag_state[tag].tag_state)) {
-                       free_mcc_wrb(ctrl, tag);
-                       break;
-               }
-               udelay(100);
-       }
-
-       if (i < mcc_timeout)
+       if (test_bit(MCC_TAG_STATE_IGNORE, &ctrl->ptag_state[tag].tag_state)) {
+               /* just check completion status and free wrb */
+               __beiscsi_mcc_compl_status(phba, tag, NULL, NULL);
+               free_mcc_wrb(ctrl, tag);
                return 0;
+       }
 
-       beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
-                   "BC_%d : FW Timed Out\n");
-       phba->fw_timeout = true;
-       beiscsi_ue_detect(phba);
-       return -EBUSY;
+       wake_up_interruptible(&ctrl->mcc_wait[tag]);
+       return 0;
 }
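
Annotation, not part of the patch: the rewritten completion path clears MCC_TAG_STATE_RUNNING up front, then handles exactly one of three fire-and-forget cases (TIMEOUT frees the DMA buffer and WRB, ASYNC runs the registered callback, IGNORE just records status) before falling through to wake a synchronous waiter. A stub sketch of that dispatch order, with illustrative flag values:

#include <stdio.h>

enum tag_state { TAG_TIMEOUT = 1, TAG_ASYNC = 2, TAG_IGNORE = 4 };

static void dispatch(unsigned int flags)
{
	if (flags & TAG_TIMEOUT) {	/* waiter gave up: free DMA + WRB */
		printf("timed out: free resources, no wakeup\n");
		return;
	}
	if (flags & TAG_ASYNC) {	/* fire-and-forget with callback */
		printf("async: run callback, free WRB\n");
		return;
	}
	if (flags & TAG_IGNORE) {	/* fire-and-forget, status only */
		printf("ignore: record status, free WRB\n");
		return;
	}
	printf("wake up the waiting issuer\n");
}

int main(void)
{
	dispatch(TAG_ASYNC);
	dispatch(0);
	return 0;
}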
 
 void be_mcc_notify(struct beiscsi_hba *phba, unsigned int tag)
@@ -642,7 +599,7 @@ static int be_mbox_db_ready_poll(struct be_ctrl_info *ctrl)
         */
        timeout = jiffies + msecs_to_jiffies(BEISCSI_MBX_RDY_BIT_TIMEOUT);
        do {
-               if (beiscsi_error(phba))
+               if (beiscsi_hba_in_error(phba))
                        return -EIO;
 
                ready = ioread32(db);
@@ -655,16 +612,14 @@ static int be_mbox_db_ready_poll(struct be_ctrl_info *ctrl)
 
                if (time_after(jiffies, timeout))
                        break;
-               msleep(20);
+               /* 1ms sleep is enough in most cases */
+               schedule_timeout_uninterruptible(msecs_to_jiffies(1));
        } while (!ready);
 
        beiscsi_log(phba, KERN_ERR,
                        BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
                        "BC_%d : FW Timed Out\n");
-
-       phba->fw_timeout = true;
-       beiscsi_ue_detect(phba);
-
+       set_bit(BEISCSI_HBA_FW_TIMEOUT, &phba->state);
        return -EBUSY;
 }
 
@@ -679,7 +634,7 @@ static int be_mbox_db_ready_poll(struct be_ctrl_info *ctrl)
  * Success: 0
  * Failure: Non-Zero
  **/
-int be_mbox_notify(struct be_ctrl_info *ctrl)
+static int be_mbox_notify(struct be_ctrl_info *ctrl)
 {
        int status;
        u32 val = 0;
@@ -819,87 +774,6 @@ int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
        return status;
 }
 
-/**
- * be_cmd_fw_initialize()- Initialize FW
- * @ctrl: Pointer to function control structure
- *
- * Send FW initialize pattern for the function.
- *
- * return
- * Success: 0
- * Failure: Non-Zero value
- **/
-int be_cmd_fw_initialize(struct be_ctrl_info *ctrl)
-{
-       struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
-       struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
-       int status;
-       u8 *endian_check;
-
-       mutex_lock(&ctrl->mbox_lock);
-       memset(wrb, 0, sizeof(*wrb));
-
-       endian_check = (u8 *) wrb;
-       *endian_check++ = 0xFF;
-       *endian_check++ = 0x12;
-       *endian_check++ = 0x34;
-       *endian_check++ = 0xFF;
-       *endian_check++ = 0xFF;
-       *endian_check++ = 0x56;
-       *endian_check++ = 0x78;
-       *endian_check++ = 0xFF;
-       be_dws_cpu_to_le(wrb, sizeof(*wrb));
-
-       status = be_mbox_notify(ctrl);
-       if (status)
-               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-                           "BC_%d : be_cmd_fw_initialize Failed\n");
-
-       mutex_unlock(&ctrl->mbox_lock);
-       return status;
-}
-
-/**
- * be_cmd_fw_uninit()- Uinitialize FW
- * @ctrl: Pointer to function control structure
- *
- * Send FW uninitialize pattern for the function
- *
- * return
- * Success: 0
- * Failure: Non-Zero value
- **/
-int be_cmd_fw_uninit(struct be_ctrl_info *ctrl)
-{
-       struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
-       struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
-       int status;
-       u8 *endian_check;
-
-       mutex_lock(&ctrl->mbox_lock);
-       memset(wrb, 0, sizeof(*wrb));
-
-       endian_check = (u8 *) wrb;
-       *endian_check++ = 0xFF;
-       *endian_check++ = 0xAA;
-       *endian_check++ = 0xBB;
-       *endian_check++ = 0xFF;
-       *endian_check++ = 0xFF;
-       *endian_check++ = 0xCC;
-       *endian_check++ = 0xDD;
-       *endian_check = 0xFF;
-
-       be_dws_cpu_to_le(wrb, sizeof(*wrb));
-
-       status = be_mbox_notify(ctrl);
-       if (status)
-               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-                           "BC_%d : be_cmd_fw_uninit Failed\n");
-
-       mutex_unlock(&ctrl->mbox_lock);
-       return status;
-}
-
 int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
                          struct be_queue_info *cq, struct be_queue_info *eq,
                          bool sol_evts, bool no_delay, int coalesce_wm)
@@ -1343,25 +1217,6 @@ error:
        return status;
 }
 
-int beiscsi_cmd_reset_function(struct beiscsi_hba  *phba)
-{
-       struct be_ctrl_info *ctrl = &phba->ctrl;
-       struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
-       struct be_post_sgl_pages_req *req = embedded_payload(wrb);
-       int status;
-
-       mutex_lock(&ctrl->mbox_lock);
-
-       req = embedded_payload(wrb);
-       be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
-       be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-                          OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));
-       status = be_mbox_notify(ctrl);
-
-       mutex_unlock(&ctrl->mbox_lock);
-       return status;
-}
-
 /**
  * be_cmd_set_vlan()- Configure VLAN paramters on the adapter
  * @phba: device priv structure instance
@@ -1402,3 +1257,564 @@ int be_cmd_set_vlan(struct beiscsi_hba *phba,
 
        return tag;
 }
+
+int beiscsi_check_supported_fw(struct be_ctrl_info *ctrl,
+                              struct beiscsi_hba *phba)
+{
+       struct be_dma_mem nonemb_cmd;
+       struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+       struct be_mgmt_controller_attributes *req;
+       struct be_sge *sge = nonembedded_sgl(wrb);
+       int status = 0;
+
+       nonemb_cmd.va = pci_alloc_consistent(ctrl->pdev,
+                               sizeof(struct be_mgmt_controller_attributes),
+                               &nonemb_cmd.dma);
+       if (nonemb_cmd.va == NULL) {
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                           "BG_%d : pci_alloc_consistent failed in %s\n",
+                           __func__);
+               return -ENOMEM;
+       }
+       nonemb_cmd.size = sizeof(struct be_mgmt_controller_attributes);
+       req = nonemb_cmd.va;
+       memset(req, 0, sizeof(*req));
+       mutex_lock(&ctrl->mbox_lock);
+       memset(wrb, 0, sizeof(*wrb));
+       be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
+       be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+                          OPCODE_COMMON_GET_CNTL_ATTRIBUTES, sizeof(*req));
+       sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd.dma));
+       sge->pa_lo = cpu_to_le32(nonemb_cmd.dma & 0xFFFFFFFF);
+       sge->len = cpu_to_le32(nonemb_cmd.size);
+       status = be_mbox_notify(ctrl);
+       if (!status) {
+               struct be_mgmt_controller_attributes_resp *resp = nonemb_cmd.va;
+
+               beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+                           "BG_%d : Firmware Version of CMD : %s\n"
+                           "Firmware Version is : %s\n"
+                           "Developer Build, not performing version check...\n",
+                           resp->params.hba_attribs.flashrom_version_string,
+                           resp->params.hba_attribs.firmware_version_string);
+
+               phba->fw_config.iscsi_features =
+                               resp->params.hba_attribs.iscsi_features;
+               beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+                           "BM_%d : phba->fw_config.iscsi_features = %d\n",
+                           phba->fw_config.iscsi_features);
+               memcpy(phba->fw_ver_str, resp->params.hba_attribs.
+                      firmware_version_string, BEISCSI_VER_STRLEN);
+       } else {
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                           "BG_%d : Failed in beiscsi_check_supported_fw\n");
+       }
+       mutex_unlock(&ctrl->mbox_lock);
+       if (nonemb_cmd.va)
+               pci_free_consistent(ctrl->pdev, nonemb_cmd.size,
+                                   nonemb_cmd.va, nonemb_cmd.dma);
+
+       return status;
+}
+
+/**
+ * beiscsi_get_fw_config()- Get the FW config for the function
+ * @ctrl: ptr to Ctrl Info
+ * @phba: ptr to the dev priv structure
+ *
+ * Get the FW config and resources available for the function.
+ * The resources are created based on the count received here.
+ *
+ * return
+ *     Success: 0
+ *     Failure: Non-Zero Value
+ **/
+int beiscsi_get_fw_config(struct be_ctrl_info *ctrl,
+                         struct beiscsi_hba *phba)
+{
+       struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+       struct be_fw_cfg *pfw_cfg = embedded_payload(wrb);
+       uint32_t cid_count, icd_count;
+       int status = -EINVAL;
+       uint8_t ulp_num = 0;
+
+       mutex_lock(&ctrl->mbox_lock);
+       memset(wrb, 0, sizeof(*wrb));
+       be_wrb_hdr_prepare(wrb, sizeof(*pfw_cfg), true, 0);
+
+       be_cmd_hdr_prepare(&pfw_cfg->hdr, CMD_SUBSYSTEM_COMMON,
+                          OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
+                          EMBED_MBX_MAX_PAYLOAD_SIZE);
+
+       if (be_mbox_notify(ctrl)) {
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                           "BG_%d : Failed in beiscsi_get_fw_config\n");
+               goto fail_init;
+       }
+
+       /* FW response formats depend on port id */
+       phba->fw_config.phys_port = pfw_cfg->phys_port;
+       if (phba->fw_config.phys_port >= BEISCSI_PHYS_PORT_MAX) {
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                           "BG_%d : invalid physical port id %d\n",
+                           phba->fw_config.phys_port);
+               goto fail_init;
+       }
+
+       /* populate and check FW config against min and max values */
+       if (!is_chip_be2_be3r(phba)) {
+               phba->fw_config.eqid_count = pfw_cfg->eqid_count;
+               phba->fw_config.cqid_count = pfw_cfg->cqid_count;
+               if (phba->fw_config.eqid_count == 0 ||
+                   phba->fw_config.eqid_count > 2048) {
+                       beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                                   "BG_%d : invalid EQ count %d\n",
+                                   phba->fw_config.eqid_count);
+                       goto fail_init;
+               }
+               if (phba->fw_config.cqid_count == 0 ||
+                   phba->fw_config.cqid_count > 4096) {
+                       beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                                   "BG_%d : invalid CQ count %d\n",
+                                   phba->fw_config.cqid_count);
+                       goto fail_init;
+               }
+               beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+                           "BG_%d : EQ_Count : %d CQ_Count : %d\n",
+                           phba->fw_config.eqid_count,
+                           phba->fw_config.cqid_count);
+       }
+
+       /**
+        * Check on which ULPs the iSCSI protocol is loaded and set the
+        * bit for each such ULP. This flag is checked throughout the
+        * code to determine which ULPs have the iSCSI protocol loaded.
+        **/
+       for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+               if (pfw_cfg->ulp[ulp_num].ulp_mode &
+                   BEISCSI_ULP_ISCSI_INI_MODE) {
+                       set_bit(ulp_num, &phba->fw_config.ulp_supported);
+
+                       /* Get the CID, ICD and Chain count for each ULP */
+                       phba->fw_config.iscsi_cid_start[ulp_num] =
+                               pfw_cfg->ulp[ulp_num].sq_base;
+                       phba->fw_config.iscsi_cid_count[ulp_num] =
+                               pfw_cfg->ulp[ulp_num].sq_count;
+
+                       phba->fw_config.iscsi_icd_start[ulp_num] =
+                               pfw_cfg->ulp[ulp_num].icd_base;
+                       phba->fw_config.iscsi_icd_count[ulp_num] =
+                               pfw_cfg->ulp[ulp_num].icd_count;
+
+                       phba->fw_config.iscsi_chain_start[ulp_num] =
+                               pfw_cfg->chain_icd[ulp_num].chain_base;
+                       phba->fw_config.iscsi_chain_count[ulp_num] =
+                               pfw_cfg->chain_icd[ulp_num].chain_count;
+
+                       beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+                                   "BG_%d : Function loaded on ULP : %d\n"
+                                   "\tiscsi_cid_count : %d\n"
+                                   "\tiscsi_cid_start : %d\n"
+                                   "\tiscsi_icd_count : %d\n"
+                                   "\tiscsi_icd_start : %d\n",
+                                   ulp_num,
+                                   phba->fw_config.iscsi_cid_count[ulp_num],
+                                   phba->fw_config.iscsi_cid_start[ulp_num],
+                                   phba->fw_config.iscsi_icd_count[ulp_num],
+                                   phba->fw_config.iscsi_icd_start[ulp_num]);
+               }
+       }
+
+       if (phba->fw_config.ulp_supported == 0) {
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                           "BG_%d : iSCSI initiator mode not set: ULP0 %x ULP1 %x\n",
+                           pfw_cfg->ulp[BEISCSI_ULP0].ulp_mode,
+                           pfw_cfg->ulp[BEISCSI_ULP1].ulp_mode);
+               goto fail_init;
+       }
+
+       /**
+        * ICD is shared among ULPs. Use icd_count of any one loaded ULP
+        **/
+       for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
+               if (test_bit(ulp_num, &phba->fw_config.ulp_supported))
+                       break;
+       icd_count = phba->fw_config.iscsi_icd_count[ulp_num];
+       if (icd_count == 0 || icd_count > 65536) {
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                           "BG_%d: invalid ICD count %d\n", icd_count);
+               goto fail_init;
+       }
+
+       cid_count = BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP0) +
+                   BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP1);
+       if (cid_count == 0 || cid_count > 4096) {
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                           "BG_%d: invalid CID count %d\n", cid_count);
+               goto fail_init;
+       }
+
+       /**
+        * Check FW is dual ULP aware i.e. can handle either
+        * of the protocols.
+        */
+       phba->fw_config.dual_ulp_aware = (pfw_cfg->function_mode &
+                                         BEISCSI_FUNC_DUA_MODE);
+
+       beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+                   "BG_%d : DUA Mode : 0x%x\n",
+                   phba->fw_config.dual_ulp_aware);
+
+       /* all set, continue using this FW config */
+       status = 0;
+fail_init:
+       mutex_unlock(&ctrl->mbox_lock);
+       return status;
+}
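
Annotation, not part of the patch: the sanity limits above (EQ count at most 2048, CQ 4096, ICD 65536, combined CID 4096) guard against garbage from a misbehaving firmware before any queues are sized from these counts. A runnable condensation of the checks:

#include <stdio.h>
#include <stdint.h>

/* Mirrors the range checks in beiscsi_get_fw_config(). */
static int counts_valid(uint32_t eq, uint32_t cq, uint32_t icd, uint32_t cid)
{
	if (eq == 0 || eq > 2048)
		return 0;
	if (cq == 0 || cq > 4096)
		return 0;
	if (icd == 0 || icd > 65536)
		return 0;
	if (cid == 0 || cid > 4096)
		return 0;
	return 1;
}

int main(void)
{
	printf("%d\n", counts_valid(32, 64, 2048, 512));	/* 1 */
	printf("%d\n", counts_valid(0, 64, 2048, 512));		/* 0 */
	return 0;
}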
+
+/**
+ * beiscsi_get_port_name()- Get port name for the function
+ * @ctrl: ptr to Ctrl Info
+ * @phba: ptr to the dev priv structure
+ *
+ * Get the alphanumeric character for port
+ *
+ **/
+int beiscsi_get_port_name(struct be_ctrl_info *ctrl, struct beiscsi_hba *phba)
+{
+       int ret = 0;
+       struct be_mcc_wrb *wrb;
+       struct be_cmd_get_port_name *ioctl;
+
+       mutex_lock(&ctrl->mbox_lock);
+       wrb = wrb_from_mbox(&ctrl->mbox_mem);
+       memset(wrb, 0, sizeof(*wrb));
+       ioctl = embedded_payload(wrb);
+
+       be_wrb_hdr_prepare(wrb, sizeof(*ioctl), true, 0);
+       be_cmd_hdr_prepare(&ioctl->h.req_hdr, CMD_SUBSYSTEM_COMMON,
+                          OPCODE_COMMON_GET_PORT_NAME,
+                          EMBED_MBX_MAX_PAYLOAD_SIZE);
+       ret = be_mbox_notify(ctrl);
+       phba->port_name = 0;
+       if (!ret) {
+               phba->port_name = ioctl->p.resp.port_names >>
+                                 (phba->fw_config.phys_port * 8) & 0xff;
+       } else {
+               beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+                           "BG_%d : GET_PORT_NAME ret 0x%x status 0x%x\n",
+                           ret, ioctl->h.resp_hdr.status);
+       }
+
+       if (phba->port_name == 0)
+               phba->port_name = '?';
+
+       mutex_unlock(&ctrl->mbox_lock);
+       return ret;
+}
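
Annotation, not part of the patch: GET_PORT_NAME packs one ASCII name character per physical port into a 32-bit word; the function selects byte phys_port with a shift and mask and falls back to '?'. A runnable sketch of that extraction:

#include <stdio.h>
#include <stdint.h>

static char port_name(uint32_t port_names, unsigned int phys_port)
{
	char c = (port_names >> (phys_port * 8)) & 0xff;

	return c ? c : '?';	/* same fallback as the driver */
}

int main(void)
{
	uint32_t names = 0x44434241;	/* ports 0..3 -> 'A','B','C','D' */

	printf("port 2 -> %c\n", port_name(names, 2));	/* prints C */
	return 0;
}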
+
+int beiscsi_set_uer_feature(struct beiscsi_hba *phba)
+{
+       struct be_ctrl_info *ctrl = &phba->ctrl;
+       struct be_cmd_set_features *ioctl;
+       struct be_mcc_wrb *wrb;
+       int ret = 0;
+
+       mutex_lock(&ctrl->mbox_lock);
+       wrb = wrb_from_mbox(&ctrl->mbox_mem);
+       memset(wrb, 0, sizeof(*wrb));
+       ioctl = embedded_payload(wrb);
+
+       be_wrb_hdr_prepare(wrb, sizeof(*ioctl), true, 0);
+       be_cmd_hdr_prepare(&ioctl->h.req_hdr, CMD_SUBSYSTEM_COMMON,
+                          OPCODE_COMMON_SET_FEATURES,
+                          EMBED_MBX_MAX_PAYLOAD_SIZE);
+       ioctl->feature = BE_CMD_SET_FEATURE_UER;
+       ioctl->param_len = sizeof(ioctl->param.req);
+       ioctl->param.req.uer = BE_CMD_UER_SUPP_BIT;
+       ret = be_mbox_notify(ctrl);
+       if (!ret) {
+               phba->ue2rp = ioctl->param.resp.ue2rp;
+               set_bit(BEISCSI_HBA_UER_SUPP, &phba->state);
+               beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+                           "BG_%d : HBA error recovery supported\n");
+       } else {
+               /**
+                * Check "MCC_STATUS_INVALID_LENGTH" for SKH.
+                * Older FW versions return this error.
+                */
+               if (ret == MCC_STATUS_ILLEGAL_REQUEST ||
+                   ret == MCC_STATUS_INVALID_LENGTH)
+                       __beiscsi_log(phba, KERN_INFO,
+                                     "BG_%d : HBA error recovery not supported\n");
+       }
+
+       mutex_unlock(&ctrl->mbox_lock);
+       return ret;
+}
+
+static u32 beiscsi_get_post_stage(struct beiscsi_hba *phba)
+{
+       u32 sem;
+
+       if (is_chip_be2_be3r(phba))
+               sem = ioread32(phba->csr_va + SLIPORT_SEMAPHORE_OFFSET_BEx);
+       else
+               pci_read_config_dword(phba->pcidev,
+                                     SLIPORT_SEMAPHORE_OFFSET_SH, &sem);
+       return sem;
+}
+
+int beiscsi_check_fw_rdy(struct beiscsi_hba *phba)
+{
+       u32 loop, post, rdy = 0;
+
+       loop = 1000;
+       while (loop--) {
+               post = beiscsi_get_post_stage(phba);
+               if (post & POST_ERROR_BIT)
+                       break;
+               if ((post & POST_STAGE_MASK) == POST_STAGE_ARMFW_RDY) {
+                       rdy = 1;
+                       break;
+               }
+               msleep(60);
+       }
+
+       if (!rdy) {
+               __beiscsi_log(phba, KERN_ERR,
+                             "BC_%d : FW not ready 0x%x\n", post);
+       }
+
+       return rdy;
+}
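
Annotation, not part of the patch: beiscsi_check_fw_rdy() polls the POST semaphore up to 1000 times with 60 ms sleeps (about a minute), bailing out early if the error bit is set. The per-read decision reduces to the following, using the SLIPORT definitions added in be_cmds.h below:

#include <stdio.h>
#include <stdint.h>

#define POST_STAGE_MASK		0x0000FFFF
#define POST_ERROR_BIT		0x80000000
#define POST_STAGE_ARMFW_RDY	0xC000

/* Returns -1 on fatal error, 1 when FW is ready, 0 to keep polling. */
static int fw_ready(uint32_t post)
{
	if (post & POST_ERROR_BIT)
		return -1;
	return (post & POST_STAGE_MASK) == POST_STAGE_ARMFW_RDY;
}

int main(void)
{
	printf("%d %d %d\n", fw_ready(0xC000), fw_ready(0x1),
	       fw_ready(0x80000001));	/* 1 0 -1 */
	return 0;
}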
+
+int beiscsi_cmd_function_reset(struct beiscsi_hba *phba)
+{
+       struct be_ctrl_info *ctrl = &phba->ctrl;
+       struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+       struct be_post_sgl_pages_req *req = embedded_payload(wrb);
+       int status;
+
+       mutex_lock(&ctrl->mbox_lock);
+
+       req = embedded_payload(wrb);
+       be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+       be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+                          OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));
+       status = be_mbox_notify(ctrl);
+
+       mutex_unlock(&ctrl->mbox_lock);
+       return status;
+}
+
+int beiscsi_cmd_special_wrb(struct be_ctrl_info *ctrl, u32 load)
+{
+       struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+       struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
+       u8 *endian_check;
+       int status;
+
+       mutex_lock(&ctrl->mbox_lock);
+       memset(wrb, 0, sizeof(*wrb));
+
+       endian_check = (u8 *) wrb;
+       if (load) {
+               /* to start communicating */
+               *endian_check++ = 0xFF;
+               *endian_check++ = 0x12;
+               *endian_check++ = 0x34;
+               *endian_check++ = 0xFF;
+               *endian_check++ = 0xFF;
+               *endian_check++ = 0x56;
+               *endian_check++ = 0x78;
+               *endian_check++ = 0xFF;
+       } else {
+               /* to stop communicating */
+               *endian_check++ = 0xFF;
+               *endian_check++ = 0xAA;
+               *endian_check++ = 0xBB;
+               *endian_check++ = 0xFF;
+               *endian_check++ = 0xFF;
+               *endian_check++ = 0xCC;
+               *endian_check++ = 0xDD;
+               *endian_check = 0xFF;
+       }
+       be_dws_cpu_to_le(wrb, sizeof(*wrb));
+
+       status = be_mbox_notify(ctrl);
+       if (status)
+               beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+                           "BC_%d : special WRB message failed\n");
+       mutex_unlock(&ctrl->mbox_lock);
+       return status;
+}
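
Annotation, not part of the patch: the "special WRB" is nothing more than a fixed 8-byte signature written to the mailbox; this helper folds the old be_cmd_fw_initialize()/be_cmd_fw_uninit() pair (removed above) into one function keyed by the load flag. The two signatures, as a runnable sketch:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

static void fill_special_wrb(uint8_t *wrb, int load)
{
	static const uint8_t sig_load[8]   = { 0xFF, 0x12, 0x34, 0xFF,
					       0xFF, 0x56, 0x78, 0xFF };
	static const uint8_t sig_unload[8] = { 0xFF, 0xAA, 0xBB, 0xFF,
					       0xFF, 0xCC, 0xDD, 0xFF };

	memcpy(wrb, load ? sig_load : sig_unload, 8);
}

int main(void)
{
	uint8_t wrb[8];
	int i;

	fill_special_wrb(wrb, 1);	/* driver loading */
	for (i = 0; i < 8; i++)
		printf("%02X ", wrb[i]);
	printf("\n");
	return 0;
}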
+
+int beiscsi_init_sliport(struct beiscsi_hba *phba)
+{
+       int status;
+
+       /* check POST stage before talking to FW */
+       status = beiscsi_check_fw_rdy(phba);
+       if (!status)
+               return -EIO;
+
+       /* clear all error states after checking FW rdy */
+       phba->state &= ~BEISCSI_HBA_IN_ERR;
+
+       /* check again UER support */
+       phba->state &= ~BEISCSI_HBA_UER_SUPP;
+
+       /*
+        * SLI COMMON_FUNCTION_RESET completion is indicated by BMBX RDY bit.
+        * It should clean up any stale info in FW for this fn.
+        */
+       status = beiscsi_cmd_function_reset(phba);
+       if (status) {
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                           "BC_%d : SLI Function Reset failed\n");
+               return status;
+       }
+
+       /* indicate driver is loading */
+       return beiscsi_cmd_special_wrb(&phba->ctrl, 1);
+}
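
Annotation, not part of the patch: beiscsi_init_sliport() fixes the bring-up order: POST poll first, then clearing stale error/UER bits, then COMMON_FUNCTION_RESET (whose completion is signalled by the BMBX RDY bit), and finally the driver-load special WRB. A stubbed sketch of that sequence:

#include <stdio.h>

static int check_fw_rdy(void)     { puts("1. poll POST until ARMFW_RDY");   return 1; }
static int function_reset(void)   { puts("3. COMMON_FUNCTION_RESET");       return 0; }
static int special_wrb_load(void) { puts("4. special WRB: driver loading"); return 0; }

int main(void)
{
	if (!check_fw_rdy())
		return 1;	/* -EIO in the driver */
	puts("2. clear stale IN_ERR and UER_SUPP state bits");
	if (function_reset())
		return 1;
	return special_wrb_load();
}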
+
+/**
+ * beiscsi_cmd_iscsi_cleanup()- Inform FW to cleanup EP data structures.
+ * @phba: pointer to dev priv structure
+ * @ulp: ULP number.
+ *
+ * return
+ *     Success: 0
+ *     Failure: Non-Zero Value
+ **/
+int beiscsi_cmd_iscsi_cleanup(struct beiscsi_hba *phba, unsigned short ulp)
+{
+       struct be_ctrl_info *ctrl = &phba->ctrl;
+       struct iscsi_cleanup_req_v1 *req_v1;
+       struct iscsi_cleanup_req *req;
+       struct be_mcc_wrb *wrb;
+       int status;
+
+       mutex_lock(&ctrl->mbox_lock);
+       wrb = wrb_from_mbox(&ctrl->mbox_mem);
+       req = embedded_payload(wrb);
+       be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+       be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
+                          OPCODE_COMMON_ISCSI_CLEANUP, sizeof(*req));
+
+       /**
+        * TODO: Check with FW folks the chute value to be set.
+        * For now, use the ULP_MASK as the chute value.
+        */
+       if (is_chip_be2_be3r(phba)) {
+               req->chute = (1 << ulp);
+               req->hdr_ring_id = HWI_GET_DEF_HDRQ_ID(phba, ulp);
+               req->data_ring_id = HWI_GET_DEF_BUFQ_ID(phba, ulp);
+       } else {
+               req_v1 = (struct iscsi_cleanup_req_v1 *)req;
+               req_v1->hdr.version = 1;
+               req_v1->hdr_ring_id = cpu_to_le16(HWI_GET_DEF_HDRQ_ID(phba,
+                                                                     ulp));
+               req_v1->data_ring_id = cpu_to_le16(HWI_GET_DEF_BUFQ_ID(phba,
+                                                                      ulp));
+       }
+
+       status = be_mbox_notify(ctrl);
+       if (status)
+               beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
+                           "BG_%d : %s failed %d\n", __func__, ulp);
+       mutex_unlock(&ctrl->mbox_lock);
+       return status;
+}
+
+/**
+ * beiscsi_detect_ue()- Detect Unrecoverable Error on adapter
+ * @phba: Driver priv structure
+ *
+ * Read registers linked to UE and check for the UE status
+ **/
+int beiscsi_detect_ue(struct beiscsi_hba *phba)
+{
+       uint32_t ue_mask_hi = 0, ue_mask_lo = 0;
+       uint32_t ue_hi = 0, ue_lo = 0;
+       uint8_t i = 0;
+       int ret = 0;
+
+       pci_read_config_dword(phba->pcidev,
+                             PCICFG_UE_STATUS_LOW, &ue_lo);
+       pci_read_config_dword(phba->pcidev,
+                             PCICFG_UE_STATUS_MASK_LOW,
+                             &ue_mask_lo);
+       pci_read_config_dword(phba->pcidev,
+                             PCICFG_UE_STATUS_HIGH,
+                             &ue_hi);
+       pci_read_config_dword(phba->pcidev,
+                             PCICFG_UE_STATUS_MASK_HI,
+                             &ue_mask_hi);
+
+       ue_lo = (ue_lo & ~ue_mask_lo);
+       ue_hi = (ue_hi & ~ue_mask_hi);
+
+       if (ue_lo || ue_hi) {
+               set_bit(BEISCSI_HBA_IN_UE, &phba->state);
+               __beiscsi_log(phba, KERN_ERR,
+                             "BC_%d : HBA error detected\n");
+               ret = 1;
+       }
+
+       if (ue_lo) {
+               for (i = 0; ue_lo; ue_lo >>= 1, i++) {
+                       if (ue_lo & 1)
+                               __beiscsi_log(phba, KERN_ERR,
+                                             "BC_%d : UE_LOW %s bit set\n",
+                                             desc_ue_status_low[i]);
+               }
+       }
+
+       if (ue_hi) {
+               for (i = 0; ue_hi; ue_hi >>= 1, i++) {
+                       if (ue_hi & 1)
+                               __beiscsi_log(phba, KERN_ERR,
+                                             "BC_%d : UE_HIGH %s bit set\n",
+                                             desc_ue_status_hi[i]);
+               }
+       }
+       return ret;
+}
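
Annotation, not part of the patch: the UE check ANDs each status register with the complement of its mask register, so only unmasked (fatal) bits survive, then walks the surviving bits to name them from the desc_ue_status_* tables. The bit walk in isolation:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t ue_lo = 0x00000005, ue_mask_lo = 0x00000004;
	uint32_t live = ue_lo & ~ue_mask_lo;	/* masked bits are ignored */
	int i;

	for (i = 0; live; live >>= 1, i++)
		if (live & 1)
			printf("UE_LOW bit %d set\n", i);	/* bit 0 only */
	return 0;
}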
+
+/**
+ * beiscsi_detect_tpe()- Detect Transient Parity Error on adapter
+ * @phba: Driver priv structure
+ *
+ * Read SLIPORT SEMAPHORE register to check for UER
+ *
+ **/
+int beiscsi_detect_tpe(struct beiscsi_hba *phba)
+{
+       u32 post, status;
+       int ret = 0;
+
+       post = beiscsi_get_post_stage(phba);
+       status = post & POST_STAGE_MASK;
+       if ((status & POST_ERR_RECOVERY_CODE_MASK) ==
+           POST_STAGE_RECOVERABLE_ERR) {
+               set_bit(BEISCSI_HBA_IN_TPE, &phba->state);
+               __beiscsi_log(phba, KERN_INFO,
+                             "BC_%d : HBA error recoverable: 0x%x\n", post);
+               ret = 1;
+       } else {
+               __beiscsi_log(phba, KERN_INFO,
+                             "BC_%d : HBA in UE: 0x%x\n", post);
+       }
+
+       return ret;
+}
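
Annotation, not part of the patch: TPE detection keys off the recovery-code nibble of the POST stage; 0xE000 means the error is recoverable and the driver can attempt port recovery rather than declaring a UE. As a runnable predicate:

#include <stdio.h>
#include <stdint.h>

#define POST_STAGE_MASK			0x0000FFFF
#define POST_ERR_RECOVERY_CODE_MASK	0xF000
#define POST_STAGE_RECOVERABLE_ERR	0xE000

static int is_recoverable(uint32_t post)
{
	uint32_t stage = post & POST_STAGE_MASK;

	return (stage & POST_ERR_RECOVERY_CODE_MASK) ==
		POST_STAGE_RECOVERABLE_ERR;
}

int main(void)
{
	printf("%d %d\n", is_recoverable(0xE001), is_recoverable(0x8000));
	return 0;	/* prints 1 0 */
}
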
index deeb951e6874c6d1da49c2443144e25b2c1cb7b8..328fb5b973cdc433e908075bdb368b36723c6d13 100644 (file)
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2015 Emulex
+ * Copyright (C) 2005 - 2016 Broadcom
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -8,7 +8,7 @@
  * Public License is included in this distribution in the file called COPYING.
  *
  * Contact Information:
- * linux-drivers@avagotech.com
+ * linux-drivers@broadcom.com
  *
  * Emulex
  * 3333 Susan Street
@@ -57,6 +57,7 @@ struct be_mcc_wrb {
 #define MCC_STATUS_ILLEGAL_REQUEST 0x2
 #define MCC_STATUS_ILLEGAL_FIELD 0x3
 #define MCC_STATUS_INSUFFICIENT_BUFFER 0x4
+#define MCC_STATUS_INVALID_LENGTH 0x74
 
 #define CQE_STATUS_COMPL_MASK  0xFFFF
 #define CQE_STATUS_COMPL_SHIFT 0               /* bits 0 - 15 */
@@ -97,11 +98,23 @@ struct be_mcc_compl {
 #define MPU_MAILBOX_DB_RDY_MASK        0x1     /* bit 0 */
 #define MPU_MAILBOX_DB_HI_MASK 0x2     /* bit 1 */
 
-/********** MPU semphore ******************/
-#define MPU_EP_SEMAPHORE_OFFSET 0xac
-#define EP_SEMAPHORE_POST_STAGE_MASK 0x0000FFFF
-#define EP_SEMAPHORE_POST_ERR_MASK 0x1
-#define EP_SEMAPHORE_POST_ERR_SHIFT 31
+/********** MPU semaphore: used for SH & BE ******************/
+#define SLIPORT_SOFTRESET_OFFSET               0x5c    /* CSR BAR offset */
+#define SLIPORT_SEMAPHORE_OFFSET_BEx           0xac    /* CSR BAR offset */
+#define SLIPORT_SEMAPHORE_OFFSET_SH            0x94    /* PCI-CFG offset */
+#define POST_STAGE_MASK                                0x0000FFFF
+#define POST_ERROR_BIT                         0x80000000
+#define POST_ERR_RECOVERY_CODE_MASK            0xF000
+
+/* Soft Reset register masks */
+#define SLIPORT_SOFTRESET_SR_MASK              0x00000080      /* SR bit */
+
+/* MPU semaphore POST stage values */
+#define POST_STAGE_AWAITING_HOST_RDY   0x1 /* FW awaiting go-ahead from host */
+#define POST_STAGE_HOST_RDY            0x2 /* Host has given go-ahead to FW */
+#define POST_STAGE_BE_RESET            0x3 /* Host wants to reset chip */
+#define POST_STAGE_ARMFW_RDY           0xC000 /* FW is done with POST */
+#define POST_STAGE_RECOVERABLE_ERR     0xE000 /* Recoverable err detected */
 
 /********** MCC door bell ************/
 #define DB_MCCQ_OFFSET 0x140
@@ -109,9 +122,6 @@ struct be_mcc_compl {
 /* Number of entries posted */
 #define DB_MCCQ_NUM_POSTED_SHIFT 16            /* bits 16 - 29 */
 
-/* MPU semphore POST stage values */
-#define POST_STAGE_ARMFW_RDY           0xc000  /* FW is done with POST */
-
 /**
  * When the async bit of mcc_compl is set, the last 4 bytes of
  * mcc_compl is interpreted as follows:
@@ -217,6 +227,7 @@ struct be_mcc_mailbox {
 #define OPCODE_COMMON_QUERY_FIRMWARE_CONFIG            58
 #define OPCODE_COMMON_FUNCTION_RESET                   61
 #define OPCODE_COMMON_GET_PORT_NAME                    77
+#define OPCODE_COMMON_SET_FEATURES                     191
 
 /**
  * LIST of opcodes that are common between Initiator and Target
@@ -345,8 +356,8 @@ struct be_cmd_req_logout_fw_sess {
 
 struct be_cmd_resp_logout_fw_sess {
        struct be_cmd_resp_hdr hdr;     /* dw[4] */
-#define BEISCSI_MGMT_SESSION_CLOSE 0x20
        uint32_t session_status;
+#define BE_SESS_STATUS_CLOSE           0x20
 } __packed;
 
 struct mgmt_conn_login_options {
@@ -365,6 +376,14 @@ struct ip_addr_format {
        u16 size_of_structure;
        u8 reserved;
        u8 ip_type;
+#define BEISCSI_IP_TYPE_V4             0x1
+#define BEISCSI_IP_TYPE_STATIC_V4      0x3
+#define BEISCSI_IP_TYPE_DHCP_V4                0x5
+/* type v4 values < type v6 values */
+#define BEISCSI_IP_TYPE_V6             0x10
+#define BEISCSI_IP_TYPE_ROUTABLE_V6    0x30
+#define BEISCSI_IP_TYPE_LINK_LOCAL_V6  0x50
+#define BEISCSI_IP_TYPE_AUTO_V6                0x90
        u8 addr[16];
        u32 rsvd0;
 } __packed;
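
Annotation, not part of the patch: the new BEISCSI_IP_TYPE_* values are deliberately ordered so every IPv4 variant is numerically below every IPv6 variant (per the comment above), letting code classify an address family with a single comparison. Sketch:

#include <stdio.h>

#define BEISCSI_IP_TYPE_V4	0x1
#define BEISCSI_IP_TYPE_V6	0x10

static int is_ipv6_type(unsigned char ip_type)
{
	return ip_type >= BEISCSI_IP_TYPE_V6;	/* relies on the ordering */
}

int main(void)
{
	printf("0x5 -> %d, 0x30 -> %d\n", is_ipv6_type(0x5),
	       is_ipv6_type(0x30));	/* 0, 1 */
	return 0;
}
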
@@ -430,8 +449,13 @@ struct be_cmd_get_boot_target_req {
 
 struct be_cmd_get_boot_target_resp {
        struct be_cmd_resp_hdr hdr;
-       u32  boot_session_count;
-       int  boot_session_handle;
+       u32 boot_session_count;
+       u32 boot_session_handle;
+/**
+ * FW returns 0xffffffff if it couldn't establish a connection with
+ * the configured boot target.
+ */
+#define BE_BOOT_INVALID_SHANDLE        0xffffffff
 };
 
 struct be_cmd_reopen_session_req {
@@ -699,16 +723,59 @@ struct be_cmd_get_nic_conf_resp {
        u8 mac_address[ETH_ALEN];
 } __packed;
 
-#define BEISCSI_ALIAS_LEN 32
+/******************** Get HBA NAME *******************/
 
 struct be_cmd_hba_name {
        struct be_cmd_req_hdr hdr;
        u16 flags;
        u16 rsvd0;
        u8 initiator_name[ISCSI_NAME_LEN];
-       u8 initiator_alias[BEISCSI_ALIAS_LEN];
+#define BE_INI_ALIAS_LEN 32
+       u8 initiator_alias[BE_INI_ALIAS_LEN];
 } __packed;
 
+/******************** COMMON SET Features *******************/
+#define BE_CMD_SET_FEATURE_UER 0x10
+#define BE_CMD_UER_SUPP_BIT    0x1
+struct be_uer_req {
+       u32 uer;
+       u32 rsvd;
+};
+
+struct be_uer_resp {
+       u32 uer;
+       u16 ue2rp;
+       u16 ue2sr;
+};
+
+struct be_cmd_set_features {
+       union {
+               struct be_cmd_req_hdr req_hdr;
+               struct be_cmd_resp_hdr resp_hdr;
+       } h;
+       u32 feature;
+       u32 param_len;
+       union {
+               struct be_uer_req req;
+               struct be_uer_resp resp;
+               u32 rsvd[2];
+       } param;
+} __packed;
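
Annotation, not part of the patch: COMMON_SET_FEATURES carries a feature code, a parameter length, and a union that is a request going down and a response coming back; for UER the driver sends BE_CMD_UER_SUPP_BIT and reads back ue2rp. A simplified user-space mirror of the layout:

#include <stdio.h>
#include <stdint.h>

struct uer_req  { uint32_t uer; uint32_t rsvd; };
struct uer_resp { uint32_t uer; uint16_t ue2rp; uint16_t ue2sr; };

struct set_features {
	uint32_t feature;	/* BE_CMD_SET_FEATURE_UER == 0x10 */
	uint32_t param_len;	/* sizeof the request member */
	union {
		struct uer_req  req;
		struct uer_resp resp;
		uint32_t rsvd[2];
	} param;
};

int main(void)
{
	struct set_features cmd = { .feature = 0x10,
				    .param_len = sizeof(struct uer_req),
				    .param.req = { .uer = 0x1, .rsvd = 0 } };

	printf("feature %#x, param_len %u\n", (unsigned)cmd.feature,
	       (unsigned)cmd.param_len);
	return 0;
}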
+
+int beiscsi_cmd_function_reset(struct beiscsi_hba *phba);
+
+int beiscsi_cmd_special_wrb(struct be_ctrl_info *ctrl, u32 load);
+
+int beiscsi_check_fw_rdy(struct beiscsi_hba *phba);
+
+int beiscsi_init_sliport(struct beiscsi_hba *phba);
+
+int beiscsi_cmd_iscsi_cleanup(struct beiscsi_hba *phba, unsigned short ulp_num);
+
+int beiscsi_detect_ue(struct beiscsi_hba *phba);
+
+int beiscsi_detect_tpe(struct beiscsi_hba *phba);
+
 int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
                          struct be_queue_info *eq, int eq_delay);
 
@@ -723,24 +790,21 @@ int beiscsi_cmd_mccq_create(struct beiscsi_hba *phba,
                        struct be_queue_info *mccq,
                        struct be_queue_info *cq);
 
-int be_poll_mcc(struct be_ctrl_info *ctrl);
-int mgmt_check_supported_fw(struct be_ctrl_info *ctrl,
-                                     struct beiscsi_hba *phba);
 unsigned int be_cmd_get_initname(struct beiscsi_hba *phba);
 
 void free_mcc_wrb(struct be_ctrl_info *ctrl, unsigned int tag);
 
-int be_cmd_modify_eq_delay(struct beiscsi_hba *phba, struct be_set_eqd *,
+int beiscsi_modify_eq_delay(struct beiscsi_hba *phba, struct be_set_eqd *,
                            int num);
 int beiscsi_mccq_compl_wait(struct beiscsi_hba *phba,
-                           uint32_t tag, struct be_mcc_wrb **wrb,
+                           unsigned int tag,
+                           struct be_mcc_wrb **wrb,
                            struct be_dma_mem *mbx_cmd_mem);
-/*ISCSI Functuions */
-int be_cmd_fw_initialize(struct be_ctrl_info *ctrl);
-int be_cmd_fw_uninit(struct be_ctrl_info *ctrl);
-
+int __beiscsi_mcc_compl_status(struct beiscsi_hba *phba,
+                              unsigned int tag,
+                              struct be_mcc_wrb **wrb,
+                              struct be_dma_mem *mbx_cmd_mem);
 struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem);
-int be_mcc_compl_poll(struct beiscsi_hba *phba, unsigned int tag);
 void be_mcc_notify(struct beiscsi_hba *phba, unsigned int tag);
 struct be_mcc_wrb *alloc_mcc_wrb(struct beiscsi_hba *phba,
                                 unsigned int *ref_tag);
@@ -749,9 +813,6 @@ void beiscsi_process_async_event(struct beiscsi_hba *phba,
 int beiscsi_process_mcc_compl(struct be_ctrl_info *ctrl,
                              struct be_mcc_compl *compl);
 
-
-int be_mbox_notify(struct be_ctrl_info *ctrl);
-
 int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
                                    struct be_queue_info *cq,
                                    struct be_queue_info *dq, int length,
@@ -767,8 +828,6 @@ int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
                                struct be_dma_mem *q_mem, u32 page_offset,
                                u32 num_pages);
 
-int beiscsi_cmd_reset_function(struct beiscsi_hba *phba);
-
 int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem,
                       struct be_queue_info *wrbq,
                       struct hwi_wrb_context *pwrb_context,
@@ -777,6 +836,15 @@ int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem,
 /* Configuration Functions */
 int be_cmd_set_vlan(struct beiscsi_hba *phba, uint16_t vlan_tag);
 
+int beiscsi_check_supported_fw(struct be_ctrl_info *ctrl,
+                              struct beiscsi_hba *phba);
+
+int beiscsi_get_fw_config(struct be_ctrl_info *ctrl, struct beiscsi_hba *phba);
+
+int beiscsi_get_port_name(struct be_ctrl_info *ctrl, struct beiscsi_hba *phba);
+
+int beiscsi_set_uer_feature(struct beiscsi_hba *phba);
+
 struct be_default_pdu_context {
        u32 dw[4];
 } __packed;
@@ -999,7 +1067,16 @@ struct iscsi_cleanup_req {
        u16 chute;
        u8 hdr_ring_id;
        u8 data_ring_id;
+} __packed;
 
+struct iscsi_cleanup_req_v1 {
+       struct be_cmd_req_hdr hdr;
+       u16 chute;
+       u16 rsvd1;
+       u16 hdr_ring_id;
+       u16 rsvd2;
+       u16 data_ring_id;
+       u16 rsvd3;
 } __packed;
 
 struct eq_delay {
@@ -1368,14 +1445,9 @@ struct be_cmd_get_port_name {
                                                 * the cxn
                                                 */
 
-int beiscsi_pci_soft_reset(struct beiscsi_hba *phba);
-int be_chk_reset_complete(struct beiscsi_hba *phba);
-
 void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
                        bool embedded, u8 sge_cnt);
 
 void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
                        u8 subsystem, u8 opcode, int cmd_len);
-
-void beiscsi_fail_session(struct iscsi_cls_session *cls_session);
 #endif /* !BEISCSI_CMDS_H */
index 09f89a3eaa87605733d673741aedc64c309f60ce..ba258217614e6f38d0c20f9ec7673dd65191116c 100644 (file)
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2015 Emulex
+ * Copyright (C) 2005 - 2016 Broadcom
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -7,10 +7,10 @@
  * as published by the Free Software Foundation.  The full GNU General
  * Public License is included in this distribution in the file called COPYING.
  *
- * Written by: Jayamohan Kallickal (jayamohan.kallickal@avagotech.com)
+ * Written by: Jayamohan Kallickal (jayamohan.kallickal@broadcom.com)
  *
  * Contact Information:
- * linux-drivers@avagotech.com
+ * linux-drivers@broadcom.com
  *
  * Emulex
  * 3333 Susan Street
@@ -52,22 +52,20 @@ struct iscsi_cls_session *beiscsi_session_create(struct iscsi_endpoint *ep,
 
 
        if (!ep) {
-               printk(KERN_ERR
-                      "beiscsi_session_create: invalid ep\n");
+               pr_err("beiscsi_session_create: invalid ep\n");
                return NULL;
        }
        beiscsi_ep = ep->dd_data;
        phba = beiscsi_ep->phba;
 
-       if (phba->state & BE_ADAPTER_PCI_ERR) {
-               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
-                           "BS_%d : PCI_ERROR Recovery\n");
-               return NULL;
-       } else {
+       if (!beiscsi_hba_is_online(phba)) {
                beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
-                           "BS_%d : In beiscsi_session_create\n");
+                           "BS_%d : HBA in error 0x%lx\n", phba->state);
+               return NULL;
        }
 
+       beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+                   "BS_%d : In beiscsi_session_create\n");
        if (cmds_max > beiscsi_ep->phba->params.wrbs_per_cxn) {
                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
                            "BS_%d : Cannot handle %d cmds."
@@ -119,6 +117,16 @@ void beiscsi_session_destroy(struct iscsi_cls_session *cls_session)
        iscsi_session_teardown(cls_session);
 }
 
+/**
+ * beiscsi_session_fail(): Closing session with appropriate error
+ * @cls_session: ptr to session
+ **/
+void beiscsi_session_fail(struct iscsi_cls_session *cls_session)
+{
+       iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
+}
+
 /**
  * beiscsi_conn_create - create an instance of iscsi connection
  * @cls_session: ptr to iscsi_cls_session
@@ -237,7 +245,7 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
        return beiscsi_bindconn_cid(phba, beiscsi_conn, beiscsi_ep->ep_cid);
 }
 
-static int beiscsi_create_ipv4_iface(struct beiscsi_hba *phba)
+static int beiscsi_iface_create_ipv4(struct beiscsi_hba *phba)
 {
        if (phba->ipv4_iface)
                return 0;
@@ -256,7 +264,7 @@ static int beiscsi_create_ipv4_iface(struct beiscsi_hba *phba)
        return 0;
 }
 
-static int beiscsi_create_ipv6_iface(struct beiscsi_hba *phba)
+static int beiscsi_iface_create_ipv6(struct beiscsi_hba *phba)
 {
        if (phba->ipv6_iface)
                return 0;
@@ -275,79 +283,31 @@ static int beiscsi_create_ipv6_iface(struct beiscsi_hba *phba)
        return 0;
 }
 
-void beiscsi_create_def_ifaces(struct beiscsi_hba *phba)
+void beiscsi_iface_create_default(struct beiscsi_hba *phba)
 {
        struct be_cmd_get_if_info_resp *if_info;
 
-       if (!mgmt_get_if_info(phba, BE2_IPV4, &if_info)) {
-               beiscsi_create_ipv4_iface(phba);
+       if (!beiscsi_if_get_info(phba, BEISCSI_IP_TYPE_V4, &if_info)) {
+               beiscsi_iface_create_ipv4(phba);
                kfree(if_info);
        }
 
-       if (!mgmt_get_if_info(phba, BE2_IPV6, &if_info)) {
-               beiscsi_create_ipv6_iface(phba);
+       if (!beiscsi_if_get_info(phba, BEISCSI_IP_TYPE_V6, &if_info)) {
+               beiscsi_iface_create_ipv6(phba);
                kfree(if_info);
        }
 }
 
-void beiscsi_destroy_def_ifaces(struct beiscsi_hba *phba)
+void beiscsi_iface_destroy_default(struct beiscsi_hba *phba)
 {
-       if (phba->ipv6_iface)
+       if (phba->ipv6_iface) {
                iscsi_destroy_iface(phba->ipv6_iface);
-       if (phba->ipv4_iface)
-               iscsi_destroy_iface(phba->ipv4_iface);
-}
-
-static int
-beiscsi_set_static_ip(struct Scsi_Host *shost,
-               struct iscsi_iface_param_info *iface_param,
-               void *data, uint32_t dt_len)
-{
-       struct beiscsi_hba *phba = iscsi_host_priv(shost);
-       struct iscsi_iface_param_info *iface_ip = NULL;
-       struct iscsi_iface_param_info *iface_subnet = NULL;
-       struct nlattr *nla;
-       int ret;
-
-
-       switch (iface_param->param) {
-       case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
-               nla = nla_find(data, dt_len, ISCSI_NET_PARAM_IPV4_ADDR);
-               if (nla)
-                       iface_ip = nla_data(nla);
-
-               nla = nla_find(data, dt_len, ISCSI_NET_PARAM_IPV4_SUBNET);
-               if (nla)
-                       iface_subnet = nla_data(nla);
-               break;
-       case ISCSI_NET_PARAM_IPV4_ADDR:
-               iface_ip = iface_param;
-               nla = nla_find(data, dt_len, ISCSI_NET_PARAM_IPV4_SUBNET);
-               if (nla)
-                       iface_subnet = nla_data(nla);
-               break;
-       case ISCSI_NET_PARAM_IPV4_SUBNET:
-               iface_subnet = iface_param;
-               nla = nla_find(data, dt_len, ISCSI_NET_PARAM_IPV4_ADDR);
-               if (nla)
-                       iface_ip = nla_data(nla);
-               break;
-       default:
-               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
-                           "BS_%d : Unsupported param %d\n",
-                           iface_param->param);
+               phba->ipv6_iface = NULL;
        }
-
-       if (!iface_ip || !iface_subnet) {
-               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
-                           "BS_%d : IP and Subnet Mask required\n");
-               return -EINVAL;
+       if (phba->ipv4_iface) {
+               iscsi_destroy_iface(phba->ipv4_iface);
+               phba->ipv4_iface = NULL;
        }
-
-       ret = mgmt_set_ip(phba, iface_ip, iface_subnet,
-                       ISCSI_BOOTPROTO_STATIC);
-
-       return ret;
 }
 
 /**
@@ -363,137 +323,141 @@ beiscsi_set_static_ip(struct Scsi_Host *shost,
  *     Failure: Non-Zero Value
  **/
 static int
-beiscsi_set_vlan_tag(struct Scsi_Host *shost,
-                     struct iscsi_iface_param_info *iface_param)
+beiscsi_iface_config_vlan(struct Scsi_Host *shost,
+                         struct iscsi_iface_param_info *iface_param)
 {
        struct beiscsi_hba *phba = iscsi_host_priv(shost);
-       int ret;
-
-       /* Get the Interface Handle */
-       ret = mgmt_get_all_if_id(phba);
-       if (ret) {
-               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
-                           "BS_%d : Getting Interface Handle Failed\n");
-               return ret;
-       }
+       int ret = -EPERM;
 
        switch (iface_param->param) {
        case ISCSI_NET_PARAM_VLAN_ENABLED:
+               ret = 0;
                if (iface_param->value[0] != ISCSI_VLAN_ENABLE)
-                       ret = mgmt_set_vlan(phba, BEISCSI_VLAN_DISABLE);
+                       ret = beiscsi_if_set_vlan(phba, BEISCSI_VLAN_DISABLE);
                break;
        case ISCSI_NET_PARAM_VLAN_TAG:
-               ret = mgmt_set_vlan(phba,
-                                   *((uint16_t *)iface_param->value));
+               ret = beiscsi_if_set_vlan(phba,
+                                         *((uint16_t *)iface_param->value));
                break;
-       default:
-               beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
-                           "BS_%d : Unknown Param Type : %d\n",
-                           iface_param->param);
-               return -ENOSYS;
        }
        return ret;
 }
 
 
 static int
-beiscsi_set_ipv4(struct Scsi_Host *shost,
-               struct iscsi_iface_param_info *iface_param,
-               void *data, uint32_t dt_len)
+beiscsi_iface_config_ipv4(struct Scsi_Host *shost,
+                         struct iscsi_iface_param_info *info,
+                         void *data, uint32_t dt_len)
 {
        struct beiscsi_hba *phba = iscsi_host_priv(shost);
-       int ret = 0;
+       u8 *ip = NULL, *subnet = NULL, *gw;
+       struct nlattr *nla;
+       int ret = -EPERM;
 
        /* Check the param */
-       switch (iface_param->param) {
+       switch (info->param) {
+       case ISCSI_NET_PARAM_IFACE_ENABLE:
+               if (info->value[0] == ISCSI_IFACE_ENABLE) {
+                       ret = beiscsi_iface_create_ipv4(phba);
+               } else {
+                       iscsi_destroy_iface(phba->ipv4_iface);
+                       phba->ipv4_iface = NULL;
+               }
+               break;
        case ISCSI_NET_PARAM_IPV4_GW:
-               ret = mgmt_set_gateway(phba, iface_param);
+               gw = info->value;
+               ret = beiscsi_if_set_gw(phba, BEISCSI_IP_TYPE_V4, gw);
                break;
        case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
-               if (iface_param->value[0] == ISCSI_BOOTPROTO_DHCP)
-                       ret = mgmt_set_ip(phba, iface_param,
-                                       NULL, ISCSI_BOOTPROTO_DHCP);
-               else if (iface_param->value[0] == ISCSI_BOOTPROTO_STATIC)
-                       ret = beiscsi_set_static_ip(shost, iface_param,
-                                                   data, dt_len);
+               if (info->value[0] == ISCSI_BOOTPROTO_DHCP)
+                       ret = beiscsi_if_en_dhcp(phba, BEISCSI_IP_TYPE_V4);
+               else if (info->value[0] == ISCSI_BOOTPROTO_STATIC)
+                       /* release DHCP IP address */
+                       ret = beiscsi_if_en_static(phba, BEISCSI_IP_TYPE_V4,
+                                                  NULL, NULL);
                else
                        beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
                                    "BS_%d : Invalid BOOTPROTO: %d\n",
-                                   iface_param->value[0]);
+                                   info->value[0]);
                break;
-       case ISCSI_NET_PARAM_IFACE_ENABLE:
-               if (iface_param->value[0] == ISCSI_IFACE_ENABLE)
-                       ret = beiscsi_create_ipv4_iface(phba);
-               else
-                       iscsi_destroy_iface(phba->ipv4_iface);
-               break;
-       case ISCSI_NET_PARAM_IPV4_SUBNET:
        case ISCSI_NET_PARAM_IPV4_ADDR:
-               ret = beiscsi_set_static_ip(shost, iface_param,
-                                           data, dt_len);
+               ip = info->value;
+               nla = nla_find(data, dt_len, ISCSI_NET_PARAM_IPV4_SUBNET);
+               if (nla) {
+                       info = nla_data(nla);
+                       subnet = info->value;
+               }
+               ret = beiscsi_if_en_static(phba, BEISCSI_IP_TYPE_V4,
+                                          ip, subnet);
                break;
-       case ISCSI_NET_PARAM_VLAN_ENABLED:
-       case ISCSI_NET_PARAM_VLAN_TAG:
-               ret = beiscsi_set_vlan_tag(shost, iface_param);
+       case ISCSI_NET_PARAM_IPV4_SUBNET:
+               /*
+                * OPCODE_COMMON_ISCSI_NTWK_MODIFY_IP_ADDR ioctl needs IP
+                * and subnet both. Find IP to be applied for this subnet.
+                */
+               subnet = info->value;
+               nla = nla_find(data, dt_len, ISCSI_NET_PARAM_IPV4_ADDR);
+               if (nla) {
+                       info = nla_data(nla);
+                       ip = info->value;
+               }
+               ret = beiscsi_if_en_static(phba, BEISCSI_IP_TYPE_V4,
+                                          ip, subnet);
                break;
-       default:
-               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
-                           "BS_%d : Param %d not supported\n",
-                           iface_param->param);
        }
 
        return ret;
 }
 
 static int
-beiscsi_set_ipv6(struct Scsi_Host *shost,
-               struct iscsi_iface_param_info *iface_param,
-               void *data, uint32_t dt_len)
+beiscsi_iface_config_ipv6(struct Scsi_Host *shost,
+                         struct iscsi_iface_param_info *iface_param,
+                         void *data, uint32_t dt_len)
 {
        struct beiscsi_hba *phba = iscsi_host_priv(shost);
-       int ret = 0;
+       int ret = -EPERM;
 
        switch (iface_param->param) {
        case ISCSI_NET_PARAM_IFACE_ENABLE:
                if (iface_param->value[0] == ISCSI_IFACE_ENABLE)
-                       ret = beiscsi_create_ipv6_iface(phba);
+                       ret = beiscsi_iface_create_ipv6(phba);
                else {
                        iscsi_destroy_iface(phba->ipv6_iface);
-                       ret = 0;
+                       phba->ipv6_iface = NULL;
                }
                break;
        case ISCSI_NET_PARAM_IPV6_ADDR:
-               ret = mgmt_set_ip(phba, iface_param, NULL,
-                                 ISCSI_BOOTPROTO_STATIC);
+               ret = beiscsi_if_en_static(phba, BEISCSI_IP_TYPE_V6,
+                                          iface_param->value, NULL);
                break;
-       case ISCSI_NET_PARAM_VLAN_ENABLED:
-       case ISCSI_NET_PARAM_VLAN_TAG:
-               ret = beiscsi_set_vlan_tag(shost, iface_param);
-               break;
-       default:
-               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
-                           "BS_%d : Param %d not supported\n",
-                           iface_param->param);
        }
 
        return ret;
 }
 
-int be2iscsi_iface_set_param(struct Scsi_Host *shost,
-               void *data, uint32_t dt_len)
+int beiscsi_iface_set_param(struct Scsi_Host *shost,
+                           void *data, uint32_t dt_len)
 {
        struct iscsi_iface_param_info *iface_param = NULL;
        struct beiscsi_hba *phba = iscsi_host_priv(shost);
        struct nlattr *attrib;
        uint32_t rm_len = dt_len;
-       int ret = 0 ;
+       int ret;
 
-       if (phba->state & BE_ADAPTER_PCI_ERR) {
-               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
-                           "BS_%d : In PCI_ERROR Recovery\n");
+       if (!beiscsi_hba_is_online(phba)) {
+               beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+                           "BS_%d : HBA in error 0x%lx\n", phba->state);
                return -EBUSY;
        }
 
+       /* update interface_handle */
+       ret = beiscsi_if_get_handle(phba);
+       if (ret) {
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+                           "BS_%d : Getting Interface Handle Failed\n");
+               return ret;
+       }
+
        nla_for_each_attr(attrib, data, dt_len, rm_len) {
                iface_param = nla_data(attrib);
 
@@ -512,40 +476,58 @@ int be2iscsi_iface_set_param(struct Scsi_Host *shost,
                        return -EINVAL;
                }
 
-               switch (iface_param->iface_type) {
-               case ISCSI_IFACE_TYPE_IPV4:
-                       ret = beiscsi_set_ipv4(shost, iface_param,
-                                              data, dt_len);
-                       break;
-               case ISCSI_IFACE_TYPE_IPV6:
-                       ret = beiscsi_set_ipv6(shost, iface_param,
-                                              data, dt_len);
+               beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+                           "BS_%d : %s.0 set param %d",
+                           (iface_param->iface_type == ISCSI_IFACE_TYPE_IPV4) ?
+                           "ipv4" : "ipv6", iface_param->param);
+
+               ret = -EPERM;
+               switch (iface_param->param) {
+               case ISCSI_NET_PARAM_VLAN_ENABLED:
+               case ISCSI_NET_PARAM_VLAN_TAG:
+                       ret = beiscsi_iface_config_vlan(shost, iface_param);
                        break;
                default:
-                       beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
-                                   "BS_%d : Invalid iface type :%d passed\n",
-                                   iface_param->iface_type);
-                       break;
+                       switch (iface_param->iface_type) {
+                       case ISCSI_IFACE_TYPE_IPV4:
+                               ret = beiscsi_iface_config_ipv4(shost,
+                                                               iface_param,
+                                                               data, dt_len);
+                               break;
+                       case ISCSI_IFACE_TYPE_IPV6:
+                               ret = beiscsi_iface_config_ipv6(shost,
+                                                               iface_param,
+                                                               data, dt_len);
+                               break;
+                       }
                }
 
+               if (ret == -EPERM) {
+                       __beiscsi_log(phba, KERN_ERR,
+                                     "BS_%d : %s.0 set param %d not permitted",
+                                     (iface_param->iface_type ==
+                                      ISCSI_IFACE_TYPE_IPV4) ? "ipv4" : "ipv6",
+                                     iface_param->param);
+                       ret = 0;
+               }
                if (ret)
-                       return ret;
+                       break;
        }
 
        return ret;
 }
 
-static int be2iscsi_get_if_param(struct beiscsi_hba *phba,
-               struct iscsi_iface *iface, int param,
-               char *buf)
+static int __beiscsi_iface_get_param(struct beiscsi_hba *phba,
+                                    struct iscsi_iface *iface,
+                                    int param, char *buf)
 {
        struct be_cmd_get_if_info_resp *if_info;
-       int len, ip_type = BE2_IPV4;
+       int len, ip_type = BEISCSI_IP_TYPE_V4;
 
        if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
-               ip_type = BE2_IPV6;
+               ip_type = BEISCSI_IP_TYPE_V6;
 
-       len = mgmt_get_if_info(phba, ip_type, &if_info);
+       len = beiscsi_if_get_info(phba, ip_type, &if_info);
        if (len)
                return len;
 
@@ -567,24 +549,24 @@ static int be2iscsi_get_if_param(struct beiscsi_hba *phba,
                break;
        case ISCSI_NET_PARAM_VLAN_ENABLED:
                len = sprintf(buf, "%s\n",
-                            (if_info->vlan_priority == BEISCSI_VLAN_DISABLE)
-                            ? "Disabled\n" : "Enabled\n");
+                             (if_info->vlan_priority == BEISCSI_VLAN_DISABLE) ?
+                             "disable" : "enable");
                break;
        case ISCSI_NET_PARAM_VLAN_ID:
                if (if_info->vlan_priority == BEISCSI_VLAN_DISABLE)
                        len = -EINVAL;
                else
                        len = sprintf(buf, "%d\n",
-                                    (if_info->vlan_priority &
-                                    ISCSI_MAX_VLAN_ID));
+                                     (if_info->vlan_priority &
+                                      ISCSI_MAX_VLAN_ID));
                break;
        case ISCSI_NET_PARAM_VLAN_PRIORITY:
                if (if_info->vlan_priority == BEISCSI_VLAN_DISABLE)
                        len = -EINVAL;
                else
                        len = sprintf(buf, "%d\n",
-                                    ((if_info->vlan_priority >> 13) &
-                                    ISCSI_MAX_VLAN_PRIORITY));
+                                     ((if_info->vlan_priority >> 13) &
+                                      ISCSI_MAX_VLAN_PRIORITY));
                break;
        default:
                WARN_ON(1);
@@ -594,18 +576,20 @@ static int be2iscsi_get_if_param(struct beiscsi_hba *phba,
        return len;
 }
 
-int be2iscsi_iface_get_param(struct iscsi_iface *iface,
-               enum iscsi_param_type param_type,
-               int param, char *buf)
+int beiscsi_iface_get_param(struct iscsi_iface *iface,
+                           enum iscsi_param_type param_type,
+                           int param, char *buf)
 {
        struct Scsi_Host *shost = iscsi_iface_to_shost(iface);
        struct beiscsi_hba *phba = iscsi_host_priv(shost);
        struct be_cmd_get_def_gateway_resp gateway;
-       int len = -ENOSYS;
+       int len = -EPERM;
 
-       if (phba->state & BE_ADAPTER_PCI_ERR) {
-               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
-                           "BS_%d : In PCI_ERROR Recovery\n");
+       if (param_type != ISCSI_NET_PARAM)
+               return 0;
+       if (!beiscsi_hba_is_online(phba)) {
+               beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+                           "BS_%d : HBA in error 0x%lx\n", phba->state);
                return -EBUSY;
        }
 
@@ -617,19 +601,22 @@ int be2iscsi_iface_get_param(struct iscsi_iface *iface,
        case ISCSI_NET_PARAM_VLAN_ENABLED:
        case ISCSI_NET_PARAM_VLAN_ID:
        case ISCSI_NET_PARAM_VLAN_PRIORITY:
-               len = be2iscsi_get_if_param(phba, iface, param, buf);
+               len = __beiscsi_iface_get_param(phba, iface, param, buf);
                break;
        case ISCSI_NET_PARAM_IFACE_ENABLE:
-               len = sprintf(buf, "enabled\n");
+               if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
+                       len = sprintf(buf, "%s\n",
+                                     phba->ipv4_iface ? "enable" : "disable");
+               else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
+                       len = sprintf(buf, "%s\n",
+                                     phba->ipv6_iface ? "enable" : "disable");
                break;
        case ISCSI_NET_PARAM_IPV4_GW:
                memset(&gateway, 0, sizeof(gateway));
-               len = mgmt_get_gateway(phba, BE2_IPV4, &gateway);
+               len = beiscsi_if_get_gw(phba, BEISCSI_IP_TYPE_V4, &gateway);
                if (!len)
                        len = sprintf(buf, "%pI4\n", &gateway.ip_addr.addr);
                break;
-       default:
-               len = -ENOSYS;
        }
 
        return len;
@@ -647,7 +634,7 @@ int beiscsi_ep_get_param(struct iscsi_endpoint *ep,
                           enum iscsi_param param, char *buf)
 {
        struct beiscsi_endpoint *beiscsi_ep = ep->dd_data;
-       int len = 0;
+       int len;
 
        beiscsi_log(beiscsi_ep->phba, KERN_INFO,
                    BEISCSI_LOG_CONFIG,
@@ -659,13 +646,13 @@ int beiscsi_ep_get_param(struct iscsi_endpoint *ep,
                len = sprintf(buf, "%hu\n", beiscsi_ep->dst_tcpport);
                break;
        case ISCSI_PARAM_CONN_ADDRESS:
-               if (beiscsi_ep->ip_type == BE2_IPV4)
+               if (beiscsi_ep->ip_type == BEISCSI_IP_TYPE_V4)
                        len = sprintf(buf, "%pI4\n", &beiscsi_ep->dst_addr);
                else
                        len = sprintf(buf, "%pI6\n", &beiscsi_ep->dst6_addr);
                break;
        default:
-               return -ENOSYS;
+               len = -EPERM;
        }
        return len;
 }
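
    The %pI4/%pI6 printk extensions above format the stored binary address
    directly; the closest user-space analogue is inet_ntop(3):

        #include <arpa/inet.h>
        #include <stdio.h>

        int main(void)
        {
                unsigned char v4[4] = { 192, 168, 0, 1 };  /* sample address */
                char buf[INET6_ADDRSTRLEN];

                /* user-space stand-in for the kernel's "%pI4" specifier */
                if (inet_ntop(AF_INET, v4, buf, sizeof(buf)))
                        printf("%s\n", buf);    /* prints 192.168.0.1 */
                return 0;
        }
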
@@ -758,7 +745,7 @@ static void beiscsi_get_port_state(struct Scsi_Host *shost)
        struct beiscsi_hba *phba = iscsi_host_priv(shost);
        struct iscsi_cls_host *ihost = shost->shost_data;
 
-       ihost->port_state = (phba->state & BE_ADAPTER_LINK_UP) ?
+       ihost->port_state = test_bit(BEISCSI_HBA_LINK_UP, &phba->state) ?
                ISCSI_PORT_STATE_UP : ISCSI_PORT_STATE_DOWN;
 }
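
    phba->state is now a flags word queried with test_bit() instead of a
    mask compared with &. A user-space analogue of the accessor (the bit
    number below is an assumption for illustration; the real value lives in
    be_main.h):

        #include <stdio.h>

        #define BEISCSI_HBA_LINK_UP     1       /* assumed bit number */

        static int test_bit_ul(unsigned int nr, const unsigned long *word)
        {
                return (*word >> nr) & 1UL;
        }

        int main(void)
        {
                unsigned long state = 1UL << BEISCSI_HBA_LINK_UP;

                printf("port state: %s\n",
                       test_bit_ul(BEISCSI_HBA_LINK_UP, &state) ?
                       "UP" : "DOWN");
                return 0;
        }
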
 
@@ -810,16 +797,13 @@ int beiscsi_get_host_param(struct Scsi_Host *shost,
        struct beiscsi_hba *phba = iscsi_host_priv(shost);
        int status = 0;
 
-
-       if (phba->state & BE_ADAPTER_PCI_ERR) {
-               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
-                           "BS_%d : In PCI_ERROR Recovery\n");
-               return -EBUSY;
-       } else {
+       if (!beiscsi_hba_is_online(phba)) {
                beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
-                           "BS_%d : In beiscsi_get_host_param,"
-                           " param = %d\n", param);
+                           "BS_%d : HBA in error 0x%lx\n", phba->state);
+               return -EBUSY;
        }
+       beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+                   "BS_%d : In beiscsi_get_host_param, param = %d\n", param);
 
        switch (param) {
        case ISCSI_HOST_PARAM_HWADDRESS:
@@ -961,15 +945,13 @@ int beiscsi_conn_start(struct iscsi_cls_conn *cls_conn)
 
        phba = ((struct beiscsi_conn *)conn->dd_data)->phba;
 
-       if (phba->state & BE_ADAPTER_PCI_ERR) {
-               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
-                           "BS_%d : In PCI_ERROR Recovery\n");
+       if (!beiscsi_hba_is_online(phba)) {
+               beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+                           "BS_%d : HBA in error 0x%lx\n", phba->state);
                return -EBUSY;
-       } else {
-               beiscsi_log(beiscsi_conn->phba, KERN_INFO,
-                           BEISCSI_LOG_CONFIG,
-                           "BS_%d : In beiscsi_conn_start\n");
        }
+       beiscsi_log(beiscsi_conn->phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+                   "BS_%d : In beiscsi_conn_start\n");
 
        memset(&params, 0, sizeof(struct beiscsi_offload_params));
        beiscsi_ep = beiscsi_conn->ep;
@@ -1186,28 +1168,20 @@ beiscsi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
        struct iscsi_endpoint *ep;
        int ret;
 
-       if (shost)
-               phba = iscsi_host_priv(shost);
-       else {
+       if (!shost) {
                ret = -ENXIO;
-               printk(KERN_ERR
-                      "beiscsi_ep_connect shost is NULL\n");
+               pr_err("beiscsi_ep_connect shost is NULL\n");
                return ERR_PTR(ret);
        }
 
-       if (beiscsi_error(phba)) {
+       phba = iscsi_host_priv(shost);
+       if (!beiscsi_hba_is_online(phba)) {
                ret = -EIO;
-               beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
-                           "BS_%d : The FW state Not Stable!!!\n");
+               beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+                           "BS_%d : HBA in error 0x%lx\n", phba->state);
                return ERR_PTR(ret);
        }
-
-       if (phba->state & BE_ADAPTER_PCI_ERR) {
-               ret = -EBUSY;
-               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
-                           "BS_%d : In PCI_ERROR Recovery\n");
-               return ERR_PTR(ret);
-       } else if (phba->state & BE_ADAPTER_LINK_DOWN) {
+       if (!test_bit(BEISCSI_HBA_LINK_UP, &phba->state)) {
                ret = -EBUSY;
                beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
                            "BS_%d : The Adapter Port state is Down!!!\n");
@@ -1361,9 +1335,9 @@ void beiscsi_ep_disconnect(struct iscsi_endpoint *ep)
                tcp_upload_flag = CONNECTION_UPLOAD_ABORT;
        }
 
-       if (phba->state & BE_ADAPTER_PCI_ERR) {
-               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
-                           "BS_%d : PCI_ERROR Recovery\n");
+       if (!beiscsi_hba_is_online(phba)) {
+               beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+                           "BS_%d : HBA in error 0x%lx\n", phba->state);
                goto free_ep;
        }
 
@@ -1386,7 +1360,7 @@ free_ep:
        iscsi_destroy_endpoint(beiscsi_ep->openiscsi_ep);
 }
 
-umode_t be2iscsi_attr_is_visible(int param_type, int param)
+umode_t beiscsi_attr_is_visible(int param_type, int param)
 {
        switch (param_type) {
        case ISCSI_NET_PARAM:
index 0c84e1c0763acc98e04003be5b966fd2f277f450..e4d67dfea4cbd6666f09d1231546b16e43ccb848 100644 (file)
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2015 Avago Technologies
+ * Copyright (C) 2005 - 2016 Broadcom
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -7,10 +7,10 @@
  * as published by the Free Software Foundation.  The full GNU General
  * Public License is included in this distribution in the file called COPYING.
  *
- * Written by: Jayamohan Kallickal (jayamohan.kallickal@avagotech.com)
+ * Written by: Jayamohan Kallickal (jayamohan.kallickal@broadcom.com)
  *
  * Contact Information:
- * linux-drivers@avagotech.com
+ * linux-drivers@broadcom.com
  *
  * Avago Technologies
  * 3333 Susan Street
 #include "be_main.h"
 #include "be_mgmt.h"
 
-#define BE2_IPV4  0x1
-#define BE2_IPV6  0x10
-#define BE2_DHCP_V4 0x05
+void beiscsi_iface_create_default(struct beiscsi_hba *phba);
 
-#define NON_BLOCKING 0x0
-#define BLOCKING 0x1
+void beiscsi_iface_destroy_default(struct beiscsi_hba *phba);
 
-void beiscsi_create_def_ifaces(struct beiscsi_hba *phba);
-
-void beiscsi_destroy_def_ifaces(struct beiscsi_hba *phba);
-
-int be2iscsi_iface_get_param(struct iscsi_iface *iface,
+int beiscsi_iface_get_param(struct iscsi_iface *iface,
                             enum iscsi_param_type param_type,
                             int param, char *buf);
 
-int be2iscsi_iface_set_param(struct Scsi_Host *shost,
+int beiscsi_iface_set_param(struct Scsi_Host *shost,
                             void *data, uint32_t count);
 
-umode_t be2iscsi_attr_is_visible(int param_type, int param);
+umode_t beiscsi_attr_is_visible(int param_type, int param);
 
 void beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
                                struct beiscsi_offload_params *params);
@@ -57,6 +50,8 @@ struct iscsi_cls_session *beiscsi_session_create(struct iscsi_endpoint *ep,
 
 void beiscsi_session_destroy(struct iscsi_cls_session *cls_session);
 
+void beiscsi_session_fail(struct iscsi_cls_session *cls_session);
+
 struct iscsi_cls_conn *beiscsi_conn_create(struct iscsi_cls_session
                                           *cls_session, uint32_t cid);
 
index f05e7737107d2a0fced40581b7f34bad70ca2820..6a6906f847dbba07ef38149a9912563d48ba3d82 100644 (file)
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2015 Emulex
+ * Copyright (C) 2005 - 2016 Broadcom
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -7,10 +7,10 @@
  * as published by the Free Software Foundation.  The full GNU General
  * Public License is included in this distribution in the file called COPYING.
  *
- * Written by: Jayamohan Kallickal (jayamohan.kallickal@avagotech.com)
+ * Written by: Jayamohan Kallickal (jayamohan.kallickal@broadcom.com)
  *
  * Contact Information:
- * linux-drivers@avagotech.com
+ * linux-drivers@broadcom.com
  *
  * Emulex
  * 3333 Susan Street
@@ -374,170 +374,6 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
        return iscsi_eh_device_reset(sc);
 }
 
-static ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf)
-{
-       struct beiscsi_hba *phba = data;
-       struct mgmt_session_info *boot_sess = &phba->boot_sess;
-       struct mgmt_conn_info *boot_conn = &boot_sess->conn_list[0];
-       char *str = buf;
-       int rc;
-
-       switch (type) {
-       case ISCSI_BOOT_TGT_NAME:
-               rc = sprintf(buf, "%.*s\n",
-                           (int)strlen(boot_sess->target_name),
-                           (char *)&boot_sess->target_name);
-               break;
-       case ISCSI_BOOT_TGT_IP_ADDR:
-               if (boot_conn->dest_ipaddr.ip_type == 0x1)
-                       rc = sprintf(buf, "%pI4\n",
-                               (char *)&boot_conn->dest_ipaddr.addr);
-               else
-                       rc = sprintf(str, "%pI6\n",
-                               (char *)&boot_conn->dest_ipaddr.addr);
-               break;
-       case ISCSI_BOOT_TGT_PORT:
-               rc = sprintf(str, "%d\n", boot_conn->dest_port);
-               break;
-
-       case ISCSI_BOOT_TGT_CHAP_NAME:
-               rc = sprintf(str,  "%.*s\n",
-                            boot_conn->negotiated_login_options.auth_data.chap.
-                            target_chap_name_length,
-                            (char *)&boot_conn->negotiated_login_options.
-                            auth_data.chap.target_chap_name);
-               break;
-       case ISCSI_BOOT_TGT_CHAP_SECRET:
-               rc = sprintf(str,  "%.*s\n",
-                            boot_conn->negotiated_login_options.auth_data.chap.
-                            target_secret_length,
-                            (char *)&boot_conn->negotiated_login_options.
-                            auth_data.chap.target_secret);
-               break;
-       case ISCSI_BOOT_TGT_REV_CHAP_NAME:
-               rc = sprintf(str,  "%.*s\n",
-                            boot_conn->negotiated_login_options.auth_data.chap.
-                            intr_chap_name_length,
-                            (char *)&boot_conn->negotiated_login_options.
-                            auth_data.chap.intr_chap_name);
-               break;
-       case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
-               rc = sprintf(str,  "%.*s\n",
-                            boot_conn->negotiated_login_options.auth_data.chap.
-                            intr_secret_length,
-                            (char *)&boot_conn->negotiated_login_options.
-                            auth_data.chap.intr_secret);
-               break;
-       case ISCSI_BOOT_TGT_FLAGS:
-               rc = sprintf(str, "2\n");
-               break;
-       case ISCSI_BOOT_TGT_NIC_ASSOC:
-               rc = sprintf(str, "0\n");
-               break;
-       default:
-               rc = -ENOSYS;
-               break;
-       }
-       return rc;
-}
-
-static ssize_t beiscsi_show_boot_ini_info(void *data, int type, char *buf)
-{
-       struct beiscsi_hba *phba = data;
-       char *str = buf;
-       int rc;
-
-       switch (type) {
-       case ISCSI_BOOT_INI_INITIATOR_NAME:
-               rc = sprintf(str, "%s\n", phba->boot_sess.initiator_iscsiname);
-               break;
-       default:
-               rc = -ENOSYS;
-               break;
-       }
-       return rc;
-}
-
-static ssize_t beiscsi_show_boot_eth_info(void *data, int type, char *buf)
-{
-       struct beiscsi_hba *phba = data;
-       char *str = buf;
-       int rc;
-
-       switch (type) {
-       case ISCSI_BOOT_ETH_FLAGS:
-               rc = sprintf(str, "2\n");
-               break;
-       case ISCSI_BOOT_ETH_INDEX:
-               rc = sprintf(str, "0\n");
-               break;
-       case ISCSI_BOOT_ETH_MAC:
-               rc  = beiscsi_get_macaddr(str, phba);
-               break;
-       default:
-               rc = -ENOSYS;
-               break;
-       }
-       return rc;
-}
-
-
-static umode_t beiscsi_tgt_get_attr_visibility(void *data, int type)
-{
-       umode_t rc;
-
-       switch (type) {
-       case ISCSI_BOOT_TGT_NAME:
-       case ISCSI_BOOT_TGT_IP_ADDR:
-       case ISCSI_BOOT_TGT_PORT:
-       case ISCSI_BOOT_TGT_CHAP_NAME:
-       case ISCSI_BOOT_TGT_CHAP_SECRET:
-       case ISCSI_BOOT_TGT_REV_CHAP_NAME:
-       case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
-       case ISCSI_BOOT_TGT_NIC_ASSOC:
-       case ISCSI_BOOT_TGT_FLAGS:
-               rc = S_IRUGO;
-               break;
-       default:
-               rc = 0;
-               break;
-       }
-       return rc;
-}
-
-static umode_t beiscsi_ini_get_attr_visibility(void *data, int type)
-{
-       umode_t rc;
-
-       switch (type) {
-       case ISCSI_BOOT_INI_INITIATOR_NAME:
-               rc = S_IRUGO;
-               break;
-       default:
-               rc = 0;
-               break;
-       }
-       return rc;
-}
-
-
-static umode_t beiscsi_eth_get_attr_visibility(void *data, int type)
-{
-       umode_t rc;
-
-       switch (type) {
-       case ISCSI_BOOT_ETH_FLAGS:
-       case ISCSI_BOOT_ETH_MAC:
-       case ISCSI_BOOT_ETH_INDEX:
-               rc = S_IRUGO;
-               break;
-       default:
-               rc = 0;
-               break;
-       }
-       return rc;
-}
-
 /*------------------- PCI Driver operations and data ----------------- */
 static const struct pci_device_id beiscsi_pci_id_table[] = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
@@ -850,12 +686,11 @@ static void hwi_ring_eq_db(struct beiscsi_hba *phba,
 static irqreturn_t be_isr_mcc(int irq, void *dev_id)
 {
        struct beiscsi_hba *phba;
-       struct be_eq_entry *eqe = NULL;
+       struct be_eq_entry *eqe;
        struct be_queue_info *eq;
        struct be_queue_info *mcc;
-       unsigned int num_eq_processed;
+       unsigned int mcc_events;
        struct be_eq_obj *pbe_eq;
-       unsigned long flags;
 
        pbe_eq = dev_id;
        eq = &pbe_eq->q;
@@ -863,27 +698,23 @@ static irqreturn_t be_isr_mcc(int irq, void *dev_id)
        mcc = &phba->ctrl.mcc_obj.cq;
        eqe = queue_tail_node(eq);
 
-       num_eq_processed = 0;
-
+       mcc_events = 0;
        while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
                                & EQE_VALID_MASK) {
                if (((eqe->dw[offsetof(struct amap_eq_entry,
                     resource_id) / 32] &
                     EQE_RESID_MASK) >> 16) == mcc->id) {
-                       spin_lock_irqsave(&phba->isr_lock, flags);
-                       pbe_eq->todo_mcc_cq = true;
-                       spin_unlock_irqrestore(&phba->isr_lock, flags);
+                       mcc_events++;
                }
                AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
                queue_tail_inc(eq);
                eqe = queue_tail_node(eq);
-               num_eq_processed++;
        }
-       if (pbe_eq->todo_mcc_cq)
-               queue_work(phba->wq, &pbe_eq->work_cqs);
-       if (num_eq_processed)
-               hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);
 
+       if (mcc_events) {
+               queue_work(phba->wq, &pbe_eq->mcc_work);
+               hwi_ring_eq_db(phba, eq->id, 1, mcc_events, 1, 1);
+       }
        return IRQ_HANDLED;
 }
 
@@ -902,7 +733,6 @@ static irqreturn_t be_isr_msix(int irq, void *dev_id)
        eq = &pbe_eq->q;
 
        phba = pbe_eq->phba;
-
        /* disable interrupt till iopoll completes */
        hwi_ring_eq_db(phba, eq->id, 1, 0, 0, 1);
        irq_poll_sched(&pbe_eq->iopoll);
@@ -920,14 +750,13 @@ static irqreturn_t be_isr(int irq, void *dev_id)
        struct beiscsi_hba *phba;
        struct hwi_controller *phwi_ctrlr;
        struct hwi_context_memory *phwi_context;
-       struct be_eq_entry *eqe = NULL;
+       struct be_eq_entry *eqe;
        struct be_queue_info *eq;
        struct be_queue_info *mcc;
-       unsigned long flags, index;
-       unsigned int num_mcceq_processed, num_ioeq_processed;
+       unsigned int mcc_events, io_events;
        struct be_ctrl_info *ctrl;
        struct be_eq_obj *pbe_eq;
-       int isr;
+       int isr, rearm;
 
        phba = dev_id;
        ctrl = &phba->ctrl;
@@ -942,44 +771,35 @@ static irqreturn_t be_isr(int irq, void *dev_id)
 
        eq = &phwi_context->be_eq[0].q;
        mcc = &phba->ctrl.mcc_obj.cq;
-       index = 0;
        eqe = queue_tail_node(eq);
 
-       num_ioeq_processed = 0;
-       num_mcceq_processed = 0;
+       io_events = 0;
+       mcc_events = 0;
        while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
                                & EQE_VALID_MASK) {
                if (((eqe->dw[offsetof(struct amap_eq_entry,
-                    resource_id) / 32] &
-                    EQE_RESID_MASK) >> 16) == mcc->id) {
-                       spin_lock_irqsave(&phba->isr_lock, flags);
-                       pbe_eq->todo_mcc_cq = true;
-                       spin_unlock_irqrestore(&phba->isr_lock, flags);
-                       num_mcceq_processed++;
-               } else {
-                       irq_poll_sched(&pbe_eq->iopoll);
-                       num_ioeq_processed++;
-               }
+                     resource_id) / 32] & EQE_RESID_MASK) >> 16) == mcc->id)
+                       mcc_events++;
+               else
+                       io_events++;
                AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
                queue_tail_inc(eq);
                eqe = queue_tail_node(eq);
        }
-       if (num_ioeq_processed || num_mcceq_processed) {
-               if (pbe_eq->todo_mcc_cq)
-                       queue_work(phba->wq, &pbe_eq->work_cqs);
-
-               if ((num_mcceq_processed) && (!num_ioeq_processed))
-                       hwi_ring_eq_db(phba, eq->id, 0,
-                                     (num_ioeq_processed +
-                                      num_mcceq_processed) , 1, 1);
-               else
-                       hwi_ring_eq_db(phba, eq->id, 0,
-                                      (num_ioeq_processed +
-                                       num_mcceq_processed), 0, 1);
-
-               return IRQ_HANDLED;
-       } else
+       if (!io_events && !mcc_events)
                return IRQ_NONE;
+
+       /* no need to rearm if interrupt is only for IOs */
+       rearm = 0;
+       if (mcc_events) {
+               queue_work(phba->wq, &pbe_eq->mcc_work);
+               /* rearm for MCCQ */
+               rearm = 1;
+       }
+       if (io_events)
+               irq_poll_sched(&pbe_eq->iopoll);
+       hwi_ring_eq_db(phba, eq->id, 0, (io_events + mcc_events), rearm, 1);
+       return IRQ_HANDLED;
 }
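
    The rearm decision above is the subtle part: MCC completions are
    handed to a workqueue, so the EQ must be rearmed right away, while IO
    events are drained by irq_poll, which rearms when polling completes. A
    standalone restatement:

        #include <stdio.h>

        /* Returns 1 when the EQ doorbell should set the rearm bit, as in
         * be_isr(): only MCC work forces an immediate rearm. */
        static int eq_should_rearm(unsigned int io_events,
                                   unsigned int mcc_events)
        {
                return mcc_events ? 1 : 0;
        }

        int main(void)
        {
                printf("%d\n", eq_should_rearm(4, 0)); /* 0: irq_poll rearms */
                printf("%d\n", eq_should_rearm(3, 1)); /* 1: rearm for MCC */
                return 0;
        }
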
 
 
@@ -1077,57 +897,6 @@ void hwi_ring_cq_db(struct beiscsi_hba *phba,
        iowrite32(val, phba->db_va + DB_CQ_OFFSET);
 }
 
-static unsigned int
-beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
-                         struct beiscsi_hba *phba,
-                         struct pdu_base *ppdu,
-                         unsigned long pdu_len,
-                         void *pbuffer, unsigned long buf_len)
-{
-       struct iscsi_conn *conn = beiscsi_conn->conn;
-       struct iscsi_session *session = conn->session;
-       struct iscsi_task *task;
-       struct beiscsi_io_task *io_task;
-       struct iscsi_hdr *login_hdr;
-
-       switch (ppdu->dw[offsetof(struct amap_pdu_base, opcode) / 32] &
-                                               PDUBASE_OPCODE_MASK) {
-       case ISCSI_OP_NOOP_IN:
-               pbuffer = NULL;
-               buf_len = 0;
-               break;
-       case ISCSI_OP_ASYNC_EVENT:
-               break;
-       case ISCSI_OP_REJECT:
-               WARN_ON(!pbuffer);
-               WARN_ON(!(buf_len == 48));
-               beiscsi_log(phba, KERN_ERR,
-                           BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
-                           "BM_%d : In ISCSI_OP_REJECT\n");
-               break;
-       case ISCSI_OP_LOGIN_RSP:
-       case ISCSI_OP_TEXT_RSP:
-               task = conn->login_task;
-               io_task = task->dd_data;
-               login_hdr = (struct iscsi_hdr *)ppdu;
-               login_hdr->itt = io_task->libiscsi_itt;
-               break;
-       default:
-               beiscsi_log(phba, KERN_WARNING,
-                           BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
-                           "BM_%d : Unrecognized opcode 0x%x in async msg\n",
-                           (ppdu->
-                            dw[offsetof(struct amap_pdu_base, opcode) / 32]
-                            & PDUBASE_OPCODE_MASK));
-               return 1;
-       }
-
-       spin_lock_bh(&session->back_lock);
-       __iscsi_complete_pdu(conn, (struct iscsi_hdr *)ppdu, pbuffer, buf_len);
-       spin_unlock_bh(&session->back_lock);
-       return 0;
-}
-
 static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
 {
        struct sgl_handle *psgl_handle;
@@ -1199,6 +968,9 @@ beiscsi_get_wrb_handle(struct hwi_wrb_context *pwrb_context,
                pwrb_context->alloc_index++;
        spin_unlock_bh(&pwrb_context->wrb_lock);
 
+       if (pwrb_handle)
+               memset(pwrb_handle->pwrb, 0, sizeof(*pwrb_handle->pwrb));
+
        return pwrb_handle;
 }
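
    Clearing the WRB when the handle is allocated (rather than at
    completion, which the hwi_complete_drvr_msgs hunk below stops doing)
    guarantees each caller starts from a clean descriptor. A toy version of
    the alloc-time zeroing pattern:

        #include <string.h>

        struct wrb { unsigned int dw[16]; };    /* stand-in descriptor */
        struct wrb_handle { struct wrb *pwrb; };

        /* Zero the descriptor as it is handed out, so completion paths
         * need not clean up after themselves. */
        static struct wrb_handle *get_wrb_handle(struct wrb_handle *h)
        {
                if (h)
                        memset(h->pwrb, 0, sizeof(*h->pwrb));
                return h;
        }

        int main(void)
        {
                struct wrb w = { .dw = { 0xdeadbeef } };
                struct wrb_handle h = { .pwrb = &w };

                get_wrb_handle(&h);     /* w.dw[] is now all zero */
                return (int)w.dw[0];    /* returns 0 */
        }
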
 
@@ -1440,11 +1212,10 @@ hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
                       struct beiscsi_hba *phba, struct sol_cqe *psol)
 {
        struct hwi_wrb_context *pwrb_context;
-       struct wrb_handle *pwrb_handle = NULL;
+       uint16_t wrb_index, cid, cri_index;
        struct hwi_controller *phwi_ctrlr;
+       struct wrb_handle *pwrb_handle;
        struct iscsi_task *task;
-       struct beiscsi_io_task *io_task;
-       uint16_t wrb_index, cid, cri_index;
 
        phwi_ctrlr = phba->phwi_ctrlr;
        if (is_chip_be2_be3r(phba)) {
@@ -1463,9 +1234,6 @@ hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
        pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
        pwrb_handle = pwrb_context->pwrb_handle_basestd[wrb_index];
        task = pwrb_handle->pio_handle;
-
-       io_task = task->dd_data;
-       memset(io_task->pwrb_handle->pwrb, 0, sizeof(struct iscsi_wrb));
        iscsi_put_task(task);
 }
 
@@ -1614,431 +1382,428 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
        spin_unlock_bh(&session->back_lock);
 }
 
-static struct list_head *hwi_get_async_busy_list(struct hwi_async_pdu_context
-                                         *pasync_ctx, unsigned int is_header,
-                                         unsigned int host_write_ptr)
+/**
+ * ASYNC PDUs include
+ * a. Unsolicited NOP-In (target initiated NOP-In)
+ * b. ASYNC Messages
+ * c. Reject PDU
+ * d. Login response
+ * These headers arrive unprocessed by the EP firmware; the
+ * iSCSI layer processes them.
+ */
+static unsigned int
+beiscsi_complete_pdu(struct beiscsi_conn *beiscsi_conn,
+               struct pdu_base *phdr, void *pdata, unsigned int dlen)
 {
-       if (is_header)
-               return &pasync_ctx->async_entry[host_write_ptr].
-                   header_busy_list;
-       else
-               return &pasync_ctx->async_entry[host_write_ptr].data_busy_list;
+       struct beiscsi_hba *phba = beiscsi_conn->phba;
+       struct iscsi_conn *conn = beiscsi_conn->conn;
+       struct beiscsi_io_task *io_task;
+       struct iscsi_hdr *login_hdr;
+       struct iscsi_task *task;
+       u8 code;
+
+       code = AMAP_GET_BITS(struct amap_pdu_base, opcode, phdr);
+       switch (code) {
+       case ISCSI_OP_NOOP_IN:
+               pdata = NULL;
+               dlen = 0;
+               break;
+       case ISCSI_OP_ASYNC_EVENT:
+               break;
+       case ISCSI_OP_REJECT:
+               WARN_ON(!pdata);
+               WARN_ON(!(dlen == 48));
+               beiscsi_log(phba, KERN_ERR,
+                           BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
+                           "BM_%d : In ISCSI_OP_REJECT\n");
+               break;
+       case ISCSI_OP_LOGIN_RSP:
+       case ISCSI_OP_TEXT_RSP:
+               task = conn->login_task;
+               io_task = task->dd_data;
+               login_hdr = (struct iscsi_hdr *)phdr;
+               login_hdr->itt = io_task->libiscsi_itt;
+               break;
+       default:
+               beiscsi_log(phba, KERN_WARNING,
+                           BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
+                           "BM_%d : unrecognized async PDU opcode 0x%x\n",
+                           code);
+               return 1;
+       }
+       __iscsi_complete_pdu(conn, (struct iscsi_hdr *)phdr, pdata, dlen);
+       return 0;
+}
+
+static inline void
+beiscsi_hdl_put_handle(struct hd_async_context *pasync_ctx,
+                        struct hd_async_handle *pasync_handle)
+{
+       if (pasync_handle->is_header) {
+               list_add_tail(&pasync_handle->link,
+                               &pasync_ctx->async_header.free_list);
+               pasync_ctx->async_header.free_entries++;
+       } else {
+               list_add_tail(&pasync_handle->link,
+                               &pasync_ctx->async_data.free_list);
+               pasync_ctx->async_data.free_entries++;
+       }
 }
 
-static struct async_pdu_handle *
-hwi_get_async_handle(struct beiscsi_hba *phba,
-                    struct beiscsi_conn *beiscsi_conn,
-                    struct hwi_async_pdu_context *pasync_ctx,
-                    struct i_t_dpdu_cqe *pdpdu_cqe, unsigned int *pcq_index)
+static struct hd_async_handle *
+beiscsi_hdl_get_handle(struct beiscsi_conn *beiscsi_conn,
+                      struct hd_async_context *pasync_ctx,
+                      struct i_t_dpdu_cqe *pdpdu_cqe)
 {
+       struct beiscsi_hba *phba = beiscsi_conn->phba;
+       struct hd_async_handle *pasync_handle;
        struct be_bus_address phys_addr;
-       struct list_head *pbusy_list;
-       struct async_pdu_handle *pasync_handle = NULL;
-       unsigned char is_header = 0;
-       unsigned int index, dpl;
+       u8 final, error = 0;
+       u16 cid, code, ci;
+       u32 dpl;
 
+       cid = beiscsi_conn->beiscsi_conn_cid;
+       /**
+        * This function is invoked to get the right async_handle structure
+        * from a given DEF PDU CQ entry.
+        *
+        * - index in CQ entry gives the vertical index
+        * - address in CQ entry is the offset where the DMA last ended
+        * - final - no more notifications for this PDU
+        */
        if (is_chip_be2_be3r(phba)) {
                dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
                                    dpl, pdpdu_cqe);
-               index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
+               ci = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
                                      index, pdpdu_cqe);
+               final = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
+                                     final, pdpdu_cqe);
        } else {
                dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
                                    dpl, pdpdu_cqe);
-               index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
+               ci = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
                                      index, pdpdu_cqe);
+               final = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
+                                     final, pdpdu_cqe);
        }
 
-       phys_addr.u.a32.address_lo =
-               (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
-                                       db_addr_lo) / 32] - dpl);
-       phys_addr.u.a32.address_hi =
-               pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
-                                      db_addr_hi) / 32];
-
-       phys_addr.u.a64.address =
-                       *((unsigned long long *)(&phys_addr.u.a64.address));
-
-       switch (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, code) / 32]
-                       & PDUCQE_CODE_MASK) {
+       /**
+        * DB addr Hi/Lo is same for BE and SKH.
+        * Subtract the data placement length (dpl) to get the base address.
+        */
+       phys_addr.u.a32.address_lo = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
+                                                  db_addr_lo, pdpdu_cqe);
+       phys_addr.u.a32.address_lo -= dpl;
+       phys_addr.u.a32.address_hi = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
+                                                  db_addr_hi, pdpdu_cqe);
+
+       code = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, code, pdpdu_cqe);
+       switch (code) {
        case UNSOL_HDR_NOTIFY:
-               is_header = 1;
-
-                pbusy_list = hwi_get_async_busy_list(pasync_ctx,
-                                                     is_header, index);
+               pasync_handle = pasync_ctx->async_entry[ci].header;
                break;
+       case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
+               error = 1;
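+               /* fall through */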
        case UNSOL_DATA_NOTIFY:
-                pbusy_list = hwi_get_async_busy_list(pasync_ctx,
-                                                     is_header, index);
+               pasync_handle = pasync_ctx->async_entry[ci].data;
                break;
+       /* this function is invoked only for the codes above */
        default:
-               pbusy_list = NULL;
-               beiscsi_log(phba, KERN_WARNING,
-                           BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
-                           "BM_%d : Unexpected code=%d\n",
-                           pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
-                           code) / 32] & PDUCQE_CODE_MASK);
-               return NULL;
+               pasync_handle = NULL;
+               break;
        }
 
-       WARN_ON(list_empty(pbusy_list));
-       list_for_each_entry(pasync_handle, pbusy_list, link) {
-               if (pasync_handle->pa.u.a64.address == phys_addr.u.a64.address)
-                       break;
+       if (!pasync_handle) {
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI,
+                           "BM_%d : cid %d async PDU handle not found - code %d ci %d addr %llx\n",
+                           cid, code, ci, phys_addr.u.a64.address);
+               return pasync_handle;
        }
 
-       WARN_ON(!pasync_handle);
+       if (pasync_handle->pa.u.a64.address != phys_addr.u.a64.address ||
+           pasync_handle->index != ci) {
+               /* driver bug - if ci does not match async handle index */
+               error = 1;
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI,
+                           "BM_%d : cid %u async PDU handle mismatch - addr in %cQE %llx at %u:addr in CQE %llx ci %u\n",
+                           cid, pasync_handle->is_header ? 'H' : 'D',
+                           pasync_handle->pa.u.a64.address,
+                           pasync_handle->index,
+                           phys_addr.u.a64.address, ci);
+               /* FW has stale address - attempt continuing by dropping */
+       }
 
-       pasync_handle->cri = BE_GET_ASYNC_CRI_FROM_CID(
-                            beiscsi_conn->beiscsi_conn_cid);
-       pasync_handle->is_header = is_header;
+       /**
+        * Each CID is associated with a unique CRI.
+        * The ASYNC_CRI_FROM_CID mapping and CRI_FROM_CID are totally
+        * different.
+        */
+       pasync_handle->cri = BE_GET_ASYNC_CRI_FROM_CID(cid);
+       pasync_handle->is_final = final;
        pasync_handle->buffer_len = dpl;
-       *pcq_index = index;
+       /* empty the slot */
+       if (pasync_handle->is_header)
+               pasync_ctx->async_entry[ci].header = NULL;
+       else
+               pasync_ctx->async_entry[ci].data = NULL;
 
+       /**
+        * DEF PDU header and data buffers with errors should be simply
+        * dropped as there are no consumers for it.
+        */
+       if (error) {
+               beiscsi_hdl_put_handle(pasync_ctx, pasync_handle);
+               pasync_handle = NULL;
+       }
        return pasync_handle;
 }
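
    The address recovery above can be restated in isolation: the CQE
    reports where the DMA ended, so subtracting dpl from the low 32 bits
    gives the buffer base (the sketch mirrors the driver's 32-bit-only
    subtraction):

        #include <stdint.h>
        #include <stdio.h>

        static uint64_t cqe_base_addr(uint32_t db_addr_hi,
                                      uint32_t db_addr_lo, uint32_t dpl)
        {
                uint32_t lo = db_addr_lo - dpl; /* back up over placed data */

                return ((uint64_t)db_addr_hi << 32) | lo;
        }

        int main(void)
        {
                /* sample: DMA ended at 0x1_0000_1030, 0x30 bytes placed */
                printf("0x%llx\n", (unsigned long long)
                       cqe_base_addr(0x1, 0x1030, 0x30));  /* 0x100001000 */
                return 0;
        }
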
 
-static unsigned int
-hwi_update_async_writables(struct beiscsi_hba *phba,
-                           struct hwi_async_pdu_context *pasync_ctx,
-                           unsigned int is_header, unsigned int cq_index)
+static void
+beiscsi_hdl_purge_handles(struct beiscsi_hba *phba,
+                         struct hd_async_context *pasync_ctx,
+                         u16 cri)
 {
-       struct list_head *pbusy_list;
-       struct async_pdu_handle *pasync_handle;
-       unsigned int num_entries, writables = 0;
-       unsigned int *pep_read_ptr, *pwritables;
-
-       num_entries = pasync_ctx->num_entries;
-       if (is_header) {
-               pep_read_ptr = &pasync_ctx->async_header.ep_read_ptr;
-               pwritables = &pasync_ctx->async_header.writables;
-       } else {
-               pep_read_ptr = &pasync_ctx->async_data.ep_read_ptr;
-               pwritables = &pasync_ctx->async_data.writables;
-       }
-
-       while ((*pep_read_ptr) != cq_index) {
-               (*pep_read_ptr)++;
-               *pep_read_ptr = (*pep_read_ptr) % num_entries;
-
-               pbusy_list = hwi_get_async_busy_list(pasync_ctx, is_header,
-                                                    *pep_read_ptr);
-               if (writables == 0)
-                       WARN_ON(list_empty(pbusy_list));
-
-               if (!list_empty(pbusy_list)) {
-                       pasync_handle = list_entry(pbusy_list->next,
-                                                  struct async_pdu_handle,
-                                                  link);
-                       WARN_ON(!pasync_handle);
-                       pasync_handle->consumed = 1;
-               }
-
-               writables++;
-       }
+       struct hd_async_handle *pasync_handle, *tmp_handle;
+       struct list_head *plist;
 
-       if (!writables) {
-               beiscsi_log(phba, KERN_ERR,
-                           BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
-                           "BM_%d : Duplicate notification received - index 0x%x!!\n",
-                           cq_index);
-               WARN_ON(1);
+       plist  = &pasync_ctx->async_entry[cri].wq.list;
+       list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) {
+               list_del(&pasync_handle->link);
+               beiscsi_hdl_put_handle(pasync_ctx, pasync_handle);
        }
 
-       *pwritables = *pwritables + writables;
-       return 0;
+       INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wq.list);
+       pasync_ctx->async_entry[cri].wq.hdr_len = 0;
+       pasync_ctx->async_entry[cri].wq.bytes_received = 0;
+       pasync_ctx->async_entry[cri].wq.bytes_needed = 0;
 }
 
-static void hwi_free_async_msg(struct beiscsi_hba *phba,
-                              struct hwi_async_pdu_context *pasync_ctx,
-                              unsigned int cri)
+static unsigned int
+beiscsi_hdl_fwd_pdu(struct beiscsi_conn *beiscsi_conn,
+                   struct hd_async_context *pasync_ctx,
+                   u16 cri)
 {
-       struct async_pdu_handle *pasync_handle, *tmp_handle;
+       struct iscsi_session *session = beiscsi_conn->conn->session;
+       struct hd_async_handle *pasync_handle, *plast_handle;
+       struct beiscsi_hba *phba = beiscsi_conn->phba;
+       void *phdr = NULL, *pdata = NULL;
+       u32 dlen = 0, status = 0;
        struct list_head *plist;
 
-       plist  = &pasync_ctx->async_entry[cri].wait_queue.list;
-       list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) {
-               list_del(&pasync_handle->link);
-
-               if (pasync_handle->is_header) {
-                       list_add_tail(&pasync_handle->link,
-                                     &pasync_ctx->async_header.free_list);
-                       pasync_ctx->async_header.free_entries++;
-               } else {
-                       list_add_tail(&pasync_handle->link,
-                                     &pasync_ctx->async_data.free_list);
-                       pasync_ctx->async_data.free_entries++;
+       plist = &pasync_ctx->async_entry[cri].wq.list;
+       plast_handle = NULL;
+       list_for_each_entry(pasync_handle, plist, link) {
+               plast_handle = pasync_handle;
+               /* get the header, the first entry */
+               if (!phdr) {
+                       phdr = pasync_handle->pbuffer;
+                       continue;
                }
+               /* use first buffer to collect all the data */
+               if (!pdata) {
+                       pdata = pasync_handle->pbuffer;
+                       dlen = pasync_handle->buffer_len;
+                       continue;
+               }
+               memcpy(pdata + dlen, pasync_handle->pbuffer,
+                      pasync_handle->buffer_len);
+               dlen += pasync_handle->buffer_len;
        }
 
-       INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wait_queue.list);
-       pasync_ctx->async_entry[cri].wait_queue.hdr_received = 0;
-       pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
+       if (!plast_handle->is_final) {
+               /* last handle should have final PDU notification from FW */
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI,
+                           "BM_%d : cid %u %p fwd async PDU with last handle missing - HL%u:DN%u:DR%u\n",
+                           beiscsi_conn->beiscsi_conn_cid, plast_handle,
+                           pasync_ctx->async_entry[cri].wq.hdr_len,
+                           pasync_ctx->async_entry[cri].wq.bytes_needed,
+                           pasync_ctx->async_entry[cri].wq.bytes_received);
+       }
+       spin_lock_bh(&session->back_lock);
+       status = beiscsi_complete_pdu(beiscsi_conn, phdr, pdata, dlen);
+       spin_unlock_bh(&session->back_lock);
+       beiscsi_hdl_purge_handles(phba, pasync_ctx, cri);
+       return status;
 }
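
    beiscsi_hdl_fwd_pdu() keeps the header in the first list entry and
    folds every later fragment's payload into the first data buffer before
    completing the PDU. A toy illustration of the gathering step:

        #include <stdio.h>
        #include <string.h>

        int main(void)
        {
                char frag0[64] = "iscsi";       /* first data buffer */
                const char *frag1 = " async";
                const char *frag2 = " pdu";
                unsigned int dlen = strlen(frag0);

                /* later fragments are appended into the first buffer */
                memcpy(frag0 + dlen, frag1, strlen(frag1));
                dlen += strlen(frag1);
                memcpy(frag0 + dlen, frag2, strlen(frag2));
                dlen += strlen(frag2);

                printf("%.*s (%u bytes)\n", (int)dlen, frag0, dlen);
                return 0;
        }
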
 
-static struct phys_addr *
-hwi_get_ring_address(struct hwi_async_pdu_context *pasync_ctx,
-                    unsigned int is_header, unsigned int host_write_ptr)
+static unsigned int
+beiscsi_hdl_gather_pdu(struct beiscsi_conn *beiscsi_conn,
+                      struct hd_async_context *pasync_ctx,
+                      struct hd_async_handle *pasync_handle)
 {
-       struct phys_addr *pasync_sge = NULL;
+       unsigned int bytes_needed = 0, status = 0;
+       u16 cri = pasync_handle->cri;
+       struct cri_wait_queue *wq;
+       struct beiscsi_hba *phba;
+       struct pdu_base *ppdu;
+       char *err = "";
 
-       if (is_header)
-               pasync_sge = pasync_ctx->async_header.ring_base;
-       else
-               pasync_sge = pasync_ctx->async_data.ring_base;
+       phba = beiscsi_conn->phba;
+       wq = &pasync_ctx->async_entry[cri].wq;
+       if (pasync_handle->is_header) {
+               /* check if PDU hdr is rcv'd when old hdr not completed */
+               if (wq->hdr_len) {
+                       err = "incomplete";
+                       goto drop_pdu;
+               }
+               ppdu = pasync_handle->pbuffer;
+               bytes_needed = AMAP_GET_BITS(struct amap_pdu_base,
+                                            data_len_hi, ppdu);
+               bytes_needed <<= 16;
+               bytes_needed |= be16_to_cpu(AMAP_GET_BITS(struct amap_pdu_base,
+                                                         data_len_lo, ppdu));
+               wq->hdr_len = pasync_handle->buffer_len;
+               wq->bytes_received = 0;
+               wq->bytes_needed = bytes_needed;
+               list_add_tail(&pasync_handle->link, &wq->list);
+               if (!bytes_needed)
+                       status = beiscsi_hdl_fwd_pdu(beiscsi_conn,
+                                                    pasync_ctx, cri);
+       } else {
+               /* check if data received has header and is needed */
+               if (!wq->hdr_len || !wq->bytes_needed) {
+                       err = "header less";
+                       goto drop_pdu;
+               }
+               wq->bytes_received += pasync_handle->buffer_len;
+               /* Something got overwritten? Better catch it here. */
+               if (wq->bytes_received > wq->bytes_needed) {
+                       err = "overflow";
+                       goto drop_pdu;
+               }
+               list_add_tail(&pasync_handle->link, &wq->list);
+               if (wq->bytes_received == wq->bytes_needed)
+                       status = beiscsi_hdl_fwd_pdu(beiscsi_conn,
+                                                    pasync_ctx, cri);
+       }
+       return status;
 
-       return pasync_sge + host_write_ptr;
+drop_pdu:
+       beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI,
+                   "BM_%d : cid %u async PDU %s - def-%c:HL%u:DN%u:DR%u\n",
+                   beiscsi_conn->beiscsi_conn_cid, err,
+                   pasync_handle->is_header ? 'H' : 'D',
+                   wq->hdr_len, wq->bytes_needed,
+                   pasync_handle->buffer_len);
+       /* discard this handle */
+       beiscsi_hdl_put_handle(pasync_ctx, pasync_handle);
+       /* free all the other handles in cri_wait_queue */
+       beiscsi_hdl_purge_handles(phba, pasync_ctx, cri);
+       /* try continuing */
+       return status;
 }
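
    The length math in the header branch merits a worked example: the PDU
    base header carries a 24-bit data segment length split into an 8-bit
    high part and a big-endian 16-bit low part (ntohs() stands in for the
    kernel's be16_to_cpu()):

        #include <arpa/inet.h>
        #include <stdint.h>
        #include <stdio.h>

        static uint32_t pdu_bytes_needed(uint8_t data_len_hi,
                                         uint16_t data_len_lo_be)
        {
                uint32_t bytes = data_len_hi;

                bytes <<= 16;                   /* high part above bit 16 */
                bytes |= ntohs(data_len_lo_be); /* low 16 bits, big-endian */
                return bytes;
        }

        int main(void)
        {
                /* hi = 0x01, lo = 0x0200 -> 0x10200 bytes needed */
                printf("0x%x\n", pdu_bytes_needed(0x01, htons(0x0200)));
                return 0;
        }
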
 
-static void hwi_post_async_buffers(struct beiscsi_hba *phba,
-                                   unsigned int is_header, uint8_t ulp_num)
+static void
+beiscsi_hdq_post_handles(struct beiscsi_hba *phba,
+                        u8 header, u8 ulp_num)
 {
+       struct hd_async_handle *pasync_handle, *tmp, **slot;
+       struct hd_async_context *pasync_ctx;
        struct hwi_controller *phwi_ctrlr;
-       struct hwi_async_pdu_context *pasync_ctx;
-       struct async_pdu_handle *pasync_handle;
-       struct list_head *pfree_link, *pbusy_list;
+       struct list_head *hfree_list;
        struct phys_addr *pasync_sge;
-       unsigned int ring_id, num_entries;
-       unsigned int host_write_num, doorbell_offset;
-       unsigned int writables;
-       unsigned int i = 0;
-       u32 doorbell = 0;
+       u32 ring_id, doorbell = 0;
+       u16 index, num_entries;
+       u32 doorbell_offset;
+       u16 prod = 0, cons;
 
        phwi_ctrlr = phba->phwi_ctrlr;
        pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, ulp_num);
        num_entries = pasync_ctx->num_entries;
-
-       if (is_header) {
-               writables = min(pasync_ctx->async_header.writables,
-                               pasync_ctx->async_header.free_entries);
-               pfree_link = pasync_ctx->async_header.free_list.next;
-               host_write_num = pasync_ctx->async_header.host_write_ptr;
+       if (header) {
+               cons = pasync_ctx->async_header.free_entries;
+               hfree_list = &pasync_ctx->async_header.free_list;
                ring_id = phwi_ctrlr->default_pdu_hdr[ulp_num].id;
                doorbell_offset = phwi_ctrlr->default_pdu_hdr[ulp_num].
-                                 doorbell_offset;
+                                       doorbell_offset;
        } else {
-               writables = min(pasync_ctx->async_data.writables,
-                               pasync_ctx->async_data.free_entries);
-               pfree_link = pasync_ctx->async_data.free_list.next;
-               host_write_num = pasync_ctx->async_data.host_write_ptr;
+               cons = pasync_ctx->async_data.free_entries;
+               hfree_list = &pasync_ctx->async_data.free_list;
                ring_id = phwi_ctrlr->default_pdu_data[ulp_num].id;
                doorbell_offset = phwi_ctrlr->default_pdu_data[ulp_num].
-                                 doorbell_offset;
+                                       doorbell_offset;
        }
+       /* number of entries posted must be in multiples of 8 */
+       if (cons % 8)
+               return;
 
-       writables = (writables / 8) * 8;
-       if (writables) {
-               for (i = 0; i < writables; i++) {
-                       pbusy_list =
-                           hwi_get_async_busy_list(pasync_ctx, is_header,
-                                                   host_write_num);
-                       pasync_handle =
-                           list_entry(pfree_link, struct async_pdu_handle,
-                                                               link);
-                       WARN_ON(!pasync_handle);
-                       pasync_handle->consumed = 0;
-
-                       pfree_link = pfree_link->next;
-
-                       pasync_sge = hwi_get_ring_address(pasync_ctx,
-                                               is_header, host_write_num);
-
-                       pasync_sge->hi = pasync_handle->pa.u.a32.address_lo;
-                       pasync_sge->lo = pasync_handle->pa.u.a32.address_hi;
-
-                       list_move(&pasync_handle->link, pbusy_list);
-
-                       host_write_num++;
-                       host_write_num = host_write_num % num_entries;
-               }
-
-               if (is_header) {
-                       pasync_ctx->async_header.host_write_ptr =
-                                                       host_write_num;
-                       pasync_ctx->async_header.free_entries -= writables;
-                       pasync_ctx->async_header.writables -= writables;
-                       pasync_ctx->async_header.busy_entries += writables;
-               } else {
-                       pasync_ctx->async_data.host_write_ptr = host_write_num;
-                       pasync_ctx->async_data.free_entries -= writables;
-                       pasync_ctx->async_data.writables -= writables;
-                       pasync_ctx->async_data.busy_entries += writables;
-               }
-
-               doorbell |= ring_id & DB_DEF_PDU_RING_ID_MASK;
-               doorbell |= 1 << DB_DEF_PDU_REARM_SHIFT;
-               doorbell |= 0 << DB_DEF_PDU_EVENT_SHIFT;
-               doorbell |= (writables & DB_DEF_PDU_CQPROC_MASK)
-                                       << DB_DEF_PDU_CQPROC_SHIFT;
-
-               iowrite32(doorbell, phba->db_va + doorbell_offset);
-       }
-}
-
-static void hwi_flush_default_pdu_buffer(struct beiscsi_hba *phba,
-                                        struct beiscsi_conn *beiscsi_conn,
-                                        struct i_t_dpdu_cqe *pdpdu_cqe)
-{
-       struct hwi_controller *phwi_ctrlr;
-       struct hwi_async_pdu_context *pasync_ctx;
-       struct async_pdu_handle *pasync_handle = NULL;
-       unsigned int cq_index = -1;
-       uint16_t cri_index = BE_GET_CRI_FROM_CID(
-                            beiscsi_conn->beiscsi_conn_cid);
-
-       phwi_ctrlr = phba->phwi_ctrlr;
-       pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr,
-                    BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr,
-                    cri_index));
-
-       pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
-                                            pdpdu_cqe, &cq_index);
-       BUG_ON(pasync_handle->is_header != 0);
-       if (pasync_handle->consumed == 0)
-               hwi_update_async_writables(phba, pasync_ctx,
-                                          pasync_handle->is_header, cq_index);
-
-       hwi_free_async_msg(phba, pasync_ctx, pasync_handle->cri);
-       hwi_post_async_buffers(phba, pasync_handle->is_header,
-                              BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr,
-                              cri_index));
-}
-
-static unsigned int
-hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn,
-                 struct beiscsi_hba *phba,
-                 struct hwi_async_pdu_context *pasync_ctx, unsigned short cri)
-{
-       struct list_head *plist;
-       struct async_pdu_handle *pasync_handle;
-       void *phdr = NULL;
-       unsigned int hdr_len = 0, buf_len = 0;
-       unsigned int status, index = 0, offset = 0;
-       void *pfirst_buffer = NULL;
-       unsigned int num_buf = 0;
-
-       plist = &pasync_ctx->async_entry[cri].wait_queue.list;
+       list_for_each_entry_safe(pasync_handle, tmp, hfree_list, link) {
+               list_del_init(&pasync_handle->link);
+               pasync_handle->is_final = 0;
+               pasync_handle->buffer_len = 0;
 
-       list_for_each_entry(pasync_handle, plist, link) {
-               if (index == 0) {
-                       phdr = pasync_handle->pbuffer;
-                       hdr_len = pasync_handle->buffer_len;
-               } else {
-                       buf_len = pasync_handle->buffer_len;
-                       if (!num_buf) {
-                               pfirst_buffer = pasync_handle->pbuffer;
-                               num_buf++;
-                       }
-                       memcpy(pfirst_buffer + offset,
-                              pasync_handle->pbuffer, buf_len);
-                       offset += buf_len;
+               /* handles can be consumed out of order, use index in handle */
+               index = pasync_handle->index;
+               WARN_ON(pasync_handle->is_header != header);
+               if (header)
+                       slot = &pasync_ctx->async_entry[index].header;
+               else
+                       slot = &pasync_ctx->async_entry[index].data;
+               /**
+                * The slot just tracks handle's hold and release, so
+                * overwriting at the same index won't do any harm but
+                * needs to be caught.
+                */
+               if (*slot != NULL) {
+                       beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI,
+                                   "BM_%d : async PDU %s slot at %u not empty\n",
+                                   header ? "header" : "data", index);
                }
-               index++;
+               /**
+                * We post using the same index that was freed at completion,
+                * so rewriting the SGE address is not required for refills;
+                * it is only required when the ring is first created.
+                */
+               if (header)
+                       pasync_sge = pasync_ctx->async_header.ring_base;
+               else
+                       pasync_sge = pasync_ctx->async_data.ring_base;
+               pasync_sge += index;
+               /* on a refill the address is unchanged; the SGE stores hi/lo swapped */
+               WARN_ON(pasync_sge->hi &&
+                       pasync_sge->hi != pasync_handle->pa.u.a32.address_lo);
+               WARN_ON(pasync_sge->lo &&
+                       pasync_sge->lo != pasync_handle->pa.u.a32.address_hi);
+               pasync_sge->hi = pasync_handle->pa.u.a32.address_lo;
+               pasync_sge->lo = pasync_handle->pa.u.a32.address_hi;
+
+               *slot = pasync_handle;
+               if (++prod == cons)
+                       break;
        }
+       if (header)
+               pasync_ctx->async_header.free_entries -= prod;
+       else
+               pasync_ctx->async_data.free_entries -= prod;
 
-       status = beiscsi_process_async_pdu(beiscsi_conn, phba,
-                                           phdr, hdr_len, pfirst_buffer,
-                                           offset);
-
-       hwi_free_async_msg(phba, pasync_ctx, cri);
-       return 0;
-}
-
-static unsigned int
-hwi_gather_async_pdu(struct beiscsi_conn *beiscsi_conn,
-                    struct beiscsi_hba *phba,
-                    struct async_pdu_handle *pasync_handle)
-{
-       struct hwi_async_pdu_context *pasync_ctx;
-       struct hwi_controller *phwi_ctrlr;
-       unsigned int bytes_needed = 0, status = 0;
-       unsigned short cri = pasync_handle->cri;
-       struct pdu_base *ppdu;
-
-       phwi_ctrlr = phba->phwi_ctrlr;
-       pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr,
-                    BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr,
-                    BE_GET_CRI_FROM_CID(beiscsi_conn->
-                                beiscsi_conn_cid)));
-
-       list_del(&pasync_handle->link);
-       if (pasync_handle->is_header) {
-               pasync_ctx->async_header.busy_entries--;
-               if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
-                       hwi_free_async_msg(phba, pasync_ctx, cri);
-                       BUG();
-               }
-
-               pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
-               pasync_ctx->async_entry[cri].wait_queue.hdr_received = 1;
-               pasync_ctx->async_entry[cri].wait_queue.hdr_len =
-                               (unsigned short)pasync_handle->buffer_len;
-               list_add_tail(&pasync_handle->link,
-                             &pasync_ctx->async_entry[cri].wait_queue.list);
-
-               ppdu = pasync_handle->pbuffer;
-               bytes_needed = ((((ppdu->dw[offsetof(struct amap_pdu_base,
-                       data_len_hi) / 32] & PDUBASE_DATALENHI_MASK) << 8) &
-                       0xFFFF0000) | ((be16_to_cpu((ppdu->
-                       dw[offsetof(struct amap_pdu_base, data_len_lo) / 32]
-                       & PDUBASE_DATALENLO_MASK) >> 16)) & 0x0000FFFF));
-
-               if (status == 0) {
-                       pasync_ctx->async_entry[cri].wait_queue.bytes_needed =
-                           bytes_needed;
-
-                       if (bytes_needed == 0)
-                               status = hwi_fwd_async_msg(beiscsi_conn, phba,
-                                                          pasync_ctx, cri);
-               }
-       } else {
-               pasync_ctx->async_data.busy_entries--;
-               if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
-                       list_add_tail(&pasync_handle->link,
-                                     &pasync_ctx->async_entry[cri].wait_queue.
-                                     list);
-                       pasync_ctx->async_entry[cri].wait_queue.
-                               bytes_received +=
-                               (unsigned short)pasync_handle->buffer_len;
-
-                       if (pasync_ctx->async_entry[cri].wait_queue.
-                           bytes_received >=
-                           pasync_ctx->async_entry[cri].wait_queue.
-                           bytes_needed)
-                               status = hwi_fwd_async_msg(beiscsi_conn, phba,
-                                                          pasync_ctx, cri);
-               }
-       }
-       return status;
+       doorbell |= ring_id & DB_DEF_PDU_RING_ID_MASK;
+       doorbell |= 1 << DB_DEF_PDU_REARM_SHIFT;
+       doorbell |= 0 << DB_DEF_PDU_EVENT_SHIFT;
+       doorbell |= (prod & DB_DEF_PDU_CQPROC_MASK) << DB_DEF_PDU_CQPROC_SHIFT;
+       iowrite32(doorbell, phba->db_va + doorbell_offset);
 }
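
The doorbell write above packs the whole post into one 32-bit word: ring id
in the low bits, a re-arm flag, an (unused here) event flag, and the count of
newly posted handles on top. A minimal sketch of that packing; the bit
positions and mask values below are illustrative assumptions, not the
authoritative register layout:

	#define DB_DEF_PDU_RING_ID_MASK	0x3FFu	/* assumed: ring id, bits 0-9 */
	#define DB_DEF_PDU_REARM_SHIFT	14	/* assumed: re-arm interrupt */
	#define DB_DEF_PDU_CQPROC_MASK	0x3FFFu	/* assumed: posted-count field */
	#define DB_DEF_PDU_CQPROC_SHIFT	16

	static u32 pack_def_pdu_doorbell(u16 ring_id, u16 posted)
	{
		u32 db = 0;

		db |= ring_id & DB_DEF_PDU_RING_ID_MASK;
		db |= 1u << DB_DEF_PDU_REARM_SHIFT;
		db |= (posted & DB_DEF_PDU_CQPROC_MASK) << DB_DEF_PDU_CQPROC_SHIFT;
		return db;	/* the value handed to iowrite32() above */
	}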
 
-static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn,
-                                        struct beiscsi_hba *phba,
-                                        struct i_t_dpdu_cqe *pdpdu_cqe)
+static void
+beiscsi_hdq_process_compl(struct beiscsi_conn *beiscsi_conn,
+                         struct i_t_dpdu_cqe *pdpdu_cqe)
 {
+       struct beiscsi_hba *phba = beiscsi_conn->phba;
+       struct hd_async_handle *pasync_handle = NULL;
+       struct hd_async_context *pasync_ctx;
        struct hwi_controller *phwi_ctrlr;
-       struct hwi_async_pdu_context *pasync_ctx;
-       struct async_pdu_handle *pasync_handle = NULL;
-       unsigned int cq_index = -1;
-       uint16_t cri_index = BE_GET_CRI_FROM_CID(
-                            beiscsi_conn->beiscsi_conn_cid);
+       u16 cid_cri;
+       u8 ulp_num;
 
        phwi_ctrlr = phba->phwi_ctrlr;
-       pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr,
-                    BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr,
-                    cri_index));
-
-       pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
-                                            pdpdu_cqe, &cq_index);
-
-       if (pasync_handle->consumed == 0)
-               hwi_update_async_writables(phba, pasync_ctx,
-                                          pasync_handle->is_header, cq_index);
+       cid_cri = BE_GET_CRI_FROM_CID(beiscsi_conn->beiscsi_conn_cid);
+       ulp_num = BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr, cid_cri);
+       pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, ulp_num);
+       pasync_handle = beiscsi_hdl_get_handle(beiscsi_conn, pasync_ctx,
+                                              pdpdu_cqe);
+       if (!pasync_handle)
+               return;
 
-       hwi_gather_async_pdu(beiscsi_conn, phba, pasync_handle);
-       hwi_post_async_buffers(phba, pasync_handle->is_header,
-                              BEISCSI_GET_ULP_FROM_CRI(
-                              phwi_ctrlr, cri_index));
+       beiscsi_hdl_gather_pdu(beiscsi_conn, pasync_ctx, pasync_handle);
+       beiscsi_hdq_post_handles(phba, pasync_handle->is_header, ulp_num);
 }
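
For reference, the lookup chain performed above, written out as a small
sketch (the macros are the driver's; their internals are treated as a black
box here): the connection ID resolves to a connection resource index (CRI),
the controller records which ULP each CRI was carved from, and each ULP owns
exactly one async PDU context.

	static struct hd_async_context *
	demo_lookup_async_ctx(struct hwi_controller *ctrlr, u16 cid)
	{
		u16 cri = BE_GET_CRI_FROM_CID(cid);		/* CID -> CRI */
		u8 ulp = BEISCSI_GET_ULP_FROM_CRI(ctrlr, cri);	/* CRI -> ULP */

		return HWI_GET_ASYNC_PDU_CTX(ctrlr, ulp);	/* ULP -> ctx */
	}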
 
 void beiscsi_process_mcc_cq(struct beiscsi_hba *phba)
@@ -2051,6 +1816,9 @@ void beiscsi_process_mcc_cq(struct beiscsi_hba *phba)
        mcc_compl = queue_tail_node(mcc_cq);
        mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
        while (mcc_compl->flags & CQE_FLAGS_VALID_MASK) {
+               if (beiscsi_hba_in_error(phba))
+                       return;
+
                if (num_processed >= 32) {
                        hwi_ring_cq_db(phba, mcc_cq->id,
                                        num_processed, 0);
@@ -2073,6 +1841,19 @@ void beiscsi_process_mcc_cq(struct beiscsi_hba *phba)
                hwi_ring_cq_db(phba, mcc_cq->id, num_processed, 1);
 }
 
+static void beiscsi_mcc_work(struct work_struct *work)
+{
+       struct be_eq_obj *pbe_eq;
+       struct beiscsi_hba *phba;
+
+       pbe_eq = container_of(work, struct be_eq_obj, mcc_work);
+       phba = pbe_eq->phba;
+       beiscsi_process_mcc_cq(phba);
+       /* rearm EQ for further interrupts */
+       if (!beiscsi_hba_in_error(phba))
+               hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
+}
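
beiscsi_mcc_work() uses the standard embedded work_struct idiom: the
interrupt path only queues the work, and the CQ walk plus EQ re-arm run
later in process context. A generic sketch of that idiom, with demo_* names
standing in for the driver's:

	#include <linux/workqueue.h>

	struct demo_eq {
		struct work_struct mcc_work;	/* recovered via container_of */
	};

	static void demo_mcc_work(struct work_struct *work)
	{
		struct demo_eq *eq = container_of(work, struct demo_eq, mcc_work);

		/* drain the MCC CQ for eq, then re-arm the EQ if still healthy */
	}

	static void demo_eq_init(struct demo_eq *eq)
	{
		INIT_WORK(&eq->mcc_work, demo_mcc_work);
	}

	/* interrupt path: schedule_work(&eq->mcc_work); */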
+
 /**
  * beiscsi_process_cq()- Process the Completion Queue
  * @pbe_eq: Event Q on which the Completion has come
@@ -2101,6 +1882,9 @@ unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq, int budget)
 
        while (sol->dw[offsetof(struct amap_sol_cqe, valid) / 32] &
               CQE_VALID_MASK) {
+               if (beiscsi_hba_in_error(phba))
+                       return 0;
+
                be_dws_le_to_cpu(sol, sizeof(struct sol_cqe));
 
                 code = (sol->dw[offsetof(struct amap_sol_cqe, code) /
@@ -2165,8 +1949,8 @@ unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq, int budget)
                                    cqe_desc[code], code, cid);
 
                        spin_lock_bh(&phba->async_pdu_lock);
-                       hwi_process_default_pdu_ring(beiscsi_conn, phba,
-                                            (struct i_t_dpdu_cqe *)sol);
+                       beiscsi_hdq_process_compl(beiscsi_conn,
+                                                 (struct i_t_dpdu_cqe *)sol);
                        spin_unlock_bh(&phba->async_pdu_lock);
                        break;
                case UNSOL_DATA_NOTIFY:
@@ -2176,8 +1960,8 @@ unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq, int budget)
                                    cqe_desc[code], code, cid);
 
                        spin_lock_bh(&phba->async_pdu_lock);
-                       hwi_process_default_pdu_ring(beiscsi_conn, phba,
-                                            (struct i_t_dpdu_cqe *)sol);
+                       beiscsi_hdq_process_compl(beiscsi_conn,
+                                                 (struct i_t_dpdu_cqe *)sol);
                        spin_unlock_bh(&phba->async_pdu_lock);
                        break;
                case CXN_INVALIDATE_INDEX_NOTIFY:
@@ -2213,8 +1997,9 @@ unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq, int budget)
                                    "BM_%d :  Dropping %s[%d] on DPDU ring on CID : %d\n",
                                    cqe_desc[code], code, cid);
                        spin_lock_bh(&phba->async_pdu_lock);
-                       hwi_flush_default_pdu_buffer(phba, beiscsi_conn,
-                                            (struct i_t_dpdu_cqe *) sol);
+                       /* driver consumes the entry and drops the contents */
+                       beiscsi_hdq_process_compl(beiscsi_conn,
+                                                 (struct i_t_dpdu_cqe *)sol);
                        spin_unlock_bh(&phba->async_pdu_lock);
                        break;
                case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL:
@@ -2262,60 +2047,32 @@ proc_next_cqe:
        return total;
 }
 
-void beiscsi_process_all_cqs(struct work_struct *work)
-{
-       unsigned long flags;
-       struct hwi_controller *phwi_ctrlr;
-       struct hwi_context_memory *phwi_context;
-       struct beiscsi_hba *phba;
-       struct be_eq_obj *pbe_eq =
-           container_of(work, struct be_eq_obj, work_cqs);
-
-       phba = pbe_eq->phba;
-       phwi_ctrlr = phba->phwi_ctrlr;
-       phwi_context = phwi_ctrlr->phwi_ctxt;
-
-       if (pbe_eq->todo_mcc_cq) {
-               spin_lock_irqsave(&phba->isr_lock, flags);
-               pbe_eq->todo_mcc_cq = false;
-               spin_unlock_irqrestore(&phba->isr_lock, flags);
-               beiscsi_process_mcc_cq(phba);
-       }
-
-       if (pbe_eq->todo_cq) {
-               spin_lock_irqsave(&phba->isr_lock, flags);
-               pbe_eq->todo_cq = false;
-               spin_unlock_irqrestore(&phba->isr_lock, flags);
-               beiscsi_process_cq(pbe_eq, BE2_MAX_NUM_CQ_PROC);
-       }
-
-       /* rearm EQ for further interrupts */
-       hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
-}
-
 static int be_iopoll(struct irq_poll *iop, int budget)
 {
-       unsigned int ret, num_eq_processed;
+       unsigned int ret, io_events;
        struct beiscsi_hba *phba;
        struct be_eq_obj *pbe_eq;
        struct be_eq_entry *eqe = NULL;
        struct be_queue_info *eq;
 
-       num_eq_processed = 0;
        pbe_eq = container_of(iop, struct be_eq_obj, iopoll);
        phba = pbe_eq->phba;
+       if (beiscsi_hba_in_error(phba)) {
+               irq_poll_complete(iop);
+               return 0;
+       }
+
+       io_events = 0;
        eq = &pbe_eq->q;
        eqe = queue_tail_node(eq);
-
        while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] &
                        EQE_VALID_MASK) {
                AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
                queue_tail_inc(eq);
                eqe = queue_tail_node(eq);
-               num_eq_processed++;
+               io_events++;
        }
-
-       hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 0, 1);
+       hwi_ring_eq_db(phba, eq->id, 1, io_events, 0, 1);
 
        ret = beiscsi_process_cq(pbe_eq, budget);
        pbe_eq->cq_count += ret;
@@ -2325,7 +2082,8 @@ static int be_iopoll(struct irq_poll *iop, int budget)
                            BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
                            "BM_%d : rearm pbe_eq->q.id =%d ret %d\n",
                            pbe_eq->q.id, ret);
-               hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
+               if (!beiscsi_hba_in_error(phba))
+                       hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
        }
        return ret;
 }
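
be_iopoll() follows the irq_poll contract: the poll callback consumes at
most @budget completions and returns the count; returning less than the
budget signals it is done, at which point it must call irq_poll_complete()
and re-enable device interrupts. A condensed sketch of that contract, where
process_completions() is an assumed helper:

	#include <linux/irq_poll.h>

	static int process_completions(int budget);	/* assumed helper */

	static int demo_iopoll(struct irq_poll *iop, int budget)
	{
		int done = process_completions(budget);

		if (done < budget) {
			irq_poll_complete(iop);	/* leave polled mode */
			/* re-arm the device interrupt here */
		}
		return done;
	}

	/* setup: irq_poll_init(&eq->iopoll, weight, demo_iopoll);
	 * ISR:   irq_poll_sched(&eq->iopoll);
	 */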
@@ -2691,20 +2449,20 @@ static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
                                          (ulp_num * MEM_DESCR_OFFSET));
                        phba->mem_req[mem_descr_index] =
                                          BEISCSI_GET_CID_COUNT(phba, ulp_num) *
-                                         sizeof(struct async_pdu_handle);
+                                         sizeof(struct hd_async_handle);
 
                        mem_descr_index = (HWI_MEM_ASYNC_DATA_HANDLE_ULP0 +
                                          (ulp_num * MEM_DESCR_OFFSET));
                        phba->mem_req[mem_descr_index] =
                                          BEISCSI_GET_CID_COUNT(phba, ulp_num) *
-                                         sizeof(struct async_pdu_handle);
+                                         sizeof(struct hd_async_handle);
 
                        mem_descr_index = (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 +
                                          (ulp_num * MEM_DESCR_OFFSET));
                        phba->mem_req[mem_descr_index] =
-                                         sizeof(struct hwi_async_pdu_context) +
+                                         sizeof(struct hd_async_context) +
                                         (BEISCSI_GET_CID_COUNT(phba, ulp_num) *
-                                         sizeof(struct hwi_async_entry));
+                                         sizeof(struct hd_async_entry));
                }
        }
 }
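
The renamed structures change only the sizing arithmetic here, which is
worth spelling out: each ULP's descriptors sit MEM_DESCR_OFFSET slots after
ULP 0's, and the async context allocation is one header followed by one
entry per CID. A sketch of those two computations, reusing the driver's
type and constant names:

	static size_t demo_async_ctx_bytes(unsigned int cid_count)
	{
		/* header plus one per-CID entry, as requested above */
		return sizeof(struct hd_async_context) +
		       cid_count * sizeof(struct hd_async_entry);
	}

	static unsigned int demo_descr_index(unsigned int base, u8 ulp_num)
	{
		return base + ulp_num * MEM_DESCR_OFFSET;
	}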
@@ -2963,35 +2721,34 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
        uint8_t ulp_num;
        struct hwi_controller *phwi_ctrlr;
        struct hba_parameters *p = &phba->params;
-       struct hwi_async_pdu_context *pasync_ctx;
-       struct async_pdu_handle *pasync_header_h, *pasync_data_h;
+       struct hd_async_context *pasync_ctx;
+       struct hd_async_handle *pasync_header_h, *pasync_data_h;
        unsigned int index, idx, num_per_mem, num_async_data;
        struct be_mem_descriptor *mem_descr;
 
        for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
                if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
-
+                       /* get async_ctx for each ULP */
                        mem_descr = (struct be_mem_descriptor *)phba->init_mem;
                        mem_descr += (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 +
                                     (ulp_num * MEM_DESCR_OFFSET));
 
                        phwi_ctrlr = phba->phwi_ctrlr;
                        phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num] =
-                               (struct hwi_async_pdu_context *)
+                               (struct hd_async_context *)
                                 mem_descr->mem_array[0].virtual_address;
 
                        pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num];
                        memset(pasync_ctx, 0, sizeof(*pasync_ctx));
 
                        pasync_ctx->async_entry =
-                                       (struct hwi_async_entry *)
+                                       (struct hd_async_entry *)
                                        ((long unsigned int)pasync_ctx +
-                                       sizeof(struct hwi_async_pdu_context));
+                                       sizeof(struct hd_async_context));
 
                        pasync_ctx->num_entries = BEISCSI_GET_CID_COUNT(phba,
                                                  ulp_num);
-                       pasync_ctx->buffer_size = p->defpdu_hdr_sz;
-
+                       /* setup header buffers */
                        mem_descr = (struct be_mem_descriptor *)phba->init_mem;
                        mem_descr += HWI_MEM_ASYNC_HEADER_BUF_ULP0 +
                                (ulp_num * MEM_DESCR_OFFSET);
@@ -3008,6 +2765,7 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
                                            "BM_%d : No Virtual address for ULP : %d\n",
                                            ulp_num);
 
+                       pasync_ctx->async_header.buffer_size = p->defpdu_hdr_sz;
                        pasync_ctx->async_header.va_base =
                                mem_descr->mem_array[0].virtual_address;
 
@@ -3015,6 +2773,7 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
                                mem_descr->mem_array[0].
                                bus_address.u.a64.address;
 
+                       /* setup header buffer sgls */
                        mem_descr = (struct be_mem_descriptor *)phba->init_mem;
                        mem_descr += HWI_MEM_ASYNC_HEADER_RING_ULP0 +
                                     (ulp_num * MEM_DESCR_OFFSET);
@@ -3034,6 +2793,7 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
                        pasync_ctx->async_header.ring_base =
                                mem_descr->mem_array[0].virtual_address;
 
+                       /* setup header buffer handles */
                        mem_descr = (struct be_mem_descriptor *)phba->init_mem;
                        mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 +
                                     (ulp_num * MEM_DESCR_OFFSET);
@@ -3052,9 +2812,9 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
 
                        pasync_ctx->async_header.handle_base =
                                mem_descr->mem_array[0].virtual_address;
-                       pasync_ctx->async_header.writables = 0;
                        INIT_LIST_HEAD(&pasync_ctx->async_header.free_list);
 
+                       /* setup data buffer sgls */
                        mem_descr = (struct be_mem_descriptor *)phba->init_mem;
                        mem_descr += HWI_MEM_ASYNC_DATA_RING_ULP0 +
                                     (ulp_num * MEM_DESCR_OFFSET);
@@ -3074,6 +2834,7 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
                        pasync_ctx->async_data.ring_base =
                                mem_descr->mem_array[0].virtual_address;
 
+                       /* setup data buffer handles */
                        mem_descr = (struct be_mem_descriptor *)phba->init_mem;
                        mem_descr += HWI_MEM_ASYNC_DATA_HANDLE_ULP0 +
                                     (ulp_num * MEM_DESCR_OFFSET);
@@ -3085,16 +2846,16 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
 
                        pasync_ctx->async_data.handle_base =
                                mem_descr->mem_array[0].virtual_address;
-                       pasync_ctx->async_data.writables = 0;
                        INIT_LIST_HEAD(&pasync_ctx->async_data.free_list);
 
                        pasync_header_h =
-                               (struct async_pdu_handle *)
+                               (struct hd_async_handle *)
                                pasync_ctx->async_header.handle_base;
                        pasync_data_h =
-                               (struct async_pdu_handle *)
+                               (struct hd_async_handle *)
                                pasync_ctx->async_data.handle_base;
 
+                       /* setup data buffers */
                        mem_descr = (struct be_mem_descriptor *)phba->init_mem;
                        mem_descr += HWI_MEM_ASYNC_DATA_BUF_ULP0 +
                                     (ulp_num * MEM_DESCR_OFFSET);
@@ -3112,6 +2873,7 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
                                            ulp_num);
 
                        idx = 0;
+                       pasync_ctx->async_data.buffer_size = p->defpdu_data_sz;
                        pasync_ctx->async_data.va_base =
                                mem_descr->mem_array[idx].virtual_address;
                        pasync_ctx->async_data.pa_base.u.a64.address =
@@ -3125,7 +2887,8 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
                        for (index = 0; index < BEISCSI_GET_CID_COUNT
                                        (phba, ulp_num); index++) {
                                pasync_header_h->cri = -1;
-                               pasync_header_h->index = (char)index;
+                               pasync_header_h->is_header = 1;
+                               pasync_header_h->index = index;
                                INIT_LIST_HEAD(&pasync_header_h->link);
                                pasync_header_h->pbuffer =
                                        (void *)((unsigned long)
@@ -3142,14 +2905,13 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
                                              free_list);
                                pasync_header_h++;
                                pasync_ctx->async_header.free_entries++;
-                               pasync_ctx->async_header.writables++;
-
-                               INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
-                                              wait_queue.list);
                                INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
-                                              header_busy_list);
+                                               wq.list);
+                               pasync_ctx->async_entry[index].header = NULL;
+
                                pasync_data_h->cri = -1;
-                               pasync_data_h->index = (char)index;
+                               pasync_data_h->is_header = 0;
+                               pasync_data_h->index = index;
                                INIT_LIST_HEAD(&pasync_data_h->link);
 
                                if (!num_async_data) {
@@ -3184,16 +2946,8 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
                                              free_list);
                                pasync_data_h++;
                                pasync_ctx->async_data.free_entries++;
-                               pasync_ctx->async_data.writables++;
-
-                               INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
-                                              data_busy_list);
+                               pasync_ctx->async_entry[index].data = NULL;
                        }
-
-                       pasync_ctx->async_header.host_write_ptr = 0;
-                       pasync_ctx->async_header.ep_read_ptr = -1;
-                       pasync_ctx->async_data.host_write_ptr = 0;
-                       pasync_ctx->async_data.ep_read_ptr = -1;
                }
        }
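
The init loop above hands each handle one slice of a large DMA allocation.
A minimal sketch of that carving for the simple contiguous case (the driver
additionally lets data buffers span several mem_array chunks, which this
sketch ignores; field names follow the handle struct in this patch):

	static void demo_carve(struct hd_async_handle *h, unsigned int count,
			       u8 *va_base, u64 pa_base, u32 buf_size)
	{
		unsigned int i;

		for (i = 0; i < count; i++, h++) {
			h->index = i;
			h->pbuffer = va_base + (size_t)i * buf_size;
			h->pa.u.a64.address = pa_base + (u64)i * buf_size;
		}
	}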
 
@@ -3265,8 +3019,8 @@ static int be_fill_queue(struct be_queue_info *q,
 static int beiscsi_create_eqs(struct beiscsi_hba *phba,
                             struct hwi_context_memory *phwi_context)
 {
+       int ret = -ENOMEM, eq_for_mcc;
        unsigned int i, num_eq_pages;
-       int ret = 0, eq_for_mcc;
        struct be_queue_info *eq;
        struct be_dma_mem *mem;
        void *eq_vaddress;
@@ -3284,8 +3038,8 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
                mem = &eq->dma_mem;
                phwi_context->be_eq[i].phba = phba;
                eq_vaddress = pci_alloc_consistent(phba->pcidev,
-                                                    num_eq_pages * PAGE_SIZE,
-                                                    &paddr);
+                                                  num_eq_pages * PAGE_SIZE,
+                                                  &paddr);
                if (!eq_vaddress)
                        goto create_eq_error;
 
@@ -3313,6 +3067,7 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
                            phwi_context->be_eq[i].q.id);
        }
        return 0;
+
 create_eq_error:
        for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
                eq = &phwi_context->be_eq[i].q;
@@ -3329,11 +3084,11 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
                             struct hwi_context_memory *phwi_context)
 {
        unsigned int i, num_cq_pages;
-       int ret = 0;
        struct be_queue_info *cq, *eq;
        struct be_dma_mem *mem;
        struct be_eq_obj *pbe_eq;
        void *cq_vaddress;
+       int ret = -ENOMEM;
        dma_addr_t paddr;
 
        num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
@@ -3347,10 +3102,11 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
                pbe_eq->phba = phba;
                mem = &cq->dma_mem;
                cq_vaddress = pci_alloc_consistent(phba->pcidev,
-                                                    num_cq_pages * PAGE_SIZE,
-                                                    &paddr);
+                                                  num_cq_pages * PAGE_SIZE,
+                                                  &paddr);
                if (!cq_vaddress)
                        goto create_cq_error;
+
                ret = be_fill_queue(cq, phba->params.num_cq_entries,
                                    sizeof(struct sol_cqe), cq_vaddress);
                if (ret) {
@@ -3385,7 +3141,6 @@ create_cq_error:
                                            mem->va, mem->dma);
        }
        return ret;
-
 }
 
 static int
@@ -3437,7 +3192,6 @@ beiscsi_create_def_hdr(struct beiscsi_hba *phba,
                    "BM_%d : iscsi hdr def pdu id for ULP : %d is %d\n",
                    ulp_num,
                    phwi_context->be_def_hdrq[ulp_num].id);
-       hwi_post_async_buffers(phba, BEISCSI_DEFQ_HDR, ulp_num);
        return 0;
 }
 
@@ -3492,11 +3246,9 @@ beiscsi_create_def_data(struct beiscsi_hba *phba,
                    ulp_num,
                    phwi_context->be_def_dataq[ulp_num].id);
 
-       hwi_post_async_buffers(phba, BEISCSI_DEFQ_DATA, ulp_num);
        beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
                    "BM_%d : DEFAULT PDU DATA RING CREATED"
                    "on ULP : %d\n", ulp_num);
-
        return 0;
 }
 
@@ -3716,10 +3468,53 @@ static void free_wrb_handles(struct beiscsi_hba *phba)
 
 static void be_mcc_queues_destroy(struct beiscsi_hba *phba)
 {
-       struct be_queue_info *q;
        struct be_ctrl_info *ctrl = &phba->ctrl;
+       struct be_dma_mem *ptag_mem;
+       struct be_queue_info *q;
+       int i, tag;
 
        q = &phba->ctrl.mcc_obj.q;
+       for (i = 0; i < MAX_MCC_CMD; i++) {
+               tag = i + 1;
+               if (!test_bit(MCC_TAG_STATE_RUNNING,
+                             &ctrl->ptag_state[tag].tag_state))
+                       continue;
+
+               if (test_bit(MCC_TAG_STATE_TIMEOUT,
+                            &ctrl->ptag_state[tag].tag_state)) {
+                       ptag_mem = &ctrl->ptag_state[tag].tag_mem_state;
+                       if (ptag_mem->size) {
+                               pci_free_consistent(ctrl->pdev,
+                                                   ptag_mem->size,
+                                                   ptag_mem->va,
+                                                   ptag_mem->dma);
+                               ptag_mem->size = 0;
+                       }
+                       continue;
+               }
+               /**
+                * If an MCC is still active and a process is waiting on it,
+                * wake up that process. We get here only because the port is
+                * going offline. The process sees that BEISCSI_HBA_ONLINE is
+                * cleared, an EIO error is returned for the operation, and
+                * the allocated memory is cleaned up.
+                */
+               if (waitqueue_active(&ctrl->mcc_wait[tag])) {
+                       ctrl->mcc_tag_status[tag] = MCC_STATUS_FAILED;
+                       ctrl->mcc_tag_status[tag] |= CQE_VALID_MASK;
+                       wake_up_interruptible(&ctrl->mcc_wait[tag]);
+                       /*
+                        * Control tag info gets reinitialized in the enable
+                        * path, so wait for the process to clear the
+                        * running state.
+                        */
+                       while (test_bit(MCC_TAG_STATE_RUNNING,
+                                       &ctrl->ptag_state[tag].tag_state))
+                               schedule_timeout_uninterruptible(HZ);
+               }
+               /**
+                * For MCCs with tag_state MCC_TAG_STATE_ASYNC or
+                * MCC_TAG_STATE_IGNORE, nothing needs to be done.
+                */
+       }
        if (q->created) {
                beiscsi_cmd_q_destroy(ctrl, q, QTYPE_MCCQ);
                be_queue_free(phba, q);
@@ -3732,68 +3527,6 @@ static void be_mcc_queues_destroy(struct beiscsi_hba *phba)
        }
 }
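
The wake-up in be_mcc_queues_destroy() pairs with the waiter visible later
in this patch's bsg path: the sleeper blocks until the tag status carries
CQE_VALID_MASK and drops the tag's RUNNING bit afterwards, which is exactly
the condition the destroy loop polls on. The waiter side, condensed from
that path:

	rc = wait_event_interruptible_timeout(ctrl->mcc_wait[tag],
				ctrl->mcc_tag_status[tag] & CQE_VALID_MASK,
				msecs_to_jiffies(BEISCSI_HOST_MBX_TIMEOUT));
	/* ... consume the status, free any per-tag DMA memory ... */
	clear_bit(MCC_TAG_STATE_RUNNING, &ctrl->ptag_state[tag].tag_state);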
 
-static void hwi_cleanup(struct beiscsi_hba *phba)
-{
-       struct be_queue_info *q;
-       struct be_ctrl_info *ctrl = &phba->ctrl;
-       struct hwi_controller *phwi_ctrlr;
-       struct hwi_context_memory *phwi_context;
-       struct hwi_async_pdu_context *pasync_ctx;
-       int i, eq_for_mcc, ulp_num;
-
-       phwi_ctrlr = phba->phwi_ctrlr;
-       phwi_context = phwi_ctrlr->phwi_ctxt;
-
-       be_cmd_iscsi_remove_template_hdr(ctrl);
-
-       for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
-               q = &phwi_context->be_wrbq[i];
-               if (q->created)
-                       beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ);
-       }
-       kfree(phwi_context->be_wrbq);
-       free_wrb_handles(phba);
-
-       for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
-               if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
-
-                       q = &phwi_context->be_def_hdrq[ulp_num];
-                       if (q->created)
-                               beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
-
-                       q = &phwi_context->be_def_dataq[ulp_num];
-                       if (q->created)
-                               beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
-
-                       pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num];
-               }
-       }
-
-       beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
-
-       for (i = 0; i < (phba->num_cpus); i++) {
-               q = &phwi_context->be_cq[i];
-               if (q->created) {
-                       be_queue_free(phba, q);
-                       beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
-               }
-       }
-
-       be_mcc_queues_destroy(phba);
-       if (phba->msix_enabled)
-               eq_for_mcc = 1;
-       else
-               eq_for_mcc = 0;
-       for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
-               q = &phwi_context->be_eq[i].q;
-               if (q->created) {
-                       be_queue_free(phba, q);
-                       beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
-               }
-       }
-       be_cmd_fw_uninit(ctrl);
-}
-
 static int be_mcc_queues_create(struct beiscsi_hba *phba,
                                struct hwi_context_memory *phwi_context)
 {
@@ -3875,7 +3608,119 @@ static void find_num_cpus(struct beiscsi_hba *phba)
        }
 }
 
-static int hwi_init_port(struct beiscsi_hba *phba)
+static void hwi_purge_eq(struct beiscsi_hba *phba)
+{
+       struct hwi_controller *phwi_ctrlr;
+       struct hwi_context_memory *phwi_context;
+       struct be_queue_info *eq;
+       struct be_eq_entry *eqe = NULL;
+       int i, eq_msix;
+       unsigned int num_processed;
+
+       if (beiscsi_hba_in_error(phba))
+               return;
+
+       phwi_ctrlr = phba->phwi_ctrlr;
+       phwi_context = phwi_ctrlr->phwi_ctxt;
+       if (phba->msix_enabled)
+               eq_msix = 1;
+       else
+               eq_msix = 0;
+
+       for (i = 0; i < (phba->num_cpus + eq_msix); i++) {
+               eq = &phwi_context->be_eq[i].q;
+               eqe = queue_tail_node(eq);
+               num_processed = 0;
+               while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
+                                       & EQE_VALID_MASK) {
+                       AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
+                       queue_tail_inc(eq);
+                       eqe = queue_tail_node(eq);
+                       num_processed++;
+               }
+
+               if (num_processed)
+                       hwi_ring_eq_db(phba, eq->id, 1, num_processed, 1, 1);
+       }
+}
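
hwi_purge_eq() is an instance of the usual valid-bit ring drain: hardware
sets a valid bit in each new entry, software clears it, advances the tail,
and finally acknowledges the whole batch through the doorbell. The shape of
the loop, with illustrative demo_* types standing in for the driver's:

	struct demo_entry { u32 valid; };
	struct demo_ring { struct demo_entry *ring; u32 tail, len; };

	static unsigned int demo_drain(struct demo_ring *r)
	{
		unsigned int n = 0;
		struct demo_entry *e = &r->ring[r->tail];

		while (e->valid) {
			e->valid = 0;	/* hand the slot back to hardware */
			r->tail = (r->tail + 1) % r->len;
			e = &r->ring[r->tail];
			n++;
		}
		return n;	/* caller acks n entries via the EQ doorbell */
	}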
+
+static void hwi_cleanup_port(struct beiscsi_hba *phba)
+{
+       struct be_queue_info *q;
+       struct be_ctrl_info *ctrl = &phba->ctrl;
+       struct hwi_controller *phwi_ctrlr;
+       struct hwi_context_memory *phwi_context;
+       struct hd_async_context *pasync_ctx;
+       int i, eq_for_mcc, ulp_num;
+
+       for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
+               if (test_bit(ulp_num, &phba->fw_config.ulp_supported))
+                       beiscsi_cmd_iscsi_cleanup(phba, ulp_num);
+
+       /**
+        * Purge all EQ entries that may have been left out. This is to
+        * work around a problem we've seen occasionally where the driver
+        * gets an interrupt with the EQ entry bit set after stopping the
+        * controller.
+        */
+       hwi_purge_eq(phba);
+
+       phwi_ctrlr = phba->phwi_ctrlr;
+       phwi_context = phwi_ctrlr->phwi_ctxt;
+
+       be_cmd_iscsi_remove_template_hdr(ctrl);
+
+       for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
+               q = &phwi_context->be_wrbq[i];
+               if (q->created)
+                       beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ);
+       }
+       kfree(phwi_context->be_wrbq);
+       free_wrb_handles(phba);
+
+       for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+               if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
+
+                       q = &phwi_context->be_def_hdrq[ulp_num];
+                       if (q->created)
+                               beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
+
+                       q = &phwi_context->be_def_dataq[ulp_num];
+                       if (q->created)
+                               beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
+
+                       pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num];
+               }
+       }
+
+       beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
+
+       for (i = 0; i < (phba->num_cpus); i++) {
+               q = &phwi_context->be_cq[i];
+               if (q->created) {
+                       be_queue_free(phba, q);
+                       beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
+               }
+       }
+
+       be_mcc_queues_destroy(phba);
+       if (phba->msix_enabled)
+               eq_for_mcc = 1;
+       else
+               eq_for_mcc = 0;
+       for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
+               q = &phwi_context->be_eq[i].q;
+               if (q->created) {
+                       be_queue_free(phba, q);
+                       beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
+               }
+       }
+       /* this ensures complete FW cleanup */
+       beiscsi_cmd_function_reset(phba);
+       /* last communication, indicate driver is unloading */
+       beiscsi_cmd_special_wrb(&phba->ctrl, 0);
+}
+
+static int hwi_init_port(struct beiscsi_hba *phba)
 {
        struct hwi_controller *phwi_ctrlr;
        struct hwi_context_memory *phwi_context;
@@ -3887,9 +3732,8 @@ static int hwi_init_port(struct beiscsi_hba *phba)
        phwi_context = phwi_ctrlr->phwi_ctxt;
        phwi_context->max_eqd = 128;
        phwi_context->min_eqd = 0;
-       phwi_context->cur_eqd = 0;
-       be_cmd_fw_initialize(&phba->ctrl);
-       /* set optic state to unknown */
+       phwi_context->cur_eqd = 32;
+       /* set port optic state to unknown */
        phba->optic_state = 0xff;
 
        status = beiscsi_create_eqs(phba, phwi_context);
@@ -3903,7 +3747,7 @@ static int hwi_init_port(struct beiscsi_hba *phba)
        if (status != 0)
                goto error;
 
-       status = mgmt_check_supported_fw(ctrl, phba);
+       status = beiscsi_check_supported_fw(ctrl, phba);
        if (status != 0) {
                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
                            "BM_%d : Unsupported fw version\n");
@@ -3919,7 +3763,6 @@ static int hwi_init_port(struct beiscsi_hba *phba)
 
        for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
                if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
-
                        def_pdu_ring_sz =
                                BEISCSI_GET_CID_COUNT(phba, ulp_num) *
                                sizeof(struct phys_addr);
@@ -3945,6 +3788,15 @@ static int hwi_init_port(struct beiscsi_hba *phba)
                                            ulp_num);
                                goto error;
                        }
+                       /**
+                        * Now that the default PDU rings have been created,
+                        * let the EP know about them.
+                        * Call beiscsi_cmd_iscsi_cleanup before posting?
+                        */
+                       beiscsi_hdq_post_handles(phba, BEISCSI_DEFQ_HDR,
+                                                ulp_num);
+                       beiscsi_hdq_post_handles(phba, BEISCSI_DEFQ_DATA,
+                                                ulp_num);
                }
        }
 
@@ -3973,7 +3825,7 @@ static int hwi_init_port(struct beiscsi_hba *phba)
 
                if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
                        uint16_t cri = 0;
-                       struct hwi_async_pdu_context *pasync_ctx;
+                       struct hd_async_context *pasync_ctx;
 
                        pasync_ctx = HWI_GET_ASYNC_PDU_CTX(
                                     phwi_ctrlr, ulp_num);
@@ -3985,6 +3837,14 @@ static int hwi_init_port(struct beiscsi_hba *phba)
                                        phwi_ctrlr->wrb_context[cri].cid] =
                                        async_arr_idx++;
                        }
+                       /**
+                        * Now that the default PDU rings have been created,
+                        * let the EP know about them.
+                        */
+                       beiscsi_hdq_post_handles(phba, BEISCSI_DEFQ_HDR,
+                                                ulp_num);
+                       beiscsi_hdq_post_handles(phba, BEISCSI_DEFQ_DATA,
+                                                ulp_num);
                }
        }
 
@@ -3995,7 +3855,7 @@ static int hwi_init_port(struct beiscsi_hba *phba)
 error:
        beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
                    "BM_%d : hwi_init_port failed");
-       hwi_cleanup(phba);
+       hwi_cleanup_port(phba);
        return status;
 }
 
@@ -4354,149 +4214,6 @@ static void hwi_disable_intr(struct beiscsi_hba *phba)
                            "BM_%d : In hwi_disable_intr, Already Disabled\n");
 }
 
-/**
- * beiscsi_get_boot_info()- Get the boot session info
- * @phba: The device priv structure instance
- *
- * Get the boot target info and store in driver priv structure
- *
- * return values
- *     Success: 0
- *     Failure: Non-Zero Value
- **/
-static int beiscsi_get_boot_info(struct beiscsi_hba *phba)
-{
-       struct be_cmd_get_session_resp *session_resp;
-       struct be_dma_mem nonemb_cmd;
-       unsigned int tag;
-       unsigned int s_handle;
-       int ret = -ENOMEM;
-
-       /* Get the session handle of the boot target */
-       ret = be_mgmt_get_boot_shandle(phba, &s_handle);
-       if (ret) {
-               beiscsi_log(phba, KERN_ERR,
-                           BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
-                           "BM_%d : No boot session\n");
-
-               if (ret == -ENXIO)
-                       phba->get_boot = 0;
-
-
-               return ret;
-       }
-       phba->get_boot = 0;
-       nonemb_cmd.va = pci_zalloc_consistent(phba->ctrl.pdev,
-                                             sizeof(*session_resp),
-                                             &nonemb_cmd.dma);
-       if (nonemb_cmd.va == NULL) {
-               beiscsi_log(phba, KERN_ERR,
-                           BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
-                           "BM_%d : Failed to allocate memory for"
-                           "beiscsi_get_session_info\n");
-
-               return -ENOMEM;
-       }
-
-       tag = mgmt_get_session_info(phba, s_handle,
-                                   &nonemb_cmd);
-       if (!tag) {
-               beiscsi_log(phba, KERN_ERR,
-                           BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
-                           "BM_%d : beiscsi_get_session_info"
-                           " Failed\n");
-
-               goto boot_freemem;
-       }
-
-       ret = beiscsi_mccq_compl_wait(phba, tag, NULL, &nonemb_cmd);
-       if (ret) {
-               beiscsi_log(phba, KERN_ERR,
-                           BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
-                           "BM_%d : beiscsi_get_session_info Failed");
-
-               if (ret != -EBUSY)
-                       goto boot_freemem;
-               else
-                       return ret;
-       }
-
-       session_resp = nonemb_cmd.va ;
-
-       memcpy(&phba->boot_sess, &session_resp->session_info,
-              sizeof(struct mgmt_session_info));
-
-        beiscsi_logout_fw_sess(phba,
-                               phba->boot_sess.session_handle);
-       ret = 0;
-
-boot_freemem:
-       pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
-                   nonemb_cmd.va, nonemb_cmd.dma);
-       return ret;
-}
-
-static void beiscsi_boot_release(void *data)
-{
-       struct beiscsi_hba *phba = data;
-
-       scsi_host_put(phba->shost);
-}
-
-static int beiscsi_setup_boot_info(struct beiscsi_hba *phba)
-{
-       struct iscsi_boot_kobj *boot_kobj;
-
-       /* it has been created previously */
-       if (phba->boot_kset)
-               return 0;
-
-       /* get boot info using mgmt cmd */
-       if (beiscsi_get_boot_info(phba))
-               /* Try to see if we can carry on without this */
-               return 0;
-
-       phba->boot_kset = iscsi_boot_create_host_kset(phba->shost->host_no);
-       if (!phba->boot_kset)
-               return -ENOMEM;
-
-       /* get a ref because the show function will ref the phba */
-       if (!scsi_host_get(phba->shost))
-               goto free_kset;
-       boot_kobj = iscsi_boot_create_target(phba->boot_kset, 0, phba,
-                                            beiscsi_show_boot_tgt_info,
-                                            beiscsi_tgt_get_attr_visibility,
-                                            beiscsi_boot_release);
-       if (!boot_kobj)
-               goto put_shost;
-
-       if (!scsi_host_get(phba->shost))
-               goto free_kset;
-       boot_kobj = iscsi_boot_create_initiator(phba->boot_kset, 0, phba,
-                                               beiscsi_show_boot_ini_info,
-                                               beiscsi_ini_get_attr_visibility,
-                                               beiscsi_boot_release);
-       if (!boot_kobj)
-               goto put_shost;
-
-       if (!scsi_host_get(phba->shost))
-               goto free_kset;
-       boot_kobj = iscsi_boot_create_ethernet(phba->boot_kset, 0, phba,
-                                              beiscsi_show_boot_eth_info,
-                                              beiscsi_eth_get_attr_visibility,
-                                              beiscsi_boot_release);
-       if (!boot_kobj)
-               goto put_shost;
-       return 0;
-
-put_shost:
-       scsi_host_put(phba->shost);
-free_kset:
-       iscsi_boot_destroy_kset(phba->boot_kset);
-       phba->boot_kset = NULL;
-       return -ENOMEM;
-}
-
 static int beiscsi_init_port(struct beiscsi_hba *phba)
 {
        int ret;
@@ -4516,7 +4233,8 @@ static int beiscsi_init_port(struct beiscsi_hba *phba)
                goto do_cleanup_ctrlr;
        }
 
-       if (hba_setup_cid_tbls(phba)) {
+       ret = hba_setup_cid_tbls(phba);
+       if (ret < 0) {
                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
                            "BM_%d : Failed in hba_setup_cid_tbls\n");
                kfree(phba->io_sgl_hndl_base);
@@ -4527,61 +4245,15 @@ static int beiscsi_init_port(struct beiscsi_hba *phba)
        return ret;
 
 do_cleanup_ctrlr:
-       hwi_cleanup(phba);
+       hwi_cleanup_port(phba);
        return ret;
 }
 
-static void hwi_purge_eq(struct beiscsi_hba *phba)
-{
-       struct hwi_controller *phwi_ctrlr;
-       struct hwi_context_memory *phwi_context;
-       struct be_queue_info *eq;
-       struct be_eq_entry *eqe = NULL;
-       int i, eq_msix;
-       unsigned int num_processed;
-
-       phwi_ctrlr = phba->phwi_ctrlr;
-       phwi_context = phwi_ctrlr->phwi_ctxt;
-       if (phba->msix_enabled)
-               eq_msix = 1;
-       else
-               eq_msix = 0;
-
-       for (i = 0; i < (phba->num_cpus + eq_msix); i++) {
-               eq = &phwi_context->be_eq[i].q;
-               eqe = queue_tail_node(eq);
-               num_processed = 0;
-               while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
-                                       & EQE_VALID_MASK) {
-                       AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
-                       queue_tail_inc(eq);
-                       eqe = queue_tail_node(eq);
-                       num_processed++;
-               }
-
-               if (num_processed)
-                       hwi_ring_eq_db(phba, eq->id, 1, num_processed, 1, 1);
-       }
-}
-
-static void beiscsi_clean_port(struct beiscsi_hba *phba)
+static void beiscsi_cleanup_port(struct beiscsi_hba *phba)
 {
-       int mgmt_status, ulp_num;
        struct ulp_cid_info *ptr_cid_info = NULL;
+       int ulp_num;
 
-       for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
-               if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
-                       mgmt_status = mgmt_epfw_cleanup(phba, ulp_num);
-                       if (mgmt_status)
-                               beiscsi_log(phba, KERN_WARNING,
-                                           BEISCSI_LOG_INIT,
-                                           "BM_%d : mgmt_epfw_cleanup FAILED"
-                                           " for ULP_%d\n", ulp_num);
-               }
-       }
-
-       hwi_purge_eq(phba);
-       hwi_cleanup(phba);
        kfree(phba->io_sgl_hndl_base);
        kfree(phba->eh_sgl_hndl_base);
        kfree(phba->ep_array);
@@ -4598,7 +4270,6 @@ static void beiscsi_clean_port(struct beiscsi_hba *phba)
                        }
                }
        }
-
 }
 
 /**
@@ -4625,16 +4296,12 @@ beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn,
        io_task = task->dd_data;
 
        if (io_task->pwrb_handle) {
-               memset(io_task->pwrb_handle->pwrb, 0,
-                      sizeof(struct iscsi_wrb));
-               free_wrb_handle(phba, pwrb_context,
-                               io_task->pwrb_handle);
+               free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
                io_task->pwrb_handle = NULL;
        }
 
        if (io_task->psgl_handle) {
-               free_mgmt_sgl_handle(phba,
-                                    io_task->psgl_handle);
+               free_mgmt_sgl_handle(phba, io_task->psgl_handle);
                io_task->psgl_handle = NULL;
        }
 
@@ -4671,6 +4338,7 @@ static void beiscsi_cleanup_task(struct iscsi_task *task)
                pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
                              io_task->bhs_pa.u.a64.address);
                io_task->cmd_bhs = NULL;
+               task->hdr = NULL;
        }
 
        if (task->sc) {
@@ -4686,7 +4354,8 @@ static void beiscsi_cleanup_task(struct iscsi_task *task)
                }
 
                if (io_task->scsi_cmnd) {
-                       scsi_dma_unmap(io_task->scsi_cmnd);
+                       if (io_task->num_sg)
+                               scsi_dma_unmap(io_task->scsi_cmnd);
                        io_task->scsi_cmnd = NULL;
                }
        } else {
@@ -5051,7 +4720,6 @@ static int beiscsi_mtask(struct iscsi_task *task)
 
        cid = beiscsi_conn->beiscsi_conn_cid;
        pwrb = io_task->pwrb_handle->pwrb;
-       memset(pwrb, 0, sizeof(*pwrb));
 
        if (is_chip_be2_be3r(phba)) {
                AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
@@ -5165,6 +4833,15 @@ static int beiscsi_task_xmit(struct iscsi_task *task)
        int num_sg;
        unsigned int  writedir = 0, xferlen = 0;
 
+       phba = io_task->conn->phba;
+       /**
+        * HBA in error includes BEISCSI_HBA_FW_TIMEOUT. The IO path might
+        * still be operational if the FW still gets a heartbeat from the
+        * EP FW. Is the management path really needed to continue further?
+        */
+       if (!beiscsi_hba_is_online(phba))
+               return -EIO;
+
        if (!io_task->conn->login_in_progress)
                task->hdr->exp_statsn = 0;
 
@@ -5172,8 +4849,8 @@ static int beiscsi_task_xmit(struct iscsi_task *task)
                return beiscsi_mtask(task);
 
        io_task->scsi_cmnd = sc;
+       io_task->num_sg = 0;
        num_sg = scsi_dma_map(sc);
-       phba = io_task->conn->phba;
        if (num_sg < 0) {
                beiscsi_log(phba, KERN_ERR,
                            BEISCSI_LOG_IO | BEISCSI_LOG_ISCSI,
@@ -5184,6 +4861,11 @@ static int beiscsi_task_xmit(struct iscsi_task *task)
 
                return num_sg;
        }
+       /**
+        * For scsi cmd task, check num_sg before unmapping in cleanup_task.
+        * For management task, cleanup_task checks mtask_addr before unmapping.
+        */
+       io_task->num_sg = num_sg;
        xferlen = scsi_bufflen(sc);
        sg = scsi_sglist(sc);
        if (sc->sc_data_direction == DMA_TO_DEVICE)
@@ -5213,6 +4895,12 @@ static int beiscsi_bsg_request(struct bsg_job *job)
        shost = iscsi_job_to_shost(job);
        phba = iscsi_host_priv(shost);
 
+       if (!beiscsi_hba_is_online(phba)) {
+               beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+                           "BM_%d : HBA in error 0x%lx\n", phba->state);
+               return -ENXIO;
+       }
+
        switch (bsg_req->msgcode) {
        case ISCSI_BSG_HST_VENDOR:
                nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
@@ -5240,6 +4928,14 @@ static int beiscsi_bsg_request(struct bsg_job *job)
                                        phba->ctrl.mcc_tag_status[tag],
                                        msecs_to_jiffies(
                                        BEISCSI_HOST_MBX_TIMEOUT));
+
+               if (!test_bit(BEISCSI_HBA_ONLINE, &phba->state)) {
+                       clear_bit(MCC_TAG_STATE_RUNNING,
+                                 &phba->ctrl.ptag_state[tag].tag_state);
+                       pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+                                           nonemb_cmd.va, nonemb_cmd.dma);
+                       return -EIO;
+               }
                extd_status = (phba->ctrl.mcc_tag_status[tag] &
                               CQE_STATUS_ADDL_MASK) >> CQE_STATUS_ADDL_SHIFT;
                status = phba->ctrl.mcc_tag_status[tag] & CQE_STATUS_MASK;
@@ -5283,106 +4979,294 @@ void beiscsi_hba_attrs_init(struct beiscsi_hba *phba)
        beiscsi_log_enable_init(phba, beiscsi_log_enable);
 }
 
-/*
- * beiscsi_quiesce()- Cleanup Driver resources
- * @phba: Instance Priv structure
- * @unload_state:i Clean or EEH unload state
- *
- * Free the OS and HW resources held by the driver
- **/
-static void beiscsi_quiesce(struct beiscsi_hba *phba,
-               uint32_t unload_state)
+void beiscsi_start_boot_work(struct beiscsi_hba *phba, unsigned int s_handle)
 {
-       struct hwi_controller *phwi_ctrlr;
-       struct hwi_context_memory *phwi_context;
-       struct be_eq_obj *pbe_eq;
-       unsigned int i, msix_vec;
+       if (phba->boot_struct.boot_kset)
+               return;
 
-       phwi_ctrlr = phba->phwi_ctrlr;
-       phwi_context = phwi_ctrlr->phwi_ctxt;
-       hwi_disable_intr(phba);
-       if (phba->msix_enabled) {
-               for (i = 0; i <= phba->num_cpus; i++) {
-                       msix_vec = phba->msix_entries[i].vector;
-                       free_irq(msix_vec, &phwi_context->be_eq[i]);
-                       kfree(phba->msi_name[i]);
-               }
-       } else
-               if (phba->pcidev->irq)
-                       free_irq(phba->pcidev->irq, phba);
-       pci_disable_msix(phba->pcidev);
-       cancel_delayed_work_sync(&phba->beiscsi_hw_check_task);
+       /* skip if boot work is already in progress */
+       if (test_and_set_bit(BEISCSI_HBA_BOOT_WORK, &phba->state))
+               return;
 
-       for (i = 0; i < phba->num_cpus; i++) {
-               pbe_eq = &phwi_context->be_eq[i];
-               irq_poll_disable(&pbe_eq->iopoll);
+       phba->boot_struct.retry = 3;
+       phba->boot_struct.tag = 0;
+       phba->boot_struct.s_handle = s_handle;
+       phba->boot_struct.action = BEISCSI_BOOT_GET_SHANDLE;
+       schedule_work(&phba->boot_work);
+}
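
The test_and_set_bit() above is a single-flight guard: it atomically returns
the old bit value, so only the first caller (which saw 0) queues the boot
work, and any concurrent caller bails out. The same guard in isolation, with
illustrative names:

	#define DEMO_BOOT_WORK	0

	struct demo_hba {
		unsigned long state;
		struct work_struct boot_work;
	};

	static void demo_kick_boot_work(struct demo_hba *d)
	{
		if (test_and_set_bit(DEMO_BOOT_WORK, &d->state))
			return;		/* boot work already queued or running */
		schedule_work(&d->boot_work);
	}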
+
+static ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf)
+{
+       struct beiscsi_hba *phba = data;
+       struct mgmt_session_info *boot_sess = &phba->boot_struct.boot_sess;
+       struct mgmt_conn_info *boot_conn = &boot_sess->conn_list[0];
+       char *str = buf;
+       int rc = -EPERM;
+
+       switch (type) {
+       case ISCSI_BOOT_TGT_NAME:
+               rc = sprintf(buf, "%.*s\n",
+                           (int)strlen(boot_sess->target_name),
+                           (char *)&boot_sess->target_name);
+               break;
+       case ISCSI_BOOT_TGT_IP_ADDR:
+               if (boot_conn->dest_ipaddr.ip_type == BEISCSI_IP_TYPE_V4)
+                       rc = sprintf(buf, "%pI4\n",
+                               (char *)&boot_conn->dest_ipaddr.addr);
+               else
+                       rc = sprintf(str, "%pI6\n",
+                               (char *)&boot_conn->dest_ipaddr.addr);
+               break;
+       case ISCSI_BOOT_TGT_PORT:
+               rc = sprintf(str, "%d\n", boot_conn->dest_port);
+               break;
+
+       case ISCSI_BOOT_TGT_CHAP_NAME:
+               rc = sprintf(str,  "%.*s\n",
+                            boot_conn->negotiated_login_options.auth_data.chap.
+                            target_chap_name_length,
+                            (char *)&boot_conn->negotiated_login_options.
+                            auth_data.chap.target_chap_name);
+               break;
+       case ISCSI_BOOT_TGT_CHAP_SECRET:
+               rc = sprintf(str,  "%.*s\n",
+                            boot_conn->negotiated_login_options.auth_data.chap.
+                            target_secret_length,
+                            (char *)&boot_conn->negotiated_login_options.
+                            auth_data.chap.target_secret);
+               break;
+       case ISCSI_BOOT_TGT_REV_CHAP_NAME:
+               rc = sprintf(str,  "%.*s\n",
+                            boot_conn->negotiated_login_options.auth_data.chap.
+                            intr_chap_name_length,
+                            (char *)&boot_conn->negotiated_login_options.
+                            auth_data.chap.intr_chap_name);
+               break;
+       case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
+               rc = sprintf(str,  "%.*s\n",
+                            boot_conn->negotiated_login_options.auth_data.chap.
+                            intr_secret_length,
+                            (char *)&boot_conn->negotiated_login_options.
+                            auth_data.chap.intr_secret);
+               break;
+       case ISCSI_BOOT_TGT_FLAGS:
+               rc = sprintf(str, "2\n");
+               break;
+       case ISCSI_BOOT_TGT_NIC_ASSOC:
+               rc = sprintf(str, "0\n");
+               break;
        }
+       return rc;
+}
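
These show callbacks follow the iscsi_boot sysfs contract: return the number
of bytes written into the buffer, or -EPERM for attribute types the driver
does not expose; the matching *_get_attr_visibility callbacks below return
S_IRUGO to publish an attribute read-only and 0 to hide it. A minimal pair,
with demo_* names standing in for the driver's:

	static ssize_t demo_show(void *data, int type, char *buf)
	{
		if (type == ISCSI_BOOT_ETH_INDEX)
			return sprintf(buf, "0\n");
		return -EPERM;	/* anything else is not exposed */
	}

	static umode_t demo_visible(void *data, int type)
	{
		return (type == ISCSI_BOOT_ETH_INDEX) ? S_IRUGO : 0;
	}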
 
-       if (unload_state == BEISCSI_CLEAN_UNLOAD) {
-               destroy_workqueue(phba->wq);
-               beiscsi_clean_port(phba);
-               beiscsi_free_mem(phba);
+static ssize_t beiscsi_show_boot_ini_info(void *data, int type, char *buf)
+{
+       struct beiscsi_hba *phba = data;
+       char *str = buf;
+       int rc = -EPERM;
 
-               beiscsi_unmap_pci_function(phba);
-               pci_free_consistent(phba->pcidev,
-                                   phba->ctrl.mbox_mem_alloced.size,
-                                   phba->ctrl.mbox_mem_alloced.va,
-                                   phba->ctrl.mbox_mem_alloced.dma);
-       } else {
-               hwi_purge_eq(phba);
-               hwi_cleanup(phba);
+       switch (type) {
+       case ISCSI_BOOT_INI_INITIATOR_NAME:
+               rc = sprintf(str, "%s\n",
+                            phba->boot_struct.boot_sess.initiator_iscsiname);
+               break;
        }
+       return rc;
+}
+
+static ssize_t beiscsi_show_boot_eth_info(void *data, int type, char *buf)
+{
+       struct beiscsi_hba *phba = data;
+       char *str = buf;
+       int rc = -EPERM;
 
+       switch (type) {
+       case ISCSI_BOOT_ETH_FLAGS:
+               rc = sprintf(str, "2\n");
+               break;
+       case ISCSI_BOOT_ETH_INDEX:
+               rc = sprintf(str, "0\n");
+               break;
+       case ISCSI_BOOT_ETH_MAC:
+               rc  = beiscsi_get_macaddr(str, phba);
+               break;
+       }
+       return rc;
 }
 
-static void beiscsi_remove(struct pci_dev *pcidev)
+static umode_t beiscsi_tgt_get_attr_visibility(void *data, int type)
 {
-       struct beiscsi_hba *phba = NULL;
+       umode_t rc = 0;
 
-       phba = pci_get_drvdata(pcidev);
-       if (!phba) {
-               dev_err(&pcidev->dev, "beiscsi_remove called with no phba\n");
-               return;
+       switch (type) {
+       case ISCSI_BOOT_TGT_NAME:
+       case ISCSI_BOOT_TGT_IP_ADDR:
+       case ISCSI_BOOT_TGT_PORT:
+       case ISCSI_BOOT_TGT_CHAP_NAME:
+       case ISCSI_BOOT_TGT_CHAP_SECRET:
+       case ISCSI_BOOT_TGT_REV_CHAP_NAME:
+       case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
+       case ISCSI_BOOT_TGT_NIC_ASSOC:
+       case ISCSI_BOOT_TGT_FLAGS:
+               rc = S_IRUGO;
+               break;
        }
+       return rc;
+}
 
-       beiscsi_destroy_def_ifaces(phba);
-       iscsi_boot_destroy_kset(phba->boot_kset);
-       iscsi_host_remove(phba->shost);
-       beiscsi_quiesce(phba, BEISCSI_CLEAN_UNLOAD);
-       pci_dev_put(phba->pcidev);
-       iscsi_host_free(phba->shost);
-       pci_disable_pcie_error_reporting(pcidev);
-       pci_set_drvdata(pcidev, NULL);
-       pci_release_regions(pcidev);
-       pci_disable_device(pcidev);
+static umode_t beiscsi_ini_get_attr_visibility(void *data, int type)
+{
+       umode_t rc = 0;
+
+       switch (type) {
+       case ISCSI_BOOT_INI_INITIATOR_NAME:
+               rc = S_IRUGO;
+               break;
+       }
+       return rc;
 }
 
-static void beiscsi_msix_enable(struct beiscsi_hba *phba)
+static umode_t beiscsi_eth_get_attr_visibility(void *data, int type)
 {
-       int i, status;
+       umode_t rc = 0;
 
-       for (i = 0; i <= phba->num_cpus; i++)
-               phba->msix_entries[i].entry = i;
+       switch (type) {
+       case ISCSI_BOOT_ETH_FLAGS:
+       case ISCSI_BOOT_ETH_MAC:
+       case ISCSI_BOOT_ETH_INDEX:
+               rc = S_IRUGO;
+               break;
+       }
+       return rc;
+}
 
-       status = pci_enable_msix_range(phba->pcidev, phba->msix_entries,
-                                      phba->num_cpus + 1, phba->num_cpus + 1);
-       if (status > 0)
-               phba->msix_enabled = true;
+static void beiscsi_boot_kobj_release(void *data)
+{
+       struct beiscsi_hba *phba = data;
+
+       scsi_host_put(phba->shost);
+}
+
+static int beiscsi_boot_create_kset(struct beiscsi_hba *phba)
+{
+       struct boot_struct *bs = &phba->boot_struct;
+       struct iscsi_boot_kobj *boot_kobj;
+
+       if (bs->boot_kset) {
+               __beiscsi_log(phba, KERN_ERR,
+                             "BM_%d: boot_kset already created\n");
+               return 0;
+       }
+
+       bs->boot_kset = iscsi_boot_create_host_kset(phba->shost->host_no);
+       if (!bs->boot_kset) {
+               __beiscsi_log(phba, KERN_ERR,
+                             "BM_%d: boot_kset alloc failed\n");
+               return -ENOMEM;
+       }
+
+       /* get a shost ref because the show functions dereference phba */
+       if (!scsi_host_get(phba->shost))
+               goto free_kset;
+
+       boot_kobj = iscsi_boot_create_target(bs->boot_kset, 0, phba,
+                                            beiscsi_show_boot_tgt_info,
+                                            beiscsi_tgt_get_attr_visibility,
+                                            beiscsi_boot_kobj_release);
+       if (!boot_kobj)
+               goto put_shost;
+
+       if (!scsi_host_get(phba->shost))
+               goto free_kset;
+
+       boot_kobj = iscsi_boot_create_initiator(bs->boot_kset, 0, phba,
+                                               beiscsi_show_boot_ini_info,
+                                               beiscsi_ini_get_attr_visibility,
+                                               beiscsi_boot_kobj_release);
+       if (!boot_kobj)
+               goto put_shost;
+
+       if (!scsi_host_get(phba->shost))
+               goto free_kset;
+
+       boot_kobj = iscsi_boot_create_ethernet(bs->boot_kset, 0, phba,
+                                              beiscsi_show_boot_eth_info,
+                                              beiscsi_eth_get_attr_visibility,
+                                              beiscsi_boot_kobj_release);
+       if (!boot_kobj)
+               goto put_shost;
+
+       return 0;
+
+put_shost:
+       scsi_host_put(phba->shost);
+free_kset:
+       iscsi_boot_destroy_kset(bs->boot_kset);
+       bs->boot_kset = NULL;
+       return -ENOMEM;
+}
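
The kset creation above pairs one scsi_host_get() with each boot kobject, and beiscsi_boot_kobj_release() supplies the matching put. A minimal sketch of that pairing, with hypothetical demo_* helpers standing in for the driver's show/visibility callbacks:

#include <scsi/scsi_host.h>
#include <scsi/iscsi_boot_sysfs.h>

static ssize_t demo_show(void *data, int type, char *buf) { return -EPERM; }
static umode_t demo_visible(void *data, int type) { return S_IRUGO; }

static void demo_release(void *data)
{
	/* one put per kobject, dropped when sysfs tears the kobject down */
	scsi_host_put((struct Scsi_Host *)data);
}

static int demo_create_target_kobj(struct Scsi_Host *shost,
				   struct iscsi_boot_kset *kset)
{
	if (!scsi_host_get(shost))	/* pin the host for this kobject */
		return -ENODEV;
	if (!iscsi_boot_create_target(kset, 0, shost, demo_show,
				      demo_visible, demo_release)) {
		scsi_host_put(shost);	/* kobject never took the ref */
		return -ENOMEM;
	}
	return 0;
}
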
+
+static void beiscsi_boot_work(struct work_struct *work)
+{
+       struct beiscsi_hba *phba =
+               container_of(work, struct beiscsi_hba, boot_work);
+       struct boot_struct *bs = &phba->boot_struct;
+       unsigned int tag = 0;
 
-       return;
+       if (!beiscsi_hba_is_online(phba))
+               return;
+
+       beiscsi_log(phba, KERN_INFO,
+                   BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+                   "BM_%d : %s action %d\n",
+                   __func__, phba->boot_struct.action);
+
+       switch (phba->boot_struct.action) {
+       case BEISCSI_BOOT_REOPEN_SESS:
+               tag = beiscsi_boot_reopen_sess(phba);
+               break;
+       case BEISCSI_BOOT_GET_SHANDLE:
+               tag = __beiscsi_boot_get_shandle(phba, 1);
+               break;
+       case BEISCSI_BOOT_GET_SINFO:
+               tag = beiscsi_boot_get_sinfo(phba);
+               break;
+       case BEISCSI_BOOT_LOGOUT_SESS:
+               tag = beiscsi_boot_logout_sess(phba);
+               break;
+       case BEISCSI_BOOT_CREATE_KSET:
+               beiscsi_boot_create_kset(phba);
+               /*
+                * Make the updated boot_kset visible to all CPUs
+                * before ending the boot work.
+                */
+               mb();
+               clear_bit(BEISCSI_HBA_BOOT_WORK, &phba->state);
+               return;
+       }
+       if (!tag) {
+               if (bs->retry--)
+                       schedule_work(&phba->boot_work);
+               else
+                       clear_bit(BEISCSI_HBA_BOOT_WORK, &phba->state);
+       }
 }
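
beiscsi_boot_work above is a small state machine: each pass posts one async step, and a failed post is retried until bs->retry runs out. Stripped to its shape (a sketch only; the demo_* names are hypothetical):

#include <linux/workqueue.h>
#include <linux/bitops.h>

struct demo_ctx {
	struct work_struct work;
	int retry;			/* remaining attempts */
	unsigned long flags;
#define DEMO_WORK_PENDING	0
};

/* post one async step; returns a non-zero tag on success, 0 on failure */
static unsigned int demo_post_step(struct demo_ctx *ctx)
{
	return 0;
}

static void demo_work_fn(struct work_struct *work)
{
	struct demo_ctx *ctx = container_of(work, struct demo_ctx, work);

	if (demo_post_step(ctx))
		return;		/* the completion handler re-arms the work */

	if (ctx->retry--)
		schedule_work(&ctx->work);	/* retry the same step */
	else
		clear_bit(DEMO_WORK_PENDING, &ctx->flags);
}
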
 
-static void be_eqd_update(struct beiscsi_hba *phba)
+static void beiscsi_eqd_update_work(struct work_struct *work)
 {
+       struct hwi_context_memory *phwi_context;
        struct be_set_eqd set_eqd[MAX_CPUS];
-       struct be_aic_obj *aic;
-       struct be_eq_obj *pbe_eq;
        struct hwi_controller *phwi_ctrlr;
-       struct hwi_context_memory *phwi_context;
+       struct be_eq_obj *pbe_eq;
+       struct beiscsi_hba *phba;
+       unsigned int pps, delta;
+       struct be_aic_obj *aic;
        int eqd, i, num = 0;
-       ulong now;
-       u32 pps, delta;
-       unsigned int tag;
+       unsigned long now;
+
+       phba = container_of(work, struct beiscsi_hba, eqd_update.work);
+       if (!beiscsi_hba_is_online(phba))
+               return;
 
        phwi_ctrlr = phba->phwi_ctrlr;
        phwi_context = phwi_ctrlr->phwi_ctxt;
@@ -5391,13 +5275,13 @@ static void be_eqd_update(struct beiscsi_hba *phba)
                aic = &phba->aic_obj[i];
                pbe_eq = &phwi_context->be_eq[i];
                now = jiffies;
-               if (!aic->jiffs || time_before(now, aic->jiffs) ||
+               if (!aic->jiffies || time_before(now, aic->jiffies) ||
                    pbe_eq->cq_count < aic->eq_prev) {
-                       aic->jiffs = now;
+                       aic->jiffies = now;
                        aic->eq_prev = pbe_eq->cq_count;
                        continue;
                }
-               delta = jiffies_to_msecs(now - aic->jiffs);
+               delta = jiffies_to_msecs(now - aic->jiffies);
                pps = (((u32)(pbe_eq->cq_count - aic->eq_prev) * 1000) / delta);
                eqd = (pps / 1500) << 2;
 
@@ -5406,7 +5290,7 @@ static void be_eqd_update(struct beiscsi_hba *phba)
                eqd = min_t(u32, eqd, phwi_context->max_eqd);
                eqd = max_t(u32, eqd, phwi_context->min_eqd);
 
-               aic->jiffs = now;
+               aic->jiffies = now;
                aic->eq_prev = pbe_eq->cq_count;
 
                if (eqd != aic->prev_eqd) {
@@ -5416,53 +5300,242 @@ static void be_eqd_update(struct beiscsi_hba *phba)
                        num++;
                }
        }
-       if (num) {
-               tag = be_cmd_modify_eq_delay(phba, set_eqd, num);
-               if (tag)
-                       beiscsi_mccq_compl_wait(phba, tag, NULL, NULL);
+       if (num)
+               /* completion of this is ignored */
+               beiscsi_modify_eq_delay(phba, set_eqd, num);
+
+       schedule_delayed_work(&phba->eqd_update,
+                             msecs_to_jiffies(BEISCSI_EQD_UPDATE_INTERVAL));
+}
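
The coalescing math above works in completions per second: pps = (cq_count - eq_prev) * 1000 / delta_ms, then eqd = (pps / 1500) << 2, clamped to the firmware's min/max. For example, 60000 completions over a 1000 ms window give pps = 60000 and an unclamped eqd of (60000 / 1500) << 2 = 160. A standalone sketch with placeholder clamp bounds (the real ones come from phwi_context):

static inline u32 demo_calc_eqd(u32 cq_count, u32 cq_prev, u32 delta_ms)
{
	u32 pps, eqd;

	pps = (cq_count - cq_prev) * 1000 / delta_ms;	/* completions/sec */
	eqd = (pps / 1500) << 2;			/* EQ-delay units */
	return clamp_t(u32, eqd, 0, 256);		/* placeholder bounds */
}
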
+
+static void beiscsi_msix_enable(struct beiscsi_hba *phba)
+{
+       int i, status;
+
+       for (i = 0; i <= phba->num_cpus; i++)
+               phba->msix_entries[i].entry = i;
+
+       status = pci_enable_msix_range(phba->pcidev, phba->msix_entries,
+                                      phba->num_cpus + 1, phba->num_cpus + 1);
+       if (status > 0)
+               phba->msix_enabled = true;
+}
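
pci_enable_msix_range() with min == max, as used above, is an all-or-nothing request: it returns the allocated vector count on success and a negative errno otherwise. The driver asks for num_cpus + 1 vectors, with the extra vector serving the MCC path. A minimal sketch of the same call shape (demo-only; the caller keeps nvec within the table size):

#include <linux/pci.h>

static bool demo_enable_msix(struct pci_dev *pdev, int nvec)
{
	struct msix_entry entries[8];	/* demo-sized table, nvec <= 8 */
	int i, ret;

	for (i = 0; i < nvec; i++)
		entries[i].entry = i;
	/* min == max: succeed with exactly nvec vectors or fail */
	ret = pci_enable_msix_range(pdev, entries, nvec, nvec);
	return ret > 0;
}
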
+
+static void beiscsi_hw_tpe_check(unsigned long ptr)
+{
+       struct beiscsi_hba *phba;
+       u32 wait;
+
+       phba = (struct beiscsi_hba *)ptr;
+       /* if not TPE, do nothing */
+       if (!beiscsi_detect_tpe(phba))
+               return;
+
+       /* wait default 4000ms before recovering */
+       wait = 4000;
+       if (phba->ue2rp > BEISCSI_UE_DETECT_INTERVAL)
+               wait = phba->ue2rp - BEISCSI_UE_DETECT_INTERVAL;
+       queue_delayed_work(phba->wq, &phba->recover_port,
+                          msecs_to_jiffies(wait));
+}
+
+static void beiscsi_hw_health_check(unsigned long ptr)
+{
+       struct beiscsi_hba *phba;
+
+       phba = (struct beiscsi_hba *)ptr;
+       if (beiscsi_detect_ue(phba)) {
+               __beiscsi_log(phba, KERN_ERR,
+                             "BM_%d : port in error: %lx\n", phba->state);
+               /* sessions are no longer valid, so first fail the sessions */
+               queue_work(phba->wq, &phba->sess_work);
+
+               /* nothing more to do if UER is not supported */
+               if (!test_bit(BEISCSI_HBA_UER_SUPP, &phba->state))
+                       return;
+               /* modify this timer to check TPE */
+               phba->hw_check.function = beiscsi_hw_tpe_check;
        }
+
+       mod_timer(&phba->hw_check,
+                 jiffies + msecs_to_jiffies(BEISCSI_UE_DETECT_INTERVAL));
 }
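
Swapping hw_check.function between the health check and the TPE check, as above, relies on the pre-4.15 timer API where the callback and its unsigned long data are plain struct fields. A hedged sketch of the same escalation pattern (demo_* names are hypothetical):

#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list demo_timer;

static void demo_stage2_check(unsigned long data)
{
	/* deeper (TPE-style) check; re-arm or defer to a workqueue here */
}

static void demo_stage1_check(unsigned long data)
{
	/* escalate: the next expiry runs the second-stage check */
	demo_timer.function = demo_stage2_check;
	mod_timer(&demo_timer, jiffies + msecs_to_jiffies(1000));
}

static void demo_start_checks(void)
{
	init_timer(&demo_timer);
	demo_timer.function = demo_stage1_check;
	demo_timer.data = 0;
	mod_timer(&demo_timer, jiffies + msecs_to_jiffies(1000));
}
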
 
-static void be_check_boot_session(struct beiscsi_hba *phba)
+/**
+ * beiscsi_enable_port()- Enable a disabled port.
+ * @phba: Instance of driver private structure
+ *
+ * Only the port resources freed in the disable path are reallocated.
+ * This is called from the HBA error handling path.
+ **/
+static int beiscsi_enable_port(struct beiscsi_hba *phba)
 {
-       if (beiscsi_setup_boot_info(phba))
-               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-                           "BM_%d : Could not set up "
-                           "iSCSI boot info on async event.\n");
+       struct hwi_context_memory *phwi_context;
+       struct hwi_controller *phwi_ctrlr;
+       struct be_eq_obj *pbe_eq;
+       int ret, i;
+
+       if (test_bit(BEISCSI_HBA_ONLINE, &phba->state)) {
+               __beiscsi_log(phba, KERN_ERR,
+                             "BM_%d : %s : port is online %lx\n",
+                             __func__, phba->state);
+               return 0;
+       }
+
+       ret = beiscsi_init_sliport(phba);
+       if (ret)
+               return ret;
+
+       if (enable_msix)
+               find_num_cpus(phba);
+       else
+               phba->num_cpus = 1;
+       if (enable_msix) {
+               beiscsi_msix_enable(phba);
+               if (!phba->msix_enabled)
+                       phba->num_cpus = 1;
+       }
+
+       beiscsi_get_params(phba);
+       /* Re-enable UER. If a different TPE occurs, it is recoverable. */
+       beiscsi_set_uer_feature(phba);
+
+       phba->shost->max_id = phba->params.cxns_per_ctrl;
+       phba->shost->can_queue = phba->params.ios_per_ctrl;
+       ret = hwi_init_controller(phba);
+       if (ret) {
+               __beiscsi_log(phba, KERN_ERR,
+                             "BM_%d : init controller failed %d\n", ret);
+               goto disable_msix;
+       }
+
+       for (i = 0; i < MAX_MCC_CMD; i++) {
+               init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
+               phba->ctrl.mcc_tag[i] = i + 1;
+               phba->ctrl.mcc_tag_status[i + 1] = 0;
+               phba->ctrl.mcc_tag_available++;
+       }
+
+       phwi_ctrlr = phba->phwi_ctrlr;
+       phwi_context = phwi_ctrlr->phwi_ctxt;
+       for (i = 0; i < phba->num_cpus; i++) {
+               pbe_eq = &phwi_context->be_eq[i];
+               irq_poll_init(&pbe_eq->iopoll, be_iopoll_budget, be_iopoll);
+       }
+
+       i = (phba->msix_enabled) ? i : 0;
+       /* Work item for MCC handling */
+       pbe_eq = &phwi_context->be_eq[i];
+       INIT_WORK(&pbe_eq->mcc_work, beiscsi_mcc_work);
+
+       ret = beiscsi_init_irqs(phba);
+       if (ret < 0) {
+               __beiscsi_log(phba, KERN_ERR,
+                             "BM_%d : setup IRQs failed %d\n", ret);
+               goto cleanup_port;
+       }
+       hwi_enable_intr(phba);
+       /* port operational: clear all error bits */
+       set_bit(BEISCSI_HBA_ONLINE, &phba->state);
+       __beiscsi_log(phba, KERN_INFO,
+                     "BM_%d : port online: 0x%lx\n", phba->state);
+
+       /* start hw_check timer and eqd_update work */
+       schedule_delayed_work(&phba->eqd_update,
+                             msecs_to_jiffies(BEISCSI_EQD_UPDATE_INTERVAL));
+
+       /*
+        * The timer callback gets swapped for TPE detection.
+        * Always reinitialize it so the health check runs first.
+        */
+       phba->hw_check.function = beiscsi_hw_health_check;
+       mod_timer(&phba->hw_check,
+                 jiffies + msecs_to_jiffies(BEISCSI_UE_DETECT_INTERVAL));
+       return 0;
+
+cleanup_port:
+       for (i = 0; i < phba->num_cpus; i++) {
+               pbe_eq = &phwi_context->be_eq[i];
+               irq_poll_disable(&pbe_eq->iopoll);
+       }
+       hwi_cleanup_port(phba);
+
+disable_msix:
+       if (phba->msix_enabled)
+               pci_disable_msix(phba->pcidev);
+
+       return ret;
 }
 
 /*
- * beiscsi_hw_health_check()- Check adapter health
- * @work: work item to check HW health
+ * beiscsi_disable_port()- Disable the port and clean up driver resources.
+ * @phba: Instance of driver private structure
+ * @unload: indicates the driver is unloading
  *
- * Check if adapter in an unrecoverable state or not.
+ * Free the OS and HW resources held by the driver. This is called from
+ * the HBA error handling and driver removal paths.
  **/
-static void
-beiscsi_hw_health_check(struct work_struct *work)
+static void beiscsi_disable_port(struct beiscsi_hba *phba, int unload)
 {
-       struct beiscsi_hba *phba =
-               container_of(work, struct beiscsi_hba,
-                            beiscsi_hw_check_task.work);
+       struct hwi_context_memory *phwi_context;
+       struct hwi_controller *phwi_ctrlr;
+       struct be_eq_obj *pbe_eq;
+       unsigned int i, msix_vec;
 
-       be_eqd_update(phba);
+       if (!test_and_clear_bit(BEISCSI_HBA_ONLINE, &phba->state))
+               return;
 
-       if (phba->state & BE_ADAPTER_CHECK_BOOT) {
-               if ((phba->get_boot > 0) && (!phba->boot_kset)) {
-                       phba->get_boot--;
-                       if (!(phba->get_boot % BE_GET_BOOT_TO))
-                               be_check_boot_session(phba);
-               } else {
-                       phba->state &= ~BE_ADAPTER_CHECK_BOOT;
-                       phba->get_boot = 0;
+       phwi_ctrlr = phba->phwi_ctrlr;
+       phwi_context = phwi_ctrlr->phwi_ctxt;
+       hwi_disable_intr(phba);
+       if (phba->msix_enabled) {
+               for (i = 0; i <= phba->num_cpus; i++) {
+                       msix_vec = phba->msix_entries[i].vector;
+                       free_irq(msix_vec, &phwi_context->be_eq[i]);
+                       kfree(phba->msi_name[i]);
                }
+       } else if (phba->pcidev->irq) {
+               free_irq(phba->pcidev->irq, phba);
+       }
+       pci_disable_msix(phba->pcidev);
+
+       for (i = 0; i < phba->num_cpus; i++) {
+               pbe_eq = &phwi_context->be_eq[i];
+               irq_poll_disable(&pbe_eq->iopoll);
+       }
+       cancel_delayed_work_sync(&phba->eqd_update);
+       cancel_work_sync(&phba->boot_work);
+       /* WQ might be running; cancel queued mcc_work if we are not exiting */
+       if (!unload && beiscsi_hba_in_error(phba)) {
+               pbe_eq = &phwi_context->be_eq[i];
+               cancel_work_sync(&pbe_eq->mcc_work);
        }
+       hwi_cleanup_port(phba);
+}
 
-       beiscsi_ue_detect(phba);
+static void beiscsi_sess_work(struct work_struct *work)
+{
+       struct beiscsi_hba *phba;
 
-       schedule_delayed_work(&phba->beiscsi_hw_check_task,
-                             msecs_to_jiffies(1000));
+       phba = container_of(work, struct beiscsi_hba, sess_work);
+       /*
+        * This work gets scheduled only in case of HBA error.
+        * Old sessions are gone, so they need to be re-established.
+        * iscsi_session_failure needs process context, hence this work.
+        */
+       iscsi_host_for_each_session(phba->shost, beiscsi_session_fail);
 }
 
+static void beiscsi_recover_port(struct work_struct *work)
+{
+       struct beiscsi_hba *phba;
+
+       phba = container_of(work, struct beiscsi_hba, recover_port.work);
+       beiscsi_disable_port(phba, 0);
+       beiscsi_enable_port(phba);
+}
 
 static pci_ers_result_t beiscsi_eeh_err_detected(struct pci_dev *pdev,
                pci_channel_state_t state)
@@ -5470,12 +5543,18 @@ static pci_ers_result_t beiscsi_eeh_err_detected(struct pci_dev *pdev,
        struct beiscsi_hba *phba = NULL;
 
        phba = (struct beiscsi_hba *)pci_get_drvdata(pdev);
-       phba->state |= BE_ADAPTER_PCI_ERR;
+       set_bit(BEISCSI_HBA_PCI_ERR, &phba->state);
 
        beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
                    "BM_%d : EEH error detected\n");
 
-       beiscsi_quiesce(phba, BEISCSI_EEH_UNLOAD);
+       /* first stop UE detection when PCI error detected */
+       del_timer_sync(&phba->hw_check);
+       cancel_delayed_work_sync(&phba->recover_port);
+
+       /* sessions are no longer valid, so first fail the sessions */
+       iscsi_host_for_each_session(phba->shost, beiscsi_session_fail);
+       beiscsi_disable_port(phba, 0);
 
        if (state == pci_channel_io_perm_failure) {
                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
@@ -5515,9 +5594,8 @@ static pci_ers_result_t beiscsi_eeh_reset(struct pci_dev *pdev)
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
 
-       /* Wait for the CHIP Reset to complete */
-       status = be_chk_reset_complete(phba);
-       if (!status) {
+       status = beiscsi_check_fw_rdy(phba);
+       if (status) {
                beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
                            "BM_%d : EEH Reset Completed\n");
        } else {
@@ -5532,87 +5610,16 @@ static pci_ers_result_t beiscsi_eeh_reset(struct pci_dev *pdev)
 
 static void beiscsi_eeh_resume(struct pci_dev *pdev)
 {
-       int ret = 0, i;
-       struct be_eq_obj *pbe_eq;
-       struct beiscsi_hba *phba = NULL;
-       struct hwi_controller *phwi_ctrlr;
-       struct hwi_context_memory *phwi_context;
+       struct beiscsi_hba *phba;
+       int ret;
 
        phba = (struct beiscsi_hba *)pci_get_drvdata(pdev);
        pci_save_state(pdev);
 
-       if (enable_msix)
-               find_num_cpus(phba);
-       else
-               phba->num_cpus = 1;
-
-       if (enable_msix) {
-               beiscsi_msix_enable(phba);
-               if (!phba->msix_enabled)
-                       phba->num_cpus = 1;
-       }
-
-       ret = beiscsi_cmd_reset_function(phba);
-       if (ret) {
-               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-                           "BM_%d : Reset Failed\n");
-               goto ret_err;
-       }
-
-       ret = be_chk_reset_complete(phba);
-       if (ret) {
-               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-                           "BM_%d : Failed to get out of reset.\n");
-               goto ret_err;
-       }
-
-       beiscsi_get_params(phba);
-       phba->shost->max_id = phba->params.cxns_per_ctrl;
-       phba->shost->can_queue = phba->params.ios_per_ctrl;
-       ret = hwi_init_controller(phba);
-       if (ret) {
-               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-                           "BM_%d : beiscsi_eeh_resume -"
-                            "Failed to initialize beiscsi_hba.\n");
-               goto ret_err;
-       }
-
-       for (i = 0; i < MAX_MCC_CMD; i++) {
-               init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
-               phba->ctrl.mcc_tag[i] = i + 1;
-               phba->ctrl.mcc_tag_status[i + 1] = 0;
-               phba->ctrl.mcc_tag_available++;
-       }
-
-       phwi_ctrlr = phba->phwi_ctrlr;
-       phwi_context = phwi_ctrlr->phwi_ctxt;
-
-       for (i = 0; i < phba->num_cpus; i++) {
-               pbe_eq = &phwi_context->be_eq[i];
-               irq_poll_init(&pbe_eq->iopoll, be_iopoll_budget,
-                               be_iopoll);
-       }
-
-       i = (phba->msix_enabled) ? i : 0;
-       /* Work item for MCC handling */
-       pbe_eq = &phwi_context->be_eq[i];
-       INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs);
-
-       ret = beiscsi_init_irqs(phba);
-       if (ret < 0) {
-               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-                           "BM_%d : beiscsi_eeh_resume - "
-                           "Failed to beiscsi_init_irqs\n");
-               goto ret_err;
-       }
-
-       hwi_enable_intr(phba);
-       phba->state &= ~BE_ADAPTER_PCI_ERR;
-
-       return;
-ret_err:
-       beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-                   "BM_%d : AER EEH Resume Failed\n");
+       ret = beiscsi_enable_port(phba);
+       if (ret)
+               __beiscsi_log(phba, KERN_ERR,
+                             "BM_%d : AER EEH resume failed\n");
 }
 
 static int beiscsi_dev_probe(struct pci_dev *pcidev,
@@ -5622,7 +5629,8 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
        struct hwi_controller *phwi_ctrlr;
        struct hwi_context_memory *phwi_context;
        struct be_eq_obj *pbe_eq;
-       int ret = 0, i;
+       unsigned int s_handle;
+       int ret, i;
 
        ret = beiscsi_enable_pci(pcidev);
        if (ret < 0) {
@@ -5635,6 +5643,7 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
        if (!phba) {
                dev_err(&pcidev->dev,
                        "beiscsi_dev_probe - Failed in beiscsi_hba_alloc\n");
+               ret = -ENOMEM;
                goto disable_pci;
        }
 
@@ -5650,10 +5659,8 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
        /* Initialize Driver configuration Paramters */
        beiscsi_hba_attrs_init(phba);
 
-       phba->fw_timeout = false;
        phba->mac_addr_set = false;
 
-
        switch (pcidev->device) {
        case BE_DEVICE_ID1:
        case OC_DEVICE_ID1:
@@ -5677,39 +5684,26 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
        ret = be_ctrl_init(phba, pcidev);
        if (ret) {
                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-                           "BM_%d : beiscsi_dev_probe-"
-                           "Failed in be_ctrl_init\n");
+                           "BM_%d : be_ctrl_init failed\n");
                goto hba_free;
        }
 
-       /*
-        * FUNCTION_RESET should clean up any stale info in FW for this fn
-        */
-       ret = beiscsi_cmd_reset_function(phba);
-       if (ret) {
-               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-                           "BM_%d : Reset Failed\n");
-               goto hba_free;
-       }
-       ret = be_chk_reset_complete(phba);
-       if (ret) {
-               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-                           "BM_%d : Failed to get out of reset.\n");
+       ret = beiscsi_init_sliport(phba);
+       if (ret)
                goto hba_free;
-       }
 
        spin_lock_init(&phba->io_sgl_lock);
        spin_lock_init(&phba->mgmt_sgl_lock);
-       spin_lock_init(&phba->isr_lock);
        spin_lock_init(&phba->async_pdu_lock);
-       ret = mgmt_get_fw_config(&phba->ctrl, phba);
+       ret = beiscsi_get_fw_config(&phba->ctrl, phba);
        if (ret != 0) {
                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
                            "BM_%d : Error getting fw config\n");
                goto free_port;
        }
-       mgmt_get_port_name(&phba->ctrl, phba);
+       beiscsi_get_port_name(&phba->ctrl, phba);
        beiscsi_get_params(phba);
+       beiscsi_set_uer_feature(phba);
 
        if (enable_msix)
                find_num_cpus(phba);
@@ -5754,25 +5748,24 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
                            "BM_%d : beiscsi_dev_probe-"
                            "Failed to allocate work queue\n");
+               ret = -ENOMEM;
                goto free_twq;
        }
 
-       INIT_DELAYED_WORK(&phba->beiscsi_hw_check_task,
-                         beiscsi_hw_health_check);
+       INIT_DELAYED_WORK(&phba->eqd_update, beiscsi_eqd_update_work);
 
        phwi_ctrlr = phba->phwi_ctrlr;
        phwi_context = phwi_ctrlr->phwi_ctxt;
 
        for (i = 0; i < phba->num_cpus; i++) {
                pbe_eq = &phwi_context->be_eq[i];
-               irq_poll_init(&pbe_eq->iopoll, be_iopoll_budget,
-                               be_iopoll);
+               irq_poll_init(&pbe_eq->iopoll, be_iopoll_budget, be_iopoll);
        }
 
        i = (phba->msix_enabled) ? i : 0;
        /* Work item for MCC handling */
        pbe_eq = &phwi_context->be_eq[i];
-       INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs);
+       INIT_WORK(&pbe_eq->mcc_work, beiscsi_mcc_work);
 
        ret = beiscsi_init_irqs(phba);
        if (ret < 0) {
@@ -5783,22 +5776,42 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
        }
        hwi_enable_intr(phba);
 
-       if (iscsi_host_add(phba->shost, &phba->pcidev->dev))
+       ret = iscsi_host_add(phba->shost, &phba->pcidev->dev);
+       if (ret)
                goto free_blkenbld;
 
-       if (beiscsi_setup_boot_info(phba))
-               /*
-                * log error but continue, because we may not be using
-                * iscsi boot.
+       /* set online bit after port is operational */
+       set_bit(BEISCSI_HBA_ONLINE, &phba->state);
+       __beiscsi_log(phba, KERN_INFO,
+                     "BM_%d : port online: 0x%lx\n", phba->state);
+
+       INIT_WORK(&phba->boot_work, beiscsi_boot_work);
+       ret = beiscsi_boot_get_shandle(phba, &s_handle);
+       if (ret > 0) {
+               beiscsi_start_boot_work(phba, s_handle);
+               /*
+                * Set this bit after starting the work so that probe
+                * handles it first. An ASYNC event can also schedule
+                * this work.
                 */
-               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-                           "BM_%d : Could not set up "
-                           "iSCSI boot info.\n");
+               set_bit(BEISCSI_HBA_BOOT_FOUND, &phba->state);
+       }
 
-       beiscsi_create_def_ifaces(phba);
-       schedule_delayed_work(&phba->beiscsi_hw_check_task,
-                             msecs_to_jiffies(1000));
+       beiscsi_iface_create_default(phba);
+       schedule_delayed_work(&phba->eqd_update,
+                             msecs_to_jiffies(BEISCSI_EQD_UPDATE_INTERVAL));
 
+       INIT_WORK(&phba->sess_work, beiscsi_sess_work);
+       INIT_DELAYED_WORK(&phba->recover_port, beiscsi_recover_port);
+       /*
+        * Start UE detection here. A UE before this would stall the
+        * probe and eventually fail it.
+        */
+       init_timer(&phba->hw_check);
+       phba->hw_check.function = beiscsi_hw_health_check;
+       phba->hw_check.data = (unsigned long)phba;
+       mod_timer(&phba->hw_check,
+                 jiffies + msecs_to_jiffies(BEISCSI_UE_DETECT_INTERVAL));
        beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
                    "\n\n\n BM_%d : SUCCESS - DRIVER LOADED\n\n\n");
        return 0;
@@ -5810,7 +5823,8 @@ free_blkenbld:
                irq_poll_disable(&pbe_eq->iopoll);
        }
 free_twq:
-       beiscsi_clean_port(phba);
+       hwi_cleanup_port(phba);
+       beiscsi_cleanup_port(phba);
        beiscsi_free_mem(phba);
 free_port:
        pci_free_consistent(phba->pcidev,
@@ -5830,6 +5844,49 @@ disable_pci:
        return ret;
 }
 
+static void beiscsi_remove(struct pci_dev *pcidev)
+{
+       struct beiscsi_hba *phba = NULL;
+
+       phba = pci_get_drvdata(pcidev);
+       if (!phba) {
+               dev_err(&pcidev->dev, "beiscsi_remove called with no phba\n");
+               return;
+       }
+
+       /* first stop UE detection before unloading */
+       del_timer_sync(&phba->hw_check);
+       cancel_delayed_work_sync(&phba->recover_port);
+       cancel_work_sync(&phba->sess_work);
+
+       beiscsi_iface_destroy_default(phba);
+       iscsi_host_remove(phba->shost);
+       beiscsi_disable_port(phba, 1);
+
+       /* after cancelling boot_work */
+       iscsi_boot_destroy_kset(phba->boot_struct.boot_kset);
+
+       /* free all resources */
+       destroy_workqueue(phba->wq);
+       beiscsi_cleanup_port(phba);
+       beiscsi_free_mem(phba);
+
+       /* ctrl uninit */
+       beiscsi_unmap_pci_function(phba);
+       pci_free_consistent(phba->pcidev,
+                           phba->ctrl.mbox_mem_alloced.size,
+                           phba->ctrl.mbox_mem_alloced.va,
+                           phba->ctrl.mbox_mem_alloced.dma);
+
+       pci_dev_put(phba->pcidev);
+       iscsi_host_free(phba->shost);
+       pci_disable_pcie_error_reporting(pcidev);
+       pci_set_drvdata(pcidev, NULL);
+       pci_release_regions(pcidev);
+       pci_disable_device(pcidev);
+}
+
 static struct pci_error_handlers beiscsi_eeh_handlers = {
        .error_detected = beiscsi_eeh_err_detected,
        .slot_reset = beiscsi_eeh_reset,
@@ -5846,9 +5903,9 @@ struct iscsi_transport beiscsi_iscsi_transport = {
        .create_conn = beiscsi_conn_create,
        .bind_conn = beiscsi_conn_bind,
        .destroy_conn = iscsi_conn_teardown,
-       .attr_is_visible = be2iscsi_attr_is_visible,
-       .set_iface_param = be2iscsi_iface_set_param,
-       .get_iface_param = be2iscsi_iface_get_param,
+       .attr_is_visible = beiscsi_attr_is_visible,
+       .set_iface_param = beiscsi_iface_set_param,
+       .get_iface_param = beiscsi_iface_get_param,
        .set_param = beiscsi_set_param,
        .get_conn_param = iscsi_conn_get_param,
        .get_session_param = iscsi_session_get_param,
@@ -5877,7 +5934,6 @@ static struct pci_driver beiscsi_pci_driver = {
        .err_handler = &beiscsi_eeh_handlers
 };
 
-
 static int __init beiscsi_module_init(void)
 {
        int ret;
index 30a4606d9a3b9fa0abff5d5c027817c0642874b8..6376657e45f7d3e5d2ad68ca6a34dc4916e835c7 100644 (file)
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2015 Emulex
+ * Copyright (C) 2005 - 2016 Broadcom
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -7,10 +7,10 @@
  * as published by the Free Software Foundation.  The full GNU General
  * Public License is included in this distribution in the file called COPYING.
  *
- * Written by: Jayamohan Kallickal (jayamohan.kallickal@avagotech.com)
+ * Written by: Jayamohan Kallickal (jayamohan.kallickal@broadcom.com)
  *
  * Contact Information:
- * linux-drivers@avagotech.com
+ * linux-drivers@broadcom.com
  *
  * Emulex
  * 3333 Susan Street
@@ -36,7 +36,7 @@
 #include <scsi/scsi_transport_iscsi.h>
 
 #define DRV_NAME               "be2iscsi"
-#define BUILD_STR              "11.0.0.0"
+#define BUILD_STR              "11.2.0.0"
 #define BE_NAME                        "Emulex OneConnect" \
                                "Open-iSCSI Driver version" BUILD_STR
 #define DRV_DESC               BE_NAME " " "Driver"
 #define BEISCSI_MAX_FRAGS_INIT 192
 #define BE_NUM_MSIX_ENTRIES    1
 
-#define MPU_EP_CONTROL          0
-#define MPU_EP_SEMAPHORE        0xac
-#define BE2_SOFT_RESET          0x5c
-#define BE2_PCI_ONLINE0         0xb0
-#define BE2_PCI_ONLINE1         0xb4
-#define BE2_SET_RESET           0x80
-#define BE2_MPU_IRAM_ONLINE     0x00000080
-
 #define BE_SENSE_INFO_SIZE             258
 #define BE_ISCSI_PDU_HEADER_SIZE       64
 #define BE_MIN_MEM_SIZE                        16384
 #define MAX_CMD_SZ                     65536
 #define IIOC_SCSI_DATA                  0x05   /* Write Operation */
 
-#define INVALID_SESS_HANDLE    0xFFFFFFFF
-
-/**
- * Adapter States
- **/
-#define BE_ADAPTER_LINK_UP     0x001
-#define BE_ADAPTER_LINK_DOWN   0x002
-#define BE_ADAPTER_PCI_ERR     0x004
-#define BE_ADAPTER_CHECK_BOOT  0x008
-
-
-#define BEISCSI_CLEAN_UNLOAD   0x01
-#define BEISCSI_EEH_UNLOAD     0x02
-
-#define BE_GET_BOOT_RETRIES    45
-#define BE_GET_BOOT_TO         20
 /**
  * hardware needs the async PDU buffers to be posted in multiples of 8
  * So have at least 8 of them by default
@@ -378,7 +354,6 @@ struct beiscsi_hba {
        struct sgl_handle **eh_sgl_hndl_base;
        spinlock_t io_sgl_lock;
        spinlock_t mgmt_sgl_lock;
-       spinlock_t isr_lock;
        spinlock_t async_pdu_lock;
        unsigned int age;
        struct list_head hba_queue;
@@ -390,7 +365,6 @@ struct beiscsi_hba {
        struct ulp_cid_info *cid_array_info[BEISCSI_ULP_COUNT];
        struct iscsi_endpoint **ep_array;
        struct beiscsi_conn **conn_table;
-       struct iscsi_boot_kset *boot_kset;
        struct Scsi_Host *shost;
        struct iscsi_iface *ipv4_iface;
        struct iscsi_iface *ipv6_iface;
@@ -418,12 +392,33 @@ struct beiscsi_hba {
                unsigned long ulp_supported;
        } fw_config;
 
-       unsigned int state;
+       unsigned long state;
+#define BEISCSI_HBA_ONLINE     0
+#define BEISCSI_HBA_LINK_UP    1
+#define BEISCSI_HBA_BOOT_FOUND 2
+#define BEISCSI_HBA_BOOT_WORK  3
+#define BEISCSI_HBA_UER_SUPP   4
+#define BEISCSI_HBA_PCI_ERR    5
+#define BEISCSI_HBA_FW_TIMEOUT 6
+#define BEISCSI_HBA_IN_UE      7
+#define BEISCSI_HBA_IN_TPE     8
+
+/* error bits */
+#define BEISCSI_HBA_IN_ERR     ((1 << BEISCSI_HBA_PCI_ERR) | \
+                                (1 << BEISCSI_HBA_FW_TIMEOUT) | \
+                                (1 << BEISCSI_HBA_IN_UE) | \
+                                (1 << BEISCSI_HBA_IN_TPE))
+
        u8 optic_state;
-       int get_boot;
-       bool fw_timeout;
-       bool ue_detected;
-       struct delayed_work beiscsi_hw_check_task;
+       struct delayed_work eqd_update;
+       /* update EQ delay timer every 1000ms */
+#define BEISCSI_EQD_UPDATE_INTERVAL    1000
+       struct timer_list hw_check;
+       /* check for UE every 1000ms */
+#define BEISCSI_UE_DETECT_INTERVAL     1000
+       u32 ue2rp;
+       struct delayed_work recover_port;
+       struct work_struct sess_work;
 
        bool mac_addr_set;
        u8 mac_address[ETH_ALEN];
@@ -435,7 +430,6 @@ struct beiscsi_hba {
        struct be_ctrl_info ctrl;
        unsigned int generation;
        unsigned int interface_handle;
-       struct mgmt_session_info boot_sess;
        struct invalidate_command_table inv_tbl[128];
 
        struct be_aic_obj aic_obj[MAX_CPUS];
@@ -444,8 +438,29 @@ struct beiscsi_hba {
                        struct scatterlist *sg,
                        uint32_t num_sg, uint32_t xferlen,
                        uint32_t writedir);
+       struct boot_struct {
+               int retry;
+               unsigned int tag;
+               unsigned int s_handle;
+               struct be_dma_mem nonemb_cmd;
+               enum {
+                       BEISCSI_BOOT_REOPEN_SESS = 1,
+                       BEISCSI_BOOT_GET_SHANDLE,
+                       BEISCSI_BOOT_GET_SINFO,
+                       BEISCSI_BOOT_LOGOUT_SESS,
+                       BEISCSI_BOOT_CREATE_KSET,
+               } action;
+               struct mgmt_session_info boot_sess;
+               struct iscsi_boot_kset *boot_kset;
+       } boot_struct;
+       struct work_struct boot_work;
 };
 
+#define beiscsi_hba_in_error(phba) ((phba)->state & BEISCSI_HBA_IN_ERR)
+#define beiscsi_hba_is_online(phba) \
+       (!beiscsi_hba_in_error((phba)) && \
+        test_bit(BEISCSI_HBA_ONLINE, &phba->state))
+
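The flag word above moves from a plain unsigned int to atomic bitops on an unsigned long: single bits are set/tested with set_bit()/test_bit(), while the composed BEISCSI_HBA_IN_ERR mask is checked with a plain read. An illustrative sketch (the demo_ values are not the driver's):

#include <linux/bitops.h>

#define DEMO_ONLINE	0			  /* bit number for *_bit() */
#define DEMO_ERR_MASK	((1UL << 1) | (1UL << 2)) /* mask for plain reads */

static unsigned long demo_state;

static bool demo_is_online(void)
{
	return !(demo_state & DEMO_ERR_MASK) &&
	       test_bit(DEMO_ONLINE, &demo_state);
}

static void demo_take_offline(void)
{
	/* test_and_clear_bit() lets exactly one caller do the teardown */
	if (test_and_clear_bit(DEMO_ONLINE, &demo_state))
		; /* free port resources exactly once */
}
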
 struct beiscsi_session {
        struct pci_pool *bhs_pool;
 };
@@ -508,6 +523,7 @@ struct beiscsi_io_task {
        struct sgl_handle *psgl_handle;
        struct beiscsi_conn *conn;
        struct scsi_cmnd *scsi_cmnd;
+       int num_sg;
        struct hwi_wrb_context *pwrb_context;
        unsigned int cmd_sn;
        unsigned int flags;
@@ -592,80 +608,81 @@ struct amap_beiscsi_offload_params {
        u8 max_recv_data_segment_length[32];
 };
 
-/* void hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
-               struct beiscsi_hba *phba, struct sol_cqe *psol);*/
-
-struct async_pdu_handle {
+struct hd_async_handle {
        struct list_head link;
        struct be_bus_address pa;
        void *pbuffer;
-       unsigned int consumed;
-       unsigned char index;
-       unsigned char is_header;
-       unsigned short cri;
-       unsigned long buffer_len;
+       u32 buffer_len;
+       u16 index;
+       u16 cri;
+       u8 is_header;
+       u8 is_final;
 };
 
-struct hwi_async_entry {
-       struct {
-               unsigned char hdr_received;
-               unsigned char hdr_len;
-               unsigned short bytes_received;
+/**
+ * This has list of async PDUs that are waiting to be processed.
+ * Buffers live in this list for a brief duration before they get
+ * processed and posted back to hardware.
+ * Note that we don't really need one cri_wait_queue per async_entry.
+ * We need one cri_wait_queue per CRI. It's easier to manage if it
+ * is kept together with the async_entry.
+ */
+struct hd_async_entry {
+       struct cri_wait_queue {
+               unsigned short hdr_len;
+               unsigned int bytes_received;
                unsigned int bytes_needed;
                struct list_head list;
-       } wait_queue;
-
-       struct list_head header_busy_list;
-       struct list_head data_busy_list;
+       } wq;
+       /* handles posted to FW reside here */
+       struct hd_async_handle *header;
+       struct hd_async_handle *data;
 };
 
-struct hwi_async_pdu_context {
-       struct {
-               struct be_bus_address pa_base;
-               void *va_base;
-               void *ring_base;
-               struct async_pdu_handle *handle_base;
-
-               unsigned int host_write_ptr;
-               unsigned int ep_read_ptr;
-               unsigned int writables;
-
-               unsigned int free_entries;
-               unsigned int busy_entries;
-
-               struct list_head free_list;
-       } async_header;
+struct hd_async_buf_context {
+       struct be_bus_address pa_base;
+       void *va_base;
+       void *ring_base;
+       struct hd_async_handle *handle_base;
+       u16 free_entries;
+       u32 buffer_size;
+       /**
+        * Once the iSCSI layer finishes processing an async PDU, the
+        * handles used for the PDU are added to this list.
+        * They are posted back to FW in groups of 8.
+        */
+       struct list_head free_list;
+};
 
-       struct {
-               struct be_bus_address pa_base;
-               void *va_base;
-               void *ring_base;
-               struct async_pdu_handle *handle_base;
-
-               unsigned int host_write_ptr;
-               unsigned int ep_read_ptr;
-               unsigned int writables;
-
-               unsigned int free_entries;
-               unsigned int busy_entries;
-               struct list_head free_list;
-       } async_data;
-
-       unsigned int buffer_size;
-       unsigned int num_entries;
+/**
+ * hd_async_context is declared for each ULP on which the iSCSI function is loaded.
+ */
+struct hd_async_context {
+       struct hd_async_buf_context async_header;
+       struct hd_async_buf_context async_data;
+       u16 num_entries;
+       /**
+        * When an unsol PDU comes in, it is chained until all of its
+        * bytes are received and only then processed. A hd_async_entry
+        * is created per CID based on the cid_count of each ULP. When
+        * an unsol PDU arrives, it is added to the correct async_entry
+        * wq based on its conn_id. The cid_to_async_cri_map defined
+        * below is used to retrieve the async_cri for a particular
+        * connection.
+        *
+        * This array is initialized after beiscsi_create_wrb_rings returns.
+        *
+        * - this method takes more memory space, fixed to 2K
+        * - to support more connections than this, the array size needs
+        *   to be increased
+        */
 #define BE_GET_ASYNC_CRI_FROM_CID(cid) (pasync_ctx->cid_to_async_cri_map[cid])
        unsigned short cid_to_async_cri_map[BE_MAX_SESSION];
        /**
-        * This is a varying size list! Do not add anything
-        * after this entry!!
+        * This is a variable-size array. Don't add anything after this field!
         */
-       struct hwi_async_entry *async_entry;
+       struct hd_async_entry *async_entry;
 };
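
Because async_entry must remain the last member, the context and its per-connection entries can come from one allocation, with the entry array placed directly after the fixed part. A hypothetical sketch of that layout (the driver itself carves the context out of its pre-allocated memory descriptors):

#include <linux/slab.h>

static struct hd_async_context *demo_alloc_async_ctx(u16 num_cri)
{
	struct hd_async_context *ctx;

	ctx = kzalloc(sizeof(*ctx) + num_cri * sizeof(struct hd_async_entry),
		      GFP_KERNEL);
	if (!ctx)
		return NULL;
	/* entries live immediately after the struct; nothing may follow */
	ctx->async_entry = (struct hd_async_entry *)(ctx + 1);
	ctx->num_entries = num_cri;
	return ctx;
}
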
 
-#define PDUCQE_CODE_MASK       0x0000003F
-#define PDUCQE_DPL_MASK                0xFFFF0000
-#define PDUCQE_INDEX_MASK      0x0000FFFF
-
 struct i_t_dpdu_cqe {
        u32 dw[4];
 } __packed;
@@ -845,7 +862,6 @@ struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid,
 void
 free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle);
 
-void beiscsi_process_all_cqs(struct work_struct *work);
 void beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn,
                                     struct iscsi_task *task);
 
@@ -856,11 +872,6 @@ void hwi_ring_cq_db(struct beiscsi_hba *phba,
 unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq, int budget);
 void beiscsi_process_mcc_cq(struct beiscsi_hba *phba);
 
-static inline bool beiscsi_error(struct beiscsi_hba *phba)
-{
-       return phba->ue_detected || phba->fw_timeout;
-}
-
 struct pdu_nop_out {
        u32 dw[12];
 };
@@ -1067,11 +1078,18 @@ struct hwi_context_memory {
        struct be_queue_info be_cq[MAX_CPUS - 1];
 
        struct be_queue_info *be_wrbq;
+       /**
+        * The entries below are arrays indexed by ULP number because
+        * a DEFQ is created for each ULP on which the iSCSI protocol
+        * is loaded.
+        */
        struct be_queue_info be_def_hdrq[BEISCSI_ULP_COUNT];
        struct be_queue_info be_def_dataq[BEISCSI_ULP_COUNT];
-       struct hwi_async_pdu_context *pasync_ctx[BEISCSI_ULP_COUNT];
+       struct hd_async_context *pasync_ctx[BEISCSI_ULP_COUNT];
 };
 
+void beiscsi_start_boot_work(struct beiscsi_hba *phba, unsigned int s_handle);
+
 /* Logging related definitions */
 #define BEISCSI_LOG_INIT       0x0001  /* Initialization events */
 #define BEISCSI_LOG_MBOX       0x0002  /* Mailbox Events */
index 83926e221f1ecd3b55f05a0770d107d8c170c40b..aebc4ddb3060ee177c06ec9f11b62a021c346f10 100644 (file)
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2015 Emulex
+ * Copyright (C) 2005 - 2016 Broadcom
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -7,10 +7,10 @@
  * as published by the Free Software Foundation.  The full GNU General
  * Public License is included in this distribution in the file called COPYING.
  *
- * Written by: Jayamohan Kallickal (jayamohan.kallickal@avagotech.com)
+ * Written by: Jayamohan Kallickal (jayamohan.kallickal@broadcom.com)
  *
  * Contact Information:
- * linux-drivers@avagotech.com
+ * linux-drivers@broadcom.com
  *
  * Emulex
  * 3333 Susan Street
 #include "be_iscsi.h"
 #include "be_main.h"
 
-/* UE Status Low CSR */
-static const char * const desc_ue_status_low[] = {
-       "CEV",
-       "CTX",
-       "DBUF",
-       "ERX",
-       "Host",
-       "MPU",
-       "NDMA",
-       "PTC ",
-       "RDMA ",
-       "RXF ",
-       "RXIPS ",
-       "RXULP0 ",
-       "RXULP1 ",
-       "RXULP2 ",
-       "TIM ",
-       "TPOST ",
-       "TPRE ",
-       "TXIPS ",
-       "TXULP0 ",
-       "TXULP1 ",
-       "UC ",
-       "WDMA ",
-       "TXULP2 ",
-       "HOST1 ",
-       "P0_OB_LINK ",
-       "P1_OB_LINK ",
-       "HOST_GPIO ",
-       "MBOX ",
-       "AXGMAC0",
-       "AXGMAC1",
-       "JTAG",
-       "MPU_INTPEND"
-};
-
-/* UE Status High CSR */
-static const char * const desc_ue_status_hi[] = {
-       "LPCMEMHOST",
-       "MGMT_MAC",
-       "PCS0ONLINE",
-       "MPU_IRAM",
-       "PCS1ONLINE",
-       "PCTL0",
-       "PCTL1",
-       "PMEM",
-       "RR",
-       "TXPB",
-       "RXPP",
-       "XAUI",
-       "TXP",
-       "ARM",
-       "IPC",
-       "HOST2",
-       "HOST3",
-       "HOST4",
-       "HOST5",
-       "HOST6",
-       "HOST7",
-       "HOST8",
-       "HOST9",
-       "NETC",
-       "Unknown",
-       "Unknown",
-       "Unknown",
-       "Unknown",
-       "Unknown",
-       "Unknown",
-       "Unknown",
-       "Unknown"
-};
-
-/*
- * beiscsi_ue_detec()- Detect Unrecoverable Error on adapter
- * @phba: Driver priv structure
- *
- * Read registers linked to UE and check for the UE status
- **/
-void beiscsi_ue_detect(struct beiscsi_hba *phba)
-{
-       uint32_t ue_hi = 0, ue_lo = 0;
-       uint32_t ue_mask_hi = 0, ue_mask_lo = 0;
-       uint8_t i = 0;
-
-       if (phba->ue_detected)
-               return;
-
-       pci_read_config_dword(phba->pcidev,
-                             PCICFG_UE_STATUS_LOW, &ue_lo);
-       pci_read_config_dword(phba->pcidev,
-                             PCICFG_UE_STATUS_MASK_LOW,
-                             &ue_mask_lo);
-       pci_read_config_dword(phba->pcidev,
-                             PCICFG_UE_STATUS_HIGH,
-                             &ue_hi);
-       pci_read_config_dword(phba->pcidev,
-                             PCICFG_UE_STATUS_MASK_HI,
-                             &ue_mask_hi);
-
-       ue_lo = (ue_lo & ~ue_mask_lo);
-       ue_hi = (ue_hi & ~ue_mask_hi);
-
-
-       if (ue_lo || ue_hi) {
-               phba->ue_detected = true;
-               beiscsi_log(phba, KERN_ERR,
-                           BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
-                           "BG_%d : Error detected on the adapter\n");
-       }
-
-       if (ue_lo) {
-               for (i = 0; ue_lo; ue_lo >>= 1, i++) {
-                       if (ue_lo & 1)
-                               beiscsi_log(phba, KERN_ERR,
-                                           BEISCSI_LOG_CONFIG,
-                                           "BG_%d : UE_LOW %s bit set\n",
-                                           desc_ue_status_low[i]);
-               }
-       }
-
-       if (ue_hi) {
-               for (i = 0; ue_hi; ue_hi >>= 1, i++) {
-                       if (ue_hi & 1)
-                               beiscsi_log(phba, KERN_ERR,
-                                           BEISCSI_LOG_CONFIG,
-                                           "BG_%d : UE_HIGH %s bit set\n",
-                                           desc_ue_status_hi[i]);
-               }
-       }
-}
-
-int be_cmd_modify_eq_delay(struct beiscsi_hba *phba,
-                struct be_set_eqd *set_eqd, int num)
+int beiscsi_modify_eq_delay(struct beiscsi_hba *phba,
+                           struct be_set_eqd *set_eqd,
+                           int num)
 {
        struct be_ctrl_info *ctrl = &phba->ctrl;
        struct be_mcc_wrb *wrb;
@@ -174,7 +44,7 @@ int be_cmd_modify_eq_delay(struct beiscsi_hba *phba,
        req = embedded_payload(wrb);
        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-               OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));
+                          OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));
 
        req->num_eq = cpu_to_le32(num);
        for (i = 0; i < num; i++) {
@@ -184,386 +54,13 @@ int be_cmd_modify_eq_delay(struct beiscsi_hba *phba,
                                cpu_to_le32(set_eqd[i].delay_multiplier);
        }
 
+       /* ignore the completion of this mbox command */
+       set_bit(MCC_TAG_STATE_IGNORE, &ctrl->ptag_state[tag].tag_state);
        be_mcc_notify(phba, tag);
        mutex_unlock(&ctrl->mbox_lock);
        return tag;
 }
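
Setting MCC_TAG_STATE_IGNORE before be_mcc_notify() makes this a fire-and-forget command: no caller blocks on the tag, so the completion path must recycle it. A hypothetical sketch of that completion-side handling (demo_* types stand in for the driver's be_ctrl_info, and DEMO_TAG_IGNORE for MCC_TAG_STATE_IGNORE):

#include <linux/wait.h>
#include <linux/bitops.h>

#define DEMO_TAG_IGNORE	0	/* stands in for MCC_TAG_STATE_IGNORE */

struct demo_ctrl {
	unsigned long tag_state[64];
	wait_queue_head_t mcc_wait[64];
};

static void demo_free_tag(struct demo_ctrl *ctrl, unsigned int tag)
{
	/* return the tag to the free pool (elided) */
}

static void demo_mcc_complete(struct demo_ctrl *ctrl, unsigned int tag)
{
	/* fire-and-forget: nobody waits, so recycle the tag here */
	if (test_and_clear_bit(DEMO_TAG_IGNORE, &ctrl->tag_state[tag])) {
		demo_free_tag(ctrl, tag);
		return;
	}
	/* normal path: wake the submitter blocked on this tag */
	wake_up_interruptible(&ctrl->mcc_wait[tag]);
}
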
 
-/**
- * mgmt_reopen_session()- Reopen a session based on reopen_type
- * @phba: Device priv structure instance
- * @reopen_type: Type of reopen_session FW should do.
- * @sess_handle: Session Handle of the session to be re-opened
- *
- * return
- *     the TAG used for MBOX Command
- *
- **/
-unsigned int mgmt_reopen_session(struct beiscsi_hba *phba,
-                                 unsigned int reopen_type,
-                                 unsigned int sess_handle)
-{
-       struct be_ctrl_info *ctrl = &phba->ctrl;
-       struct be_mcc_wrb *wrb;
-       struct be_cmd_reopen_session_req *req;
-       unsigned int tag;
-
-       beiscsi_log(phba, KERN_INFO,
-                   BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
-                   "BG_%d : In bescsi_get_boot_target\n");
-
-       mutex_lock(&ctrl->mbox_lock);
-       wrb = alloc_mcc_wrb(phba, &tag);
-       if (!wrb) {
-               mutex_unlock(&ctrl->mbox_lock);
-               return 0;
-       }
-
-       req = embedded_payload(wrb);
-       be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
-       be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
-                          OPCODE_ISCSI_INI_DRIVER_REOPEN_ALL_SESSIONS,
-                          sizeof(struct be_cmd_reopen_session_resp));
-
-       /* set the reopen_type,sess_handle */
-       req->reopen_type = reopen_type;
-       req->session_handle = sess_handle;
-
-       be_mcc_notify(phba, tag);
-       mutex_unlock(&ctrl->mbox_lock);
-       return tag;
-}
-
-unsigned int mgmt_get_boot_target(struct beiscsi_hba *phba)
-{
-       struct be_ctrl_info *ctrl = &phba->ctrl;
-       struct be_mcc_wrb *wrb;
-       struct be_cmd_get_boot_target_req *req;
-       unsigned int tag;
-
-       beiscsi_log(phba, KERN_INFO,
-                   BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
-                   "BG_%d : In bescsi_get_boot_target\n");
-
-       mutex_lock(&ctrl->mbox_lock);
-       wrb = alloc_mcc_wrb(phba, &tag);
-       if (!wrb) {
-               mutex_unlock(&ctrl->mbox_lock);
-               return 0;
-       }
-
-       req = embedded_payload(wrb);
-       be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
-       be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
-                          OPCODE_ISCSI_INI_BOOT_GET_BOOT_TARGET,
-                          sizeof(struct be_cmd_get_boot_target_resp));
-
-       be_mcc_notify(phba, tag);
-       mutex_unlock(&ctrl->mbox_lock);
-       return tag;
-}
-
-unsigned int mgmt_get_session_info(struct beiscsi_hba *phba,
-                                  u32 boot_session_handle,
-                                  struct be_dma_mem *nonemb_cmd)
-{
-       struct be_ctrl_info *ctrl = &phba->ctrl;
-       struct be_mcc_wrb *wrb;
-       unsigned int tag;
-       struct  be_cmd_get_session_req *req;
-       struct be_cmd_get_session_resp *resp;
-       struct be_sge *sge;
-
-       beiscsi_log(phba, KERN_INFO,
-                   BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
-                   "BG_%d : In beiscsi_get_session_info\n");
-
-       mutex_lock(&ctrl->mbox_lock);
-       wrb = alloc_mcc_wrb(phba, &tag);
-       if (!wrb) {
-               mutex_unlock(&ctrl->mbox_lock);
-               return 0;
-       }
-
-       nonemb_cmd->size = sizeof(*resp);
-       req = nonemb_cmd->va;
-       memset(req, 0, sizeof(*req));
-       sge = nonembedded_sgl(wrb);
-       be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
-       be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
-                          OPCODE_ISCSI_INI_SESSION_GET_A_SESSION,
-                          sizeof(*resp));
-       req->session_handle = boot_session_handle;
-       sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
-       sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
-       sge->len = cpu_to_le32(nonemb_cmd->size);
-
-       be_mcc_notify(phba, tag);
-       mutex_unlock(&ctrl->mbox_lock);
-       return tag;
-}
-
-/**
- * mgmt_get_port_name()- Get port name for the function
- * @ctrl: ptr to Ctrl Info
- * @phba: ptr to the dev priv structure
- *
- * Get the alphanumeric character for port
- *
- **/
-int mgmt_get_port_name(struct be_ctrl_info *ctrl,
-                      struct beiscsi_hba *phba)
-{
-       int ret = 0;
-       struct be_mcc_wrb *wrb;
-       struct be_cmd_get_port_name *ioctl;
-
-       mutex_lock(&ctrl->mbox_lock);
-       wrb = wrb_from_mbox(&ctrl->mbox_mem);
-       memset(wrb, 0, sizeof(*wrb));
-       ioctl = embedded_payload(wrb);
-
-       be_wrb_hdr_prepare(wrb, sizeof(*ioctl), true, 0);
-       be_cmd_hdr_prepare(&ioctl->h.req_hdr, CMD_SUBSYSTEM_COMMON,
-                          OPCODE_COMMON_GET_PORT_NAME,
-                          EMBED_MBX_MAX_PAYLOAD_SIZE);
-       ret = be_mbox_notify(ctrl);
-       phba->port_name = 0;
-       if (!ret) {
-               phba->port_name = ioctl->p.resp.port_names >>
-                                 (phba->fw_config.phys_port * 8) & 0xff;
-       } else {
-               beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
-                           "BG_%d : GET_PORT_NAME ret 0x%x status 0x%x\n",
-                           ret, ioctl->h.resp_hdr.status);
-       }
-
-       if (phba->port_name == 0)
-               phba->port_name = '?';
-
-       mutex_unlock(&ctrl->mbox_lock);
-       return ret;
-}
-
-/**
- * mgmt_get_fw_config()- Get the FW config for the function
- * @ctrl: ptr to Ctrl Info
- * @phba: ptr to the dev priv structure
- *
- * Get the FW config and resources available for the function.
- * The resources are created based on the count received here.
- *
- * return
- *     Success: 0
- *     Failure: Non-Zero Value
- **/
-int mgmt_get_fw_config(struct be_ctrl_info *ctrl,
-                               struct beiscsi_hba *phba)
-{
-       struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
-       struct be_fw_cfg *pfw_cfg = embedded_payload(wrb);
-       uint32_t cid_count, icd_count;
-       int status = -EINVAL;
-       uint8_t ulp_num = 0;
-
-       mutex_lock(&ctrl->mbox_lock);
-       memset(wrb, 0, sizeof(*wrb));
-       be_wrb_hdr_prepare(wrb, sizeof(*pfw_cfg), true, 0);
-
-       be_cmd_hdr_prepare(&pfw_cfg->hdr, CMD_SUBSYSTEM_COMMON,
-                          OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
-                          EMBED_MBX_MAX_PAYLOAD_SIZE);
-
-       if (be_mbox_notify(ctrl)) {
-               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-                           "BG_%d : Failed in mgmt_get_fw_config\n");
-               goto fail_init;
-       }
-
-       /* FW response formats depend on port id */
-       phba->fw_config.phys_port = pfw_cfg->phys_port;
-       if (phba->fw_config.phys_port >= BEISCSI_PHYS_PORT_MAX) {
-               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-                           "BG_%d : invalid physical port id %d\n",
-                           phba->fw_config.phys_port);
-               goto fail_init;
-       }
-
-       /* populate and check FW config against min and max values */
-       if (!is_chip_be2_be3r(phba)) {
-               phba->fw_config.eqid_count = pfw_cfg->eqid_count;
-               phba->fw_config.cqid_count = pfw_cfg->cqid_count;
-               if (phba->fw_config.eqid_count == 0 ||
-                   phba->fw_config.eqid_count > 2048) {
-                       beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-                                   "BG_%d : invalid EQ count %d\n",
-                                   phba->fw_config.eqid_count);
-                       goto fail_init;
-               }
-               if (phba->fw_config.cqid_count == 0 ||
-                   phba->fw_config.cqid_count > 4096) {
-                       beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-                                   "BG_%d : invalid CQ count %d\n",
-                                   phba->fw_config.cqid_count);
-                       goto fail_init;
-               }
-               beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
-                           "BG_%d : EQ_Count : %d CQ_Count : %d\n",
-                           phba->fw_config.eqid_count,
-                           phba->fw_config.cqid_count);
-       }
-
-       /**
-        * Check on which all ULP iSCSI Protocol is loaded.
-        * Set the Bit for those ULP. This set flag is used
-        * at all places in the code to check on which ULP
-        * iSCSi Protocol is loaded
-        **/
-       for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
-               if (pfw_cfg->ulp[ulp_num].ulp_mode &
-                   BEISCSI_ULP_ISCSI_INI_MODE) {
-                       set_bit(ulp_num, &phba->fw_config.ulp_supported);
-
-                       /* Get the CID, ICD and Chain count for each ULP */
-                       phba->fw_config.iscsi_cid_start[ulp_num] =
-                               pfw_cfg->ulp[ulp_num].sq_base;
-                       phba->fw_config.iscsi_cid_count[ulp_num] =
-                               pfw_cfg->ulp[ulp_num].sq_count;
-
-                       phba->fw_config.iscsi_icd_start[ulp_num] =
-                               pfw_cfg->ulp[ulp_num].icd_base;
-                       phba->fw_config.iscsi_icd_count[ulp_num] =
-                               pfw_cfg->ulp[ulp_num].icd_count;
-
-                       phba->fw_config.iscsi_chain_start[ulp_num] =
-                               pfw_cfg->chain_icd[ulp_num].chain_base;
-                       phba->fw_config.iscsi_chain_count[ulp_num] =
-                               pfw_cfg->chain_icd[ulp_num].chain_count;
-
-                       beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
-                                   "BG_%d : Function loaded on ULP : %d\n"
-                                   "\tiscsi_cid_count : %d\n"
-                                   "\tiscsi_cid_start : %d\n"
-                                   "\t iscsi_icd_count : %d\n"
-                                   "\t iscsi_icd_start : %d\n",
-                                   ulp_num,
-                                   phba->fw_config.
-                                   iscsi_cid_count[ulp_num],
-                                   phba->fw_config.
-                                   iscsi_cid_start[ulp_num],
-                                   phba->fw_config.
-                                   iscsi_icd_count[ulp_num],
-                                   phba->fw_config.
-                                   iscsi_icd_start[ulp_num]);
-               }
-       }
-
-       if (phba->fw_config.ulp_supported == 0) {
-               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-                           "BG_%d : iSCSI initiator mode not set: ULP0 %x ULP1 %x\n",
-                           pfw_cfg->ulp[BEISCSI_ULP0].ulp_mode,
-                           pfw_cfg->ulp[BEISCSI_ULP1].ulp_mode);
-               goto fail_init;
-       }
-
-       /**
-        * ICD is shared among ULPs. Use icd_count of any one loaded ULP
-        **/
-       for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
-               if (test_bit(ulp_num, &phba->fw_config.ulp_supported))
-                       break;
-       icd_count = phba->fw_config.iscsi_icd_count[ulp_num];
-       if (icd_count == 0 || icd_count > 65536) {
-               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-                           "BG_%d: invalid ICD count %d\n", icd_count);
-               goto fail_init;
-       }
-
-       cid_count = BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP0) +
-                   BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP1);
-       if (cid_count == 0 || cid_count > 4096) {
-               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-                           "BG_%d: invalid CID count %d\n", cid_count);
-               goto fail_init;
-       }
-
-       /**
-        * Check FW is dual ULP aware i.e. can handle either
-        * of the protocols.
-        */
-       phba->fw_config.dual_ulp_aware = (pfw_cfg->function_mode &
-                                         BEISCSI_FUNC_DUA_MODE);
-
-       beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
-                   "BG_%d : DUA Mode : 0x%x\n",
-                   phba->fw_config.dual_ulp_aware);
-
-       /* all set, continue using this FW config */
-       status = 0;
-fail_init:
-       mutex_unlock(&ctrl->mbox_lock);
-       return status;
-}
-
-int mgmt_check_supported_fw(struct be_ctrl_info *ctrl,
-                                     struct beiscsi_hba *phba)
-{
-       struct be_dma_mem nonemb_cmd;
-       struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
-       struct be_mgmt_controller_attributes *req;
-       struct be_sge *sge = nonembedded_sgl(wrb);
-       int status = 0;
-
-       nonemb_cmd.va = pci_alloc_consistent(ctrl->pdev,
-                               sizeof(struct be_mgmt_controller_attributes),
-                               &nonemb_cmd.dma);
-       if (nonemb_cmd.va == NULL) {
-               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-                           "BG_%d : Failed to allocate memory for "
-                           "mgmt_check_supported_fw\n");
-               return -ENOMEM;
-       }
-       nonemb_cmd.size = sizeof(struct be_mgmt_controller_attributes);
-       req = nonemb_cmd.va;
-       memset(req, 0, sizeof(*req));
-       mutex_lock(&ctrl->mbox_lock);
-       memset(wrb, 0, sizeof(*wrb));
-       be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
-       be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-                          OPCODE_COMMON_GET_CNTL_ATTRIBUTES, sizeof(*req));
-       sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd.dma));
-       sge->pa_lo = cpu_to_le32(nonemb_cmd.dma & 0xFFFFFFFF);
-       sge->len = cpu_to_le32(nonemb_cmd.size);
-       status = be_mbox_notify(ctrl);
-       if (!status) {
-               struct be_mgmt_controller_attributes_resp *resp = nonemb_cmd.va;
-               beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
-                           "BG_%d : Firmware Version of CMD : %s\n"
-                           "Firmware Version is : %s\n"
-                           "Developer Build, not performing version check...\n",
-                           resp->params.hba_attribs
-                           .flashrom_version_string,
-                           resp->params.hba_attribs.
-                           firmware_version_string);
-
-               phba->fw_config.iscsi_features =
-                               resp->params.hba_attribs.iscsi_features;
-               beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
-                           "BM_%d : phba->fw_config.iscsi_features = %d\n",
-                           phba->fw_config.iscsi_features);
-               memcpy(phba->fw_ver_str, resp->params.hba_attribs.
-                      firmware_version_string, BEISCSI_VER_STRLEN);
-       } else
-               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-                           "BG_%d :  Failed in mgmt_check_supported_fw\n");
-       mutex_unlock(&ctrl->mbox_lock);
-       if (nonemb_cmd.va)
-               pci_free_consistent(ctrl->pdev, nonemb_cmd.size,
-                                   nonemb_cmd.va, nonemb_cmd.dma);
-
-       return status;
-}
-
 unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
                                         struct beiscsi_hba *phba,
                                         struct bsg_job *job,
@@ -609,7 +106,7 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
                            bsg_req->rqst_data.h_vendor.vendor_cmd[0]);
 
                mutex_unlock(&ctrl->mbox_lock);
-               return -ENOSYS;
+               return -EPERM;
        }
 
        wrb = alloc_mcc_wrb(phba, &tag);
@@ -631,48 +128,6 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
        return tag;
 }
 
-/**
- * mgmt_epfw_cleanup()- Inform FW to cleanup data structures.
- * @phba: pointer to dev priv structure
- * @ulp_num: ULP number.
- *
- * return
- *     Success: 0
- *     Failure: Non-Zero Value
- **/
-int mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short ulp_num)
-{
-       struct be_ctrl_info *ctrl = &phba->ctrl;
-       struct be_mcc_wrb *wrb;
-       struct iscsi_cleanup_req *req;
-       unsigned int tag;
-       int status;
-
-       mutex_lock(&ctrl->mbox_lock);
-       wrb = alloc_mcc_wrb(phba, &tag);
-       if (!wrb) {
-               mutex_unlock(&ctrl->mbox_lock);
-               return -EBUSY;
-       }
-
-       req = embedded_payload(wrb);
-       be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
-       be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
-                          OPCODE_COMMON_ISCSI_CLEANUP, sizeof(*req));
-
-       req->chute = (1 << ulp_num);
-       req->hdr_ring_id = cpu_to_le16(HWI_GET_DEF_HDRQ_ID(phba, ulp_num));
-       req->data_ring_id = cpu_to_le16(HWI_GET_DEF_BUFQ_ID(phba, ulp_num));
-
-       be_mcc_notify(phba, tag);
-       status = be_mcc_compl_poll(phba, tag);
-       if (status)
-               beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
-                           "BG_%d : mgmt_epfw_cleanup , FAILED\n");
-       mutex_unlock(&ctrl->mbox_lock);
-       return status;
-}
-
 unsigned int  mgmt_invalidate_icds(struct beiscsi_hba *phba,
                                struct invalidate_command_table *inv_tbl,
                                unsigned int num_invalidate, unsigned int cid,
@@ -844,7 +299,7 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
                           nonemb_cmd->size);
        if (dst_addr->sa_family == PF_INET) {
                __be32 s_addr = daddr_in->sin_addr.s_addr;
-               req->ip_address.ip_type = BE2_IPV4;
+               req->ip_address.ip_type = BEISCSI_IP_TYPE_V4;
                req->ip_address.addr[0] = s_addr & 0x000000ff;
                req->ip_address.addr[1] = (s_addr & 0x0000ff00) >> 8;
                req->ip_address.addr[2] = (s_addr & 0x00ff0000) >> 16;
@@ -852,17 +307,17 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
                req->tcp_port = ntohs(daddr_in->sin_port);
                beiscsi_ep->dst_addr = daddr_in->sin_addr.s_addr;
                beiscsi_ep->dst_tcpport = ntohs(daddr_in->sin_port);
-               beiscsi_ep->ip_type = BE2_IPV4;
+               beiscsi_ep->ip_type = BEISCSI_IP_TYPE_V4;
        } else {
                /* else it's PF_INET6 family */
-               req->ip_address.ip_type = BE2_IPV6;
+               req->ip_address.ip_type = BEISCSI_IP_TYPE_V6;
                memcpy(&req->ip_address.addr,
                       &daddr_in6->sin6_addr.in6_u.u6_addr8, 16);
                req->tcp_port = ntohs(daddr_in6->sin6_port);
                beiscsi_ep->dst_tcpport = ntohs(daddr_in6->sin6_port);
                memcpy(&beiscsi_ep->dst6_addr,
                       &daddr_in6->sin6_addr.in6_u.u6_addr8, 16);
-               beiscsi_ep->ip_type = BE2_IPV6;
+               beiscsi_ep->ip_type = BEISCSI_IP_TYPE_V6;
        }
        req->cid = cid;
        i = phba->nxt_cqid++;
@@ -883,7 +338,7 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
 
        if (!is_chip_be2_be3r(phba)) {
                req->hdr.version = MBX_CMD_VER1;
-               req->tcp_window_size = 0;
+               req->tcp_window_size = 0x8000;
                req->tcp_window_scale_count = 2;
        }
 
@@ -892,44 +347,6 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
        return tag;
 }
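
The tcp_window_size change above is easier to read with the scaling spelled
out: with TCP window scaling, the advertised window is the base value shifted
left by the scale count, so 0x8000 with a tcp_window_scale_count of 2 yields an
effective 128 KiB window. A quick standalone check in C (illustrative only,
not driver code):

    #include <stdio.h>

    int main(void)
    {
            unsigned int base = 0x8000;  /* window base written to the WRB */
            unsigned int scale = 2;      /* tcp_window_scale_count */

            /* 0x8000 << 2 == 0x20000 == 131072 bytes (128 KiB) */
            printf("effective window: %u bytes\n", base << scale);
            return 0;
    }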
 
-unsigned int mgmt_get_all_if_id(struct beiscsi_hba *phba)
-{
-       struct be_ctrl_info *ctrl = &phba->ctrl;
-       struct be_mcc_wrb *wrb;
-       struct be_cmd_get_all_if_id_req *req;
-       struct be_cmd_get_all_if_id_req *pbe_allid;
-       unsigned int tag;
-       int status = 0;
-
-       if (mutex_lock_interruptible(&ctrl->mbox_lock))
-               return -EINTR;
-       wrb = alloc_mcc_wrb(phba, &tag);
-       if (!wrb) {
-               mutex_unlock(&ctrl->mbox_lock);
-               return -ENOMEM;
-       }
-
-       req = embedded_payload(wrb);
-       be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
-       be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
-                          OPCODE_COMMON_ISCSI_NTWK_GET_ALL_IF_ID,
-                          sizeof(*req));
-       be_mcc_notify(phba, tag);
-       mutex_unlock(&ctrl->mbox_lock);
-
-       status = beiscsi_mccq_compl_wait(phba, tag, &wrb, NULL);
-       if (status) {
-               beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
-                           "BG_%d : Failed in mgmt_get_all_if_id\n");
-               return -EBUSY;
-       }
-
-       pbe_allid = embedded_payload(wrb);
-       phba->interface_handle = pbe_allid->if_hndl_list[0];
-
-       return status;
-}
-
 /*
  * mgmt_exec_nonemb_cmd()- Execute Non Embedded MBX Cmd
  * @phba: Driver priv structure
@@ -1001,72 +418,68 @@ static int mgmt_alloc_cmd_data(struct beiscsi_hba *phba, struct be_dma_mem *cmd,
        }
        cmd->size = size;
        be_cmd_hdr_prepare(cmd->va, CMD_SUBSYSTEM_ISCSI, iscsi_cmd, size);
+       beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+                   "BG_%d : subsystem iSCSI cmd %d size %d\n",
+                   iscsi_cmd, size);
        return 0;
 }
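
Nearly every config path in this file funnels through the same non-embedded
mailbox pattern: mgmt_alloc_cmd_data() grabs a DMA-coherent buffer and stamps
the iSCSI request header into it, mgmt_exec_nonemb_cmd() issues the WRB and
waits, and the response is copied out of the same buffer, which is freed on
completion. Distilled, a caller looks like the sketch below; it mirrors
beiscsi_if_get_gw() further down and is a reading aid, not new driver logic:

    /* condensed model of the non-embedded command flow (illustrative) */
    static int example_nonemb_query(struct beiscsi_hba *phba)
    {
            struct be_cmd_get_def_gateway_resp resp;
            struct be_cmd_get_def_gateway_req *req;
            struct be_dma_mem nonemb_cmd;
            int rc;

            /* allocate the DMA buffer and stamp the iSCSI request header */
            rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
                                     OPCODE_COMMON_ISCSI_NTWK_GET_DEFAULT_GATEWAY,
                                     sizeof(resp));
            if (rc)
                    return rc;

            req = nonemb_cmd.va;
            req->ip_type = BEISCSI_IP_TYPE_V4;  /* request-specific fields */

            /* issue the WRB, wait, copy the response out, free the buffer */
            return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, &resp, sizeof(resp));
    }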
 
-static int
-mgmt_static_ip_modify(struct beiscsi_hba *phba,
-                     struct be_cmd_get_if_info_resp *if_info,
-                     struct iscsi_iface_param_info *ip_param,
-                     struct iscsi_iface_param_info *subnet_param,
-                     uint32_t ip_action)
+unsigned int beiscsi_if_get_handle(struct beiscsi_hba *phba)
 {
-       struct be_cmd_set_ip_addr_req *req;
-       struct be_dma_mem nonemb_cmd;
-       uint32_t ip_type;
-       int rc;
+       struct be_ctrl_info *ctrl = &phba->ctrl;
+       struct be_mcc_wrb *wrb;
+       struct be_cmd_get_all_if_id_req *req;
+       struct be_cmd_get_all_if_id_req *pbe_allid;
+       unsigned int tag;
+       int status = 0;
 
-       rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
-                                OPCODE_COMMON_ISCSI_NTWK_MODIFY_IP_ADDR,
-                                sizeof(*req));
-       if (rc)
-               return rc;
+       if (mutex_lock_interruptible(&ctrl->mbox_lock))
+               return -EINTR;
+       wrb = alloc_mcc_wrb(phba, &tag);
+       if (!wrb) {
+               mutex_unlock(&ctrl->mbox_lock);
+               return -ENOMEM;
+       }
 
-       ip_type = (ip_param->param == ISCSI_NET_PARAM_IPV6_ADDR) ?
-               BE2_IPV6 : BE2_IPV4 ;
+       req = embedded_payload(wrb);
+       be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+       be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
+                          OPCODE_COMMON_ISCSI_NTWK_GET_ALL_IF_ID,
+                          sizeof(*req));
+       be_mcc_notify(phba, tag);
+       mutex_unlock(&ctrl->mbox_lock);
 
-       req = nonemb_cmd.va;
-       req->ip_params.record_entry_count = 1;
-       req->ip_params.ip_record.action = ip_action;
-       req->ip_params.ip_record.interface_hndl =
-               phba->interface_handle;
-       req->ip_params.ip_record.ip_addr.size_of_structure =
-               sizeof(struct be_ip_addr_subnet_format);
-       req->ip_params.ip_record.ip_addr.ip_type = ip_type;
+       status = beiscsi_mccq_compl_wait(phba, tag, &wrb, NULL);
+       if (status) {
+               beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
+                           "BG_%d : %s failed: %d\n", __func__, status);
+               return -EBUSY;
+       }
 
-       if (ip_action == IP_ACTION_ADD) {
-               memcpy(req->ip_params.ip_record.ip_addr.addr, ip_param->value,
-                      sizeof(req->ip_params.ip_record.ip_addr.addr));
+       pbe_allid = embedded_payload(wrb);
+       /* we now support only one interface per function */
+       phba->interface_handle = pbe_allid->if_hndl_list[0];
 
-               if (subnet_param)
-                       memcpy(req->ip_params.ip_record.ip_addr.subnet_mask,
-                              subnet_param->value,
-                              sizeof(req->ip_params.ip_record.ip_addr.subnet_mask));
-       } else {
-               memcpy(req->ip_params.ip_record.ip_addr.addr,
-                      if_info->ip_addr.addr,
-                      sizeof(req->ip_params.ip_record.ip_addr.addr));
+       return status;
+}
 
-               memcpy(req->ip_params.ip_record.ip_addr.subnet_mask,
-                      if_info->ip_addr.subnet_mask,
-                      sizeof(req->ip_params.ip_record.ip_addr.subnet_mask));
-       }
+static inline bool beiscsi_if_zero_ip(u8 *ip, u32 ip_type)
+{
+       u32 len;
 
-       rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
-       if (rc < 0)
-               beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
-                           "BG_%d : Failed to Modify existing IP Address\n");
-       return rc;
+       len = (ip_type < BEISCSI_IP_TYPE_V6) ? IP_V4_LEN : IP_V6_LEN;
+       while (len && !ip[len - 1])
+               len--;
+       return (len == 0);
 }
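
beiscsi_if_zero_ip() walks the buffer from the tail and returns true only when
every byte is zero, so both 0.0.0.0 and :: read as "no address configured";
the callers below use it to skip redundant delete steps. The same test in a
self-contained form (assuming the driver's IP_V4_LEN of 4 and IP_V6_LEN of 16):

    #include <stdbool.h>
    #include <stdint.h>

    /* illustrative copy of the check; not the driver symbol itself */
    static bool ip_is_unset(const uint8_t *ip, unsigned int len)
    {
            while (len && !ip[len - 1])
                    len--;
            return len == 0;
    }

    /* ip_is_unset((uint8_t[4]){0, 0, 0, 0}, 4)  -> true
     * ip_is_unset((uint8_t[4]){10, 0, 0, 1}, 4) -> false */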
 
-static int mgmt_modify_gateway(struct beiscsi_hba *phba, uint8_t *gt_addr,
-                              uint32_t gtway_action, uint32_t param_len)
+static int beiscsi_if_mod_gw(struct beiscsi_hba *phba,
+                            u32 action, u32 ip_type, u8 *gw)
 {
        struct be_cmd_set_def_gateway_req *req;
        struct be_dma_mem nonemb_cmd;
        int rt_val;
 
-
        rt_val = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
                                OPCODE_COMMON_ISCSI_NTWK_MODIFY_DEFAULT_GATEWAY,
                                sizeof(*req));
@@ -1074,200 +487,300 @@ static int mgmt_modify_gateway(struct beiscsi_hba *phba, uint8_t *gt_addr,
                return rt_val;
 
        req = nonemb_cmd.va;
-       req->action = gtway_action;
-       req->ip_addr.ip_type = BE2_IPV4;
+       req->action = action;
+       req->ip_addr.ip_type = ip_type;
+       memcpy(req->ip_addr.addr, gw,
+              (ip_type < BEISCSI_IP_TYPE_V6) ? IP_V4_LEN : IP_V6_LEN);
+       return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
+}
 
-       memcpy(req->ip_addr.addr, gt_addr, sizeof(req->ip_addr.addr));
+int beiscsi_if_set_gw(struct beiscsi_hba *phba, u32 ip_type, u8 *gw)
+{
+       struct be_cmd_get_def_gateway_resp gw_resp;
+       int rt_val;
 
-       return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
+       memset(&gw_resp, 0, sizeof(gw_resp));
+       rt_val = beiscsi_if_get_gw(phba, ip_type, &gw_resp);
+       if (rt_val) {
+               beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
+                           "BG_%d : Failed to Get Gateway Addr\n");
+               return rt_val;
+       }
+
+       if (!beiscsi_if_zero_ip(gw_resp.ip_addr.addr, ip_type)) {
+               rt_val = beiscsi_if_mod_gw(phba, IP_ACTION_DEL, ip_type,
+                                          gw_resp.ip_addr.addr);
+               if (rt_val) {
+                       beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
+                                   "BG_%d : Failed to clear Gateway Addr Set\n");
+                       return rt_val;
+               }
+       }
+
+       rt_val = beiscsi_if_mod_gw(phba, IP_ACTION_ADD, ip_type, gw);
+       if (rt_val)
+               beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
+                           "BG_%d : Failed to Set Gateway Addr\n");
+
+       return rt_val;
 }
 
-int mgmt_set_ip(struct beiscsi_hba *phba,
-               struct iscsi_iface_param_info *ip_param,
-               struct iscsi_iface_param_info *subnet_param,
-               uint32_t boot_proto)
+int beiscsi_if_get_gw(struct beiscsi_hba *phba, u32 ip_type,
+                     struct be_cmd_get_def_gateway_resp *resp)
 {
-       struct be_cmd_get_def_gateway_resp gtway_addr_set;
-       struct be_cmd_get_if_info_resp *if_info;
-       struct be_cmd_set_dhcp_req *dhcpreq;
-       struct be_cmd_rel_dhcp_req *reldhcp;
+       struct be_cmd_get_def_gateway_req *req;
        struct be_dma_mem nonemb_cmd;
-       uint8_t *gtway_addr;
-       uint32_t ip_type;
        int rc;
 
-       rc = mgmt_get_all_if_id(phba);
+       rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
+                                OPCODE_COMMON_ISCSI_NTWK_GET_DEFAULT_GATEWAY,
+                                sizeof(*resp));
        if (rc)
                return rc;
 
-       ip_type = (ip_param->param == ISCSI_NET_PARAM_IPV6_ADDR) ?
-               BE2_IPV6 : BE2_IPV4 ;
+       req = nonemb_cmd.va;
+       req->ip_type = ip_type;
+
+       return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, resp,
+                                   sizeof(*resp));
+}
 
-       rc = mgmt_get_if_info(phba, ip_type, &if_info);
+static int
+beiscsi_if_clr_ip(struct beiscsi_hba *phba,
+                 struct be_cmd_get_if_info_resp *if_info)
+{
+       struct be_cmd_set_ip_addr_req *req;
+       struct be_dma_mem nonemb_cmd;
+       int rc;
+
+       rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
+                                OPCODE_COMMON_ISCSI_NTWK_MODIFY_IP_ADDR,
+                                sizeof(*req));
        if (rc)
                return rc;
 
-       if (boot_proto == ISCSI_BOOTPROTO_DHCP) {
-               if (if_info->dhcp_state) {
-                       beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
-                                   "BG_%d : DHCP Already Enabled\n");
-                       goto exit;
-               }
-               /* The ip_param->len is 1 in DHCP case. Setting
-                  proper IP len as this it is used while
-                  freeing the Static IP.
-                */
-               ip_param->len = (ip_param->param == ISCSI_NET_PARAM_IPV6_ADDR) ?
-                               IP_V6_LEN : IP_V4_LEN;
-
-       } else {
-               if (if_info->dhcp_state) {
+       req = nonemb_cmd.va;
+       req->ip_params.record_entry_count = 1;
+       req->ip_params.ip_record.action = IP_ACTION_DEL;
+       req->ip_params.ip_record.interface_hndl =
+               phba->interface_handle;
+       req->ip_params.ip_record.ip_addr.size_of_structure =
+               sizeof(struct be_ip_addr_subnet_format);
+       req->ip_params.ip_record.ip_addr.ip_type = if_info->ip_addr.ip_type;
+       memcpy(req->ip_params.ip_record.ip_addr.addr,
+              if_info->ip_addr.addr,
+              sizeof(if_info->ip_addr.addr));
+       memcpy(req->ip_params.ip_record.ip_addr.subnet_mask,
+              if_info->ip_addr.subnet_mask,
+              sizeof(if_info->ip_addr.subnet_mask));
+       rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
+       if (rc < 0 || req->ip_params.ip_record.status) {
+               beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+                           "BG_%d : failed to clear IP: rc %d status %d\n",
+                           rc, req->ip_params.ip_record.status);
+       }
+       return rc;
+}
 
-                       memset(if_info, 0, sizeof(*if_info));
-                       rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
-                               OPCODE_COMMON_ISCSI_NTWK_REL_STATELESS_IP_ADDR,
-                               sizeof(*reldhcp));
+static int
+beiscsi_if_set_ip(struct beiscsi_hba *phba, u8 *ip,
+                 u8 *subnet, u32 ip_type)
+{
+       struct be_cmd_set_ip_addr_req *req;
+       struct be_dma_mem nonemb_cmd;
+       uint32_t ip_len;
+       int rc;
 
-                       if (rc)
-                               goto exit;
+       rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
+                                OPCODE_COMMON_ISCSI_NTWK_MODIFY_IP_ADDR,
+                                sizeof(*req));
+       if (rc)
+               return rc;
 
-                       reldhcp = nonemb_cmd.va;
-                       reldhcp->interface_hndl = phba->interface_handle;
-                       reldhcp->ip_type = ip_type;
+       req = nonemb_cmd.va;
+       req->ip_params.record_entry_count = 1;
+       req->ip_params.ip_record.action = IP_ACTION_ADD;
+       req->ip_params.ip_record.interface_hndl =
+               phba->interface_handle;
+       req->ip_params.ip_record.ip_addr.size_of_structure =
+               sizeof(struct be_ip_addr_subnet_format);
+       req->ip_params.ip_record.ip_addr.ip_type = ip_type;
+       ip_len = (ip_type < BEISCSI_IP_TYPE_V6) ? IP_V4_LEN : IP_V6_LEN;
+       memcpy(req->ip_params.ip_record.ip_addr.addr, ip, ip_len);
+       if (subnet)
+               memcpy(req->ip_params.ip_record.ip_addr.subnet_mask,
+                      subnet, ip_len);
 
-                       rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
-                       if (rc < 0) {
-                               beiscsi_log(phba, KERN_WARNING,
-                                           BEISCSI_LOG_CONFIG,
-                                           "BG_%d : Failed to Delete existing dhcp\n");
-                               goto exit;
-                       }
-               }
+       rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
+       /**
+        * In some cases, host needs to look into individual record status
+        * even though FW reported success for that IOCTL.
+        */
+       if (rc < 0 || req->ip_params.ip_record.status) {
+               __beiscsi_log(phba, KERN_ERR,
+                           "BG_%d : failed to set IP: rc %d status %d\n",
+                           rc, req->ip_params.ip_record.status);
+               if (req->ip_params.ip_record.status)
+                       rc = -EINVAL;
        }
+       return rc;
+}
 
-       /* Delete the Static IP Set */
-       if (if_info->ip_addr.addr[0]) {
-               rc = mgmt_static_ip_modify(phba, if_info, ip_param, NULL,
-                                          IP_ACTION_DEL);
+int beiscsi_if_en_static(struct beiscsi_hba *phba, u32 ip_type,
+                        u8 *ip, u8 *subnet)
+{
+       struct be_cmd_get_if_info_resp *if_info;
+       struct be_cmd_rel_dhcp_req *reldhcp;
+       struct be_dma_mem nonemb_cmd;
+       int rc;
+
+       rc = beiscsi_if_get_info(phba, ip_type, &if_info);
+       if (rc)
+               return rc;
+
+       if (if_info->dhcp_state) {
+               rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
+                               OPCODE_COMMON_ISCSI_NTWK_REL_STATELESS_IP_ADDR,
+                               sizeof(*reldhcp));
                if (rc)
                        goto exit;
-       }
 
-       /* Delete the Gateway settings if mode change is to DHCP */
-       if (boot_proto == ISCSI_BOOTPROTO_DHCP) {
-               memset(&gtway_addr_set, 0, sizeof(gtway_addr_set));
-               rc = mgmt_get_gateway(phba, BE2_IPV4, &gtway_addr_set);
-               if (rc) {
+               reldhcp = nonemb_cmd.va;
+               reldhcp->interface_hndl = phba->interface_handle;
+               reldhcp->ip_type = ip_type;
+               rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
+               if (rc < 0) {
                        beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
-                                   "BG_%d : Failed to Get Gateway Addr\n");
+                                   "BG_%d : failed to release existing DHCP: %d\n",
+                                   rc);
                        goto exit;
                }
-
-               if (gtway_addr_set.ip_addr.addr[0]) {
-                       gtway_addr = (uint8_t *)&gtway_addr_set.ip_addr.addr;
-                       rc = mgmt_modify_gateway(phba, gtway_addr,
-                                                IP_ACTION_DEL, IP_V4_LEN);
-
-                       if (rc) {
-                               beiscsi_log(phba, KERN_WARNING,
-                                           BEISCSI_LOG_CONFIG,
-                                           "BG_%d : Failed to clear Gateway Addr Set\n");
-                               goto exit;
-                       }
-               }
        }
 
-       /* Set Adapter to DHCP/Static Mode */
-       if (boot_proto == ISCSI_BOOTPROTO_DHCP) {
-               rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
-                       OPCODE_COMMON_ISCSI_NTWK_CONFIG_STATELESS_IP_ADDR,
-                       sizeof(*dhcpreq));
+       /* first delete any IP set */
+       if (!beiscsi_if_zero_ip(if_info->ip_addr.addr, ip_type)) {
+               rc = beiscsi_if_clr_ip(phba, if_info);
                if (rc)
                        goto exit;
-
-               dhcpreq = nonemb_cmd.va;
-               dhcpreq->flags = BLOCKING;
-               dhcpreq->retry_count = 1;
-               dhcpreq->interface_hndl = phba->interface_handle;
-               dhcpreq->ip_type = BE2_DHCP_V4;
-
-               rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
-       } else {
-               rc = mgmt_static_ip_modify(phba, if_info, ip_param,
-                                            subnet_param, IP_ACTION_ADD);
        }
 
+       /* if ip == NULL then this is called just to release DHCP IP */
+       if (ip)
+               rc = beiscsi_if_set_ip(phba, ip, subnet, ip_type);
 exit:
        kfree(if_info);
        return rc;
 }
 
-int mgmt_set_gateway(struct beiscsi_hba *phba,
-                    struct iscsi_iface_param_info *gateway_param)
+int beiscsi_if_en_dhcp(struct beiscsi_hba *phba, u32 ip_type)
 {
-       struct be_cmd_get_def_gateway_resp gtway_addr_set;
-       uint8_t *gtway_addr;
-       int rt_val;
+       struct be_cmd_get_def_gateway_resp gw_resp;
+       struct be_cmd_get_if_info_resp *if_info;
+       struct be_cmd_set_dhcp_req *dhcpreq;
+       struct be_dma_mem nonemb_cmd;
+       u8 *gw;
+       int rc;
 
-       memset(&gtway_addr_set, 0, sizeof(gtway_addr_set));
-       rt_val = mgmt_get_gateway(phba, BE2_IPV4, &gtway_addr_set);
-       if (rt_val) {
+       rc = beiscsi_if_get_info(phba, ip_type, &if_info);
+       if (rc)
+               return rc;
+
+       if (if_info->dhcp_state) {
                beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
-                           "BG_%d : Failed to Get Gateway Addr\n");
-               return rt_val;
+                               "BG_%d : DHCP Already Enabled\n");
+               goto exit;
        }
 
-       if (gtway_addr_set.ip_addr.addr[0]) {
-               gtway_addr = (uint8_t *)&gtway_addr_set.ip_addr.addr;
-               rt_val = mgmt_modify_gateway(phba, gtway_addr, IP_ACTION_DEL,
-                                            gateway_param->len);
-               if (rt_val) {
+       /* first delete any IP set */
+       if (!beiscsi_if_zero_ip(if_info->ip_addr.addr, ip_type)) {
+               rc = beiscsi_if_clr_ip(phba, if_info);
+               if (rc)
+                       goto exit;
+       }
+
+       /* delete gateway settings if mode change is to DHCP */
+       memset(&gw_resp, 0, sizeof(gw_resp));
+       /* use ip_type provided in if_info */
+       rc = beiscsi_if_get_gw(phba, if_info->ip_addr.ip_type, &gw_resp);
+       if (rc) {
+               beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
+                           "BG_%d : Failed to Get Gateway Addr\n");
+               goto exit;
+       }
+       gw = (u8 *)&gw_resp.ip_addr.addr;
+       if (!beiscsi_if_zero_ip(gw, if_info->ip_addr.ip_type)) {
+               rc = beiscsi_if_mod_gw(phba, IP_ACTION_DEL,
+                                      if_info->ip_addr.ip_type, gw);
+               if (rc) {
                        beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
                                    "BG_%d : Failed to clear Gateway Addr Set\n");
-                       return rt_val;
+                       goto exit;
                }
        }
 
-       gtway_addr = (uint8_t *)&gateway_param->value;
-       rt_val = mgmt_modify_gateway(phba, gtway_addr, IP_ACTION_ADD,
-                                    gateway_param->len);
+       rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
+                       OPCODE_COMMON_ISCSI_NTWK_CONFIG_STATELESS_IP_ADDR,
+                       sizeof(*dhcpreq));
+       if (rc)
+               goto exit;
 
-       if (rt_val)
-               beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
-                           "BG_%d : Failed to Set Gateway Addr\n");
+       dhcpreq = nonemb_cmd.va;
+       dhcpreq->flags = 1; /* 1 - blocking; 0 - non-blocking */
+       dhcpreq->retry_count = 1;
+       dhcpreq->interface_hndl = phba->interface_handle;
+       dhcpreq->ip_type = ip_type;
+       rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
 
-       return rt_val;
+exit:
+       kfree(if_info);
+       return rc;
 }
 
-int mgmt_get_gateway(struct beiscsi_hba *phba, int ip_type,
-                    struct be_cmd_get_def_gateway_resp *gateway)
+/**
+ * beiscsi_if_set_vlan()- Issue and wait for CMD completion
+ * @phba: device private structure instance
+ * @vlan_tag: VLAN tag
+ *
+ * Issue the MBX Cmd and wait for the completion of the
+ * command.
+ *
+ * returns
+ *     Success: 0
+ *     Failure: Non-Zero Value
+ **/
+int beiscsi_if_set_vlan(struct beiscsi_hba *phba, uint16_t vlan_tag)
 {
-       struct be_cmd_get_def_gateway_req *req;
-       struct be_dma_mem nonemb_cmd;
        int rc;
+       unsigned int tag;
 
-       rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
-                                OPCODE_COMMON_ISCSI_NTWK_GET_DEFAULT_GATEWAY,
-                                sizeof(*gateway));
-       if (rc)
-               return rc;
-
-       req = nonemb_cmd.va;
-       req->ip_type = ip_type;
+       tag = be_cmd_set_vlan(phba, vlan_tag);
+       if (!tag) {
+               beiscsi_log(phba, KERN_ERR,
+                           (BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX),
+                           "BG_%d : VLAN Setting Failed\n");
+               return -EBUSY;
+       }
 
-       return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, gateway,
-                                   sizeof(*gateway));
+       rc = beiscsi_mccq_compl_wait(phba, tag, NULL, NULL);
+       if (rc) {
+               beiscsi_log(phba, KERN_ERR,
+                           (BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX),
+                           "BS_%d : VLAN MBX Cmd Failed\n");
+               return rc;
+       }
+       return rc;
 }
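
beiscsi_if_set_vlan() is fully synchronous: be_cmd_set_vlan() posts the tagged
MCC WRB and beiscsi_mccq_compl_wait() blocks until firmware completes it. A
minimal caller sketch (the function and return convention are the driver's;
the wrapper name and tag value are illustrative):

    /* hypothetical wrapper: program VLAN ID 100 on the iSCSI interface */
    static int example_enable_vlan(struct beiscsi_hba *phba)
    {
            uint16_t vlan_tag = 100;  /* 12-bit VID, priority bits clear */

            /* 0 on success, -EBUSY if no WRB was free, else wait status */
            return beiscsi_if_set_vlan(phba, vlan_tag);
    }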
 
-int mgmt_get_if_info(struct beiscsi_hba *phba, int ip_type,
-                    struct be_cmd_get_if_info_resp **if_info)
+int beiscsi_if_get_info(struct beiscsi_hba *phba, int ip_type,
+                       struct be_cmd_get_if_info_resp **if_info)
 {
        struct be_cmd_get_if_info_req *req;
        struct be_dma_mem nonemb_cmd;
        uint32_t ioctl_size = sizeof(struct be_cmd_get_if_info_resp);
        int rc;
 
-       rc = mgmt_get_all_if_id(phba);
+       rc = beiscsi_if_get_handle(phba);
        if (rc)
                return rc;
 
@@ -1364,123 +877,317 @@ unsigned int be_cmd_get_initname(struct beiscsi_hba *phba)
        return tag;
 }
 
+static void beiscsi_boot_process_compl(struct beiscsi_hba *phba,
+                                      unsigned int tag)
+{
+       struct be_cmd_get_boot_target_resp *boot_resp;
+       struct be_cmd_resp_logout_fw_sess *logo_resp;
+       struct be_cmd_get_session_resp *sess_resp;
+       struct be_mcc_wrb *wrb;
+       struct boot_struct *bs;
+       int boot_work, status;
+
+       if (!test_bit(BEISCSI_HBA_BOOT_WORK, &phba->state)) {
+               __beiscsi_log(phba, KERN_ERR,
+                             "BG_%d : %s no boot work %lx\n",
+                             __func__, phba->state);
+               return;
+       }
+
+       if (phba->boot_struct.tag != tag) {
+               __beiscsi_log(phba, KERN_ERR,
+                             "BG_%d : %s tag mismatch %d:%d\n",
+                             __func__, tag, phba->boot_struct.tag);
+               return;
+       }
+       bs = &phba->boot_struct;
+       boot_work = 1;
+       status = 0;
+       switch (bs->action) {
+       case BEISCSI_BOOT_REOPEN_SESS:
+               status = __beiscsi_mcc_compl_status(phba, tag, NULL, NULL);
+               if (!status)
+                       bs->action = BEISCSI_BOOT_GET_SHANDLE;
+               else
+                       bs->retry--;
+               break;
+       case BEISCSI_BOOT_GET_SHANDLE:
+               status = __beiscsi_mcc_compl_status(phba, tag, &wrb, NULL);
+               if (!status) {
+                       boot_resp = embedded_payload(wrb);
+                       bs->s_handle = boot_resp->boot_session_handle;
+               }
+               if (bs->s_handle == BE_BOOT_INVALID_SHANDLE) {
+                       bs->action = BEISCSI_BOOT_REOPEN_SESS;
+                       bs->retry--;
+               } else {
+                       bs->action = BEISCSI_BOOT_GET_SINFO;
+               }
+               break;
+       case BEISCSI_BOOT_GET_SINFO:
+               status = __beiscsi_mcc_compl_status(phba, tag, NULL,
+                                                   &bs->nonemb_cmd);
+               if (!status) {
+                       sess_resp = bs->nonemb_cmd.va;
+                       memcpy(&bs->boot_sess, &sess_resp->session_info,
+                              sizeof(struct mgmt_session_info));
+                       bs->action = BEISCSI_BOOT_LOGOUT_SESS;
+               } else {
+                       __beiscsi_log(phba, KERN_ERR,
+                                     "BG_%d : get boot session info error : 0x%x\n",
+                                     status);
+                       boot_work = 0;
+               }
+               pci_free_consistent(phba->ctrl.pdev, bs->nonemb_cmd.size,
+                                   bs->nonemb_cmd.va, bs->nonemb_cmd.dma);
+               bs->nonemb_cmd.va = NULL;
+               break;
+       case BEISCSI_BOOT_LOGOUT_SESS:
+               status = __beiscsi_mcc_compl_status(phba, tag, &wrb, NULL);
+               if (!status) {
+                       logo_resp = embedded_payload(wrb);
+                       if (logo_resp->session_status != BE_SESS_STATUS_CLOSE) {
+                               __beiscsi_log(phba, KERN_ERR,
+                                             "BG_%d : FW boot session logout error : 0x%x\n",
+                                             logo_resp->session_status);
+                       }
+               }
+               /* continue to create boot_kset even if logout failed? */
+               bs->action = BEISCSI_BOOT_CREATE_KSET;
+               break;
+       default:
+               break;
+       }
+
+       /* clear the tag so no other completion matches this tag */
+       bs->tag = 0;
+       if (!bs->retry) {
+               boot_work = 0;
+               __beiscsi_log(phba, KERN_ERR,
+                             "BG_%d : failed to setup boot target: status %d action %d\n",
+                             status, bs->action);
+       }
+       if (!boot_work) {
+               /* wait for next event to start boot_work */
+               clear_bit(BEISCSI_HBA_BOOT_WORK, &phba->state);
+               return;
+       }
+       schedule_work(&phba->boot_work);
+}
+
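+
Everything from here down feeds that completion handler: each MBX issuer
stores its tag in boot_struct, marks the tag MCC_TAG_STATE_ASYNC, and registers
beiscsi_boot_process_compl() as the callback, so the boot flow advances one
action per completion. The transition logic, modeled as plain C (the enum
values mirror the driver's BEISCSI_BOOT_* actions; the helper itself is
illustrative, not driver code):

    enum boot_action { REOPEN_SESS, GET_SHANDLE, GET_SINFO,
                       LOGOUT_SESS, CREATE_KSET };

    /* illustrative model of the per-completion transitions */
    static enum boot_action next_action(enum boot_action cur, int ok,
                                        int handle_valid, int *retry)
    {
            switch (cur) {
            case REOPEN_SESS:
                    if (!ok)
                            (*retry)--;         /* stay and retry */
                    return ok ? GET_SHANDLE : REOPEN_SESS;
            case GET_SHANDLE:
                    if (!handle_valid) {
                            (*retry)--;         /* ask FW to re-login */
                            return REOPEN_SESS;
                    }
                    return GET_SINFO;
            case GET_SINFO:
                    return LOGOUT_SESS;         /* on error, boot work stops */
            default:
                    return CREATE_KSET;         /* logout result only logged */
            }
    }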
 /**
- * be_mgmt_get_boot_shandle()- Get the session handle
- * @phba: device priv structure instance
- * @s_handle: session handle returned for boot session.
+ * beiscsi_boot_logout_sess()- Logout from boot FW session
+ * @phba: Device priv structure instance
+ *
+ * return
+ *     the TAG used for MBOX Command
  *
- * Get the boot target session handle. In case of
- * crashdump mode driver has to issue and MBX Cmd
- * for FW to login to boot target
+ */
+unsigned int beiscsi_boot_logout_sess(struct beiscsi_hba *phba)
+{
+       struct be_ctrl_info *ctrl = &phba->ctrl;
+       struct be_mcc_wrb *wrb;
+       struct be_cmd_req_logout_fw_sess *req;
+       unsigned int tag;
+
+       mutex_lock(&ctrl->mbox_lock);
+       wrb = alloc_mcc_wrb(phba, &tag);
+       if (!wrb) {
+               mutex_unlock(&ctrl->mbox_lock);
+               return 0;
+       }
+
+       req = embedded_payload(wrb);
+       be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+       be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
+                          OPCODE_ISCSI_INI_SESSION_LOGOUT_TARGET,
+                          sizeof(struct be_cmd_req_logout_fw_sess));
+       /* Use the session handle copied into boot_sess */
+       req->session_handle = phba->boot_struct.boot_sess.session_handle;
+
+       phba->boot_struct.tag = tag;
+       set_bit(MCC_TAG_STATE_ASYNC, &ctrl->ptag_state[tag].tag_state);
+       ctrl->ptag_state[tag].cbfn = beiscsi_boot_process_compl;
+
+       be_mcc_notify(phba, tag);
+       mutex_unlock(&ctrl->mbox_lock);
+
+       return tag;
+}
+
+/**
+ * beiscsi_boot_reopen_sess()- Reopen boot session
+ * @phba: Device priv structure instance
  *
  * return
- *     Success: 0
- *     Failure: Non-Zero value
+ *     the TAG used for MBOX Command
  *
  **/
-int be_mgmt_get_boot_shandle(struct beiscsi_hba *phba,
-                             unsigned int *s_handle)
+unsigned int beiscsi_boot_reopen_sess(struct beiscsi_hba *phba)
 {
-       struct be_cmd_get_boot_target_resp *boot_resp;
+       struct be_ctrl_info *ctrl = &phba->ctrl;
        struct be_mcc_wrb *wrb;
+       struct be_cmd_reopen_session_req *req;
        unsigned int tag;
-       uint8_t boot_retry = 3;
-       int rc;
 
-       do {
-               /* Get the Boot Target Session Handle and Count*/
-               tag = mgmt_get_boot_target(phba);
-               if (!tag) {
-                       beiscsi_log(phba, KERN_ERR,
-                                   BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
-                                   "BG_%d : Getting Boot Target Info Failed\n");
-                       return -EAGAIN;
-               }
+       mutex_lock(&ctrl->mbox_lock);
+       wrb = alloc_mcc_wrb(phba, &tag);
+       if (!wrb) {
+               mutex_unlock(&ctrl->mbox_lock);
+               return 0;
+       }
 
-               rc = beiscsi_mccq_compl_wait(phba, tag, &wrb, NULL);
-               if (rc) {
-                       beiscsi_log(phba, KERN_ERR,
-                                   BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
-                                   "BG_%d : MBX CMD get_boot_target Failed\n");
-                       return -EBUSY;
-               }
+       req = embedded_payload(wrb);
+       be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+       be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
+                          OPCODE_ISCSI_INI_DRIVER_REOPEN_ALL_SESSIONS,
+                          sizeof(struct be_cmd_reopen_session_resp));
+       req->reopen_type = BE_REOPEN_BOOT_SESSIONS;
+       req->session_handle = BE_BOOT_INVALID_SHANDLE;
 
-               boot_resp = embedded_payload(wrb);
+       phba->boot_struct.tag = tag;
+       set_bit(MCC_TAG_STATE_ASYNC, &ctrl->ptag_state[tag].tag_state);
+       ctrl->ptag_state[tag].cbfn = beiscsi_boot_process_compl;
 
-               /* Check if the there are any Boot targets configured */
-               if (!boot_resp->boot_session_count) {
-                       beiscsi_log(phba, KERN_INFO,
-                                   BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
-                                   "BG_%d  ;No boot targets configured\n");
-                       return -ENXIO;
-               }
+       be_mcc_notify(phba, tag);
+       mutex_unlock(&ctrl->mbox_lock);
+       return tag;
+}
 
-               /* FW returns the session handle of the boot session */
-               if (boot_resp->boot_session_handle != INVALID_SESS_HANDLE) {
-                       *s_handle = boot_resp->boot_session_handle;
-                       return 0;
-               }
 
-               /* Issue MBX Cmd to FW to login to the boot target */
-               tag = mgmt_reopen_session(phba, BE_REOPEN_BOOT_SESSIONS,
-                                         INVALID_SESS_HANDLE);
-               if (!tag) {
-                       beiscsi_log(phba, KERN_ERR,
-                                   BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
-                                   "BG_%d : mgmt_reopen_session Failed\n");
-                       return -EAGAIN;
-               }
+/**
+ * beiscsi_boot_get_sinfo()- Get boot session info
+ * @phba: device priv structure instance
+ *
+ * Fetches the session info from FW for the boot_struct.s_handle session.
+ * return
+ *     the TAG used for MBOX Command
+ *
+ **/
+unsigned int beiscsi_boot_get_sinfo(struct beiscsi_hba *phba)
+{
+       struct be_ctrl_info *ctrl = &phba->ctrl;
+       struct be_cmd_get_session_resp *resp;
+       struct be_cmd_get_session_req *req;
+       struct be_dma_mem *nonemb_cmd;
+       struct be_mcc_wrb *wrb;
+       struct be_sge *sge;
+       unsigned int tag;
 
-               rc = beiscsi_mccq_compl_wait(phba, tag, NULL, NULL);
-               if (rc) {
-                       beiscsi_log(phba, KERN_ERR,
-                                   BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
-                                   "BG_%d : mgmt_reopen_session Failed");
-                       return rc;
-               }
-       } while (--boot_retry);
+       mutex_lock(&ctrl->mbox_lock);
+       wrb = alloc_mcc_wrb(phba, &tag);
+       if (!wrb) {
+               mutex_unlock(&ctrl->mbox_lock);
+               return 0;
+       }
+
+       nonemb_cmd = &phba->boot_struct.nonemb_cmd;
+       nonemb_cmd->size = sizeof(*resp);
+       nonemb_cmd->va = pci_alloc_consistent(phba->ctrl.pdev,
+                                             nonemb_cmd->size,
+                                             &nonemb_cmd->dma);
+       if (!nonemb_cmd->va) {
+               mutex_unlock(&ctrl->mbox_lock);
+               return 0;
+       }
+
+       req = nonemb_cmd->va;
+       memset(req, 0, sizeof(*req));
+       sge = nonembedded_sgl(wrb);
+       be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
+       be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
+                          OPCODE_ISCSI_INI_SESSION_GET_A_SESSION,
+                          sizeof(*resp));
+       req->session_handle = phba->boot_struct.s_handle;
+       sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
+       sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
+       sge->len = cpu_to_le32(nonemb_cmd->size);
+
+       phba->boot_struct.tag = tag;
+       set_bit(MCC_TAG_STATE_ASYNC, &ctrl->ptag_state[tag].tag_state);
+       ctrl->ptag_state[tag].cbfn = beiscsi_boot_process_compl;
+
+       be_mcc_notify(phba, tag);
+       mutex_unlock(&ctrl->mbox_lock);
+       return tag;
+}
+
+unsigned int __beiscsi_boot_get_shandle(struct beiscsi_hba *phba, int async)
+{
+       struct be_ctrl_info *ctrl = &phba->ctrl;
+       struct be_mcc_wrb *wrb;
+       struct be_cmd_get_boot_target_req *req;
+       unsigned int tag;
+
+       mutex_lock(&ctrl->mbox_lock);
+       wrb = alloc_mcc_wrb(phba, &tag);
+       if (!wrb) {
+               mutex_unlock(&ctrl->mbox_lock);
+               return 0;
+       }
+
+       req = embedded_payload(wrb);
+       be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+       be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
+                          OPCODE_ISCSI_INI_BOOT_GET_BOOT_TARGET,
+                          sizeof(struct be_cmd_get_boot_target_resp));
 
-       /* Couldn't log into the boot target */
-       beiscsi_log(phba, KERN_ERR,
-                   BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
-                   "BG_%d : Login to Boot Target Failed\n");
-       return -ENXIO;
+       if (async) {
+               phba->boot_struct.tag = tag;
+               set_bit(MCC_TAG_STATE_ASYNC, &ctrl->ptag_state[tag].tag_state);
+               ctrl->ptag_state[tag].cbfn = beiscsi_boot_process_compl;
+       }
+
+       be_mcc_notify(phba, tag);
+       mutex_unlock(&ctrl->mbox_lock);
+       return tag;
 }
 
 /**
- * mgmt_set_vlan()- Issue and wait for CMD completion
- * @phba: device private structure instance
- * @vlan_tag: VLAN tag
+ * beiscsi_boot_get_shandle()- Get boot session handle
+ * @phba: device priv structure instance
+ * @s_handle: session handle returned for boot session.
  *
- * Issue the MBX Cmd and wait for the completion of the
- * command.
+ * return
+ *     Success: 1
+ *     Failure: negative
  *
- * returns
- *     Success: 0
- *     Failure: Non-Xero Value
  **/
-int mgmt_set_vlan(struct beiscsi_hba *phba,
-                  uint16_t vlan_tag)
+int beiscsi_boot_get_shandle(struct beiscsi_hba *phba, unsigned int *s_handle)
 {
-       int rc;
+       struct be_cmd_get_boot_target_resp *boot_resp;
+       struct be_mcc_wrb *wrb;
        unsigned int tag;
+       int rc;
 
-       tag = be_cmd_set_vlan(phba, vlan_tag);
+       *s_handle = BE_BOOT_INVALID_SHANDLE;
+       /* get configured boot session count and handle */
+       tag = __beiscsi_boot_get_shandle(phba, 0);
        if (!tag) {
                beiscsi_log(phba, KERN_ERR,
-                           (BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX),
-                           "BG_%d : VLAN Setting Failed\n");
-               return -EBUSY;
+                           BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
+                           "BG_%d : Getting Boot Target Info Failed\n");
+               return -EAGAIN;
        }
 
-       rc = beiscsi_mccq_compl_wait(phba, tag, NULL, NULL);
+       rc = beiscsi_mccq_compl_wait(phba, tag, &wrb, NULL);
        if (rc) {
                beiscsi_log(phba, KERN_ERR,
-                           (BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX),
-                           "BS_%d : VLAN MBX Cmd Failed\n");
-               return rc;
+                           BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
+                           "BG_%d : MBX CMD get_boot_target Failed\n");
+               return -EBUSY;
        }
-       return rc;
+
+       boot_resp = embedded_payload(wrb);
+       /* check if there are any boot targets configured */
+       if (!boot_resp->boot_session_count) {
+               __beiscsi_log(phba, KERN_INFO,
+                             "BG_%d : No boot targets configured\n");
+               return -ENXIO;
+       }
+
+       /* s_handle is valid only if FW has logged in to the boot target */
+       *s_handle = boot_resp->boot_session_handle;
+       return 1;
 }
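
beiscsi_boot_get_shandle() is the synchronous entry point: it returns 1 with
*s_handle filled (which may still be BE_BOOT_INVALID_SHANDLE if firmware has
not logged in yet), -ENXIO when no boot target is configured, and other
negative values on mailbox failures. A hedged caller sketch; the wrapper name
and flow below are illustrative, the real consumer lives elsewhere in the
driver:

    static int example_find_boot_session(struct beiscsi_hba *phba)
    {
            unsigned int s_handle;
            int ret;

            ret = beiscsi_boot_get_shandle(phba, &s_handle);
            if (ret < 0)
                    return ret;      /* -ENXIO, -EBUSY or -EAGAIN */
            if (s_handle == BE_BOOT_INVALID_SHANDLE)
                    return -EAGAIN;  /* reopen sessions, then retry */
            return 0;                /* valid handle: fetch session info */
    }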
 
 /**
@@ -1645,7 +1352,6 @@ void beiscsi_offload_cxn_v0(struct beiscsi_offload_params *params,
 {
        struct iscsi_wrb *pwrb = pwrb_handle->pwrb;
 
-       memset(pwrb, 0, sizeof(*pwrb));
        AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
                      max_send_data_segment_length, pwrb,
                      params->dw[offsetof(struct amap_beiscsi_offload_params,
@@ -1717,8 +1423,6 @@ void beiscsi_offload_cxn_v2(struct beiscsi_offload_params *params,
 {
        struct iscsi_wrb *pwrb = pwrb_handle->pwrb;
 
-       memset(pwrb, 0, sizeof(*pwrb));
-
        AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2,
                      max_burst_length, pwrb, params->dw[offsetof
                      (struct amap_beiscsi_offload_params,
@@ -1790,70 +1494,3 @@ void beiscsi_offload_cxn_v2(struct beiscsi_offload_params *params,
                     (params->dw[offsetof(struct amap_beiscsi_offload_params,
                      exp_statsn) / 32] + 1));
 }
-
-/**
- * beiscsi_logout_fw_sess()- Firmware Session Logout
- * @phba: Device priv structure instance
- * @fw_sess_handle: FW session handle
- *
- * Logout from the FW established sessions.
- * returns
- *  Success: 0
- *  Failure: Non-Zero Value
- *
- */
-int beiscsi_logout_fw_sess(struct beiscsi_hba *phba,
-               uint32_t fw_sess_handle)
-{
-       struct be_ctrl_info *ctrl = &phba->ctrl;
-       struct be_mcc_wrb *wrb;
-       struct be_cmd_req_logout_fw_sess *req;
-       struct be_cmd_resp_logout_fw_sess *resp;
-       unsigned int tag;
-       int rc;
-
-       beiscsi_log(phba, KERN_INFO,
-                   BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
-                   "BG_%d : In bescsi_logout_fwboot_sess\n");
-
-       mutex_lock(&ctrl->mbox_lock);
-       wrb = alloc_mcc_wrb(phba, &tag);
-       if (!wrb) {
-               mutex_unlock(&ctrl->mbox_lock);
-               beiscsi_log(phba, KERN_INFO,
-                           BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
-                           "BG_%d : MBX Tag Failure\n");
-               return -EINVAL;
-       }
-
-       req = embedded_payload(wrb);
-       be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
-       be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
-                          OPCODE_ISCSI_INI_SESSION_LOGOUT_TARGET,
-                          sizeof(struct be_cmd_req_logout_fw_sess));
-
-       /* Set the session handle */
-       req->session_handle = fw_sess_handle;
-       be_mcc_notify(phba, tag);
-       mutex_unlock(&ctrl->mbox_lock);
-
-       rc = beiscsi_mccq_compl_wait(phba, tag, &wrb, NULL);
-       if (rc) {
-               beiscsi_log(phba, KERN_ERR,
-                           BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
-                           "BG_%d : MBX CMD FW_SESSION_LOGOUT_TARGET Failed\n");
-               return -EBUSY;
-       }
-
-       resp = embedded_payload(wrb);
-       if (resp->session_status !=
-               BEISCSI_MGMT_SESSION_CLOSE) {
-               beiscsi_log(phba, KERN_ERR,
-                           BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
-                           "BG_%d : FW_SESSION_LOGOUT_TARGET resp : 0x%x\n",
-                           resp->session_status);
-               rc = -EINVAL;
-       }
-
-       return rc;
-}
index f3a48a04b2ca6d3254e61777f7939410cac0fa53..b897cfd57c72a3ff52ede0d86418218b764773dc 100644 (file)
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2015 Emulex
+ * Copyright (C) 2005 - 2016 Broadcom
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -7,10 +7,10 @@
  * as published by the Free Software Foundation.  The full GNU General
  * Public License is included in this distribution in the file called COPYING.
  *
- * Written by: Jayamohan Kallickal (jayamohan.kallickal@avagotech.com)
+ * Written by: Jayamohan Kallickal (jayamohan.kallickal@broadcom.com)
  *
  * Contact Information:
- * linux-drivers@avagotech.com
+ * linux-drivers@broadcom.com
  *
  * Emulex
  * 3333 Susan Street
@@ -96,7 +96,6 @@ struct mcc_wrb {
        struct mcc_wrb_payload payload;
 };
 
-int mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short chute);
 int mgmt_open_connection(struct beiscsi_hba *phba,
                         struct sockaddr *dst_addr,
                         struct beiscsi_endpoint *beiscsi_ep,
@@ -266,50 +265,41 @@ struct beiscsi_endpoint {
        u16 cid_vld;
 };
 
-int mgmt_get_fw_config(struct be_ctrl_info *ctrl,
-                                struct beiscsi_hba *phba);
-int mgmt_get_port_name(struct be_ctrl_info *ctrl,
-                      struct beiscsi_hba *phba);
-
 unsigned int mgmt_invalidate_connection(struct beiscsi_hba *phba,
                                         struct beiscsi_endpoint *beiscsi_ep,
                                         unsigned short cid,
                                         unsigned short issue_reset,
                                         unsigned short savecfg_flag);
 
-int mgmt_set_ip(struct beiscsi_hba *phba,
-               struct iscsi_iface_param_info *ip_param,
-               struct iscsi_iface_param_info *subnet_param,
-               uint32_t boot_proto);
+int beiscsi_if_en_dhcp(struct beiscsi_hba *phba, u32 ip_type);
 
-unsigned int mgmt_get_boot_target(struct beiscsi_hba *phba);
+int beiscsi_if_en_static(struct beiscsi_hba *phba, u32 ip_type,
+                        u8 *ip, u8 *subnet);
 
-unsigned int mgmt_reopen_session(struct beiscsi_hba *phba,
-                                 unsigned int reopen_type,
-                                 unsigned sess_handle);
+int beiscsi_if_set_gw(struct beiscsi_hba *phba, u32 ip_type, u8 *gw);
 
-unsigned int mgmt_get_session_info(struct beiscsi_hba *phba,
-                                  u32 boot_session_handle,
-                                  struct be_dma_mem *nonemb_cmd);
+int beiscsi_if_get_gw(struct beiscsi_hba *phba, u32 ip_type,
+                     struct be_cmd_get_def_gateway_resp *resp);
 
 int mgmt_get_nic_conf(struct beiscsi_hba *phba,
                      struct be_cmd_get_nic_conf_resp *mac);
 
-int mgmt_get_if_info(struct beiscsi_hba *phba, int ip_type,
-                    struct be_cmd_get_if_info_resp **if_info);
+int beiscsi_if_get_info(struct beiscsi_hba *phba, int ip_type,
+                       struct be_cmd_get_if_info_resp **if_info);
+
+unsigned int beiscsi_if_get_handle(struct beiscsi_hba *phba);
+
+int beiscsi_if_set_vlan(struct beiscsi_hba *phba, uint16_t vlan_tag);
 
-int mgmt_get_gateway(struct beiscsi_hba *phba, int ip_type,
-                    struct be_cmd_get_def_gateway_resp *gateway);
+unsigned int beiscsi_boot_logout_sess(struct beiscsi_hba *phba);
 
-int mgmt_set_gateway(struct beiscsi_hba *phba,
-                    struct iscsi_iface_param_info *gateway_param);
+unsigned int beiscsi_boot_reopen_sess(struct beiscsi_hba *phba);
 
-int be_mgmt_get_boot_shandle(struct beiscsi_hba *phba,
-                             unsigned int *s_handle);
+unsigned int beiscsi_boot_get_sinfo(struct beiscsi_hba *phba);
 
-unsigned int mgmt_get_all_if_id(struct beiscsi_hba *phba);
+unsigned int __beiscsi_boot_get_shandle(struct beiscsi_hba *phba, int async);
 
-int mgmt_set_vlan(struct beiscsi_hba *phba, uint16_t vlan_tag);
+int beiscsi_boot_get_shandle(struct beiscsi_hba *phba, unsigned int *s_handle);
 
 ssize_t beiscsi_drvr_ver_disp(struct device *dev,
                               struct device_attribute *attr, char *buf);
@@ -339,7 +329,6 @@ void beiscsi_offload_cxn_v2(struct beiscsi_offload_params *params,
                             struct wrb_handle *pwrb_handle,
                             struct hwi_wrb_context *pwrb_context);
 
-void beiscsi_ue_detect(struct beiscsi_hba *phba);
 int be_cmd_modify_eq_delay(struct beiscsi_hba *phba,
                         struct be_set_eqd *, int num);
 
index 7733ad5305d4100bd87b472dceb8e98bbc53373d..4ddda72f60e62b8f787c2843d32d40fd4851f75b 100644 (file)
@@ -5827,13 +5827,13 @@ bfa_fcs_lport_get_rport_max_speed(bfa_fcs_lport_t *port)
        bfa_port_speed_t max_speed = 0;
        struct bfa_port_attr_s port_attr;
        bfa_port_speed_t port_speed, rport_speed;
-       bfa_boolean_t trl_enabled = bfa_fcport_is_ratelim(port->fcs->bfa);
-
+       bfa_boolean_t trl_enabled;
 
        if (port == NULL)
                return 0;
 
        fcs = port->fcs;
+       trl_enabled = bfa_fcport_is_ratelim(port->fcs->bfa);
 
        /* Get Physical port's current speed */
        bfa_fcport_get_attr(port->fcs->bfa, &port_attr);
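
The bfa hunk above fixes a dereference-before-NULL-check: trl_enabled was
initialized from port->fcs->bfa before the if (port == NULL) test could bail
out. A minimal self-contained illustration of the pattern (types and names
hypothetical, not the bfa structures):

#include <stddef.h>
#include <stdio.h>

struct port { int speed; };

/* Wrong: p->speed is evaluated in the initializer, before the NULL check */
static int get_speed_buggy(struct port *p)
{
	int speed = p->speed;	/* crashes when p == NULL */

	if (p == NULL)
		return 0;
	return speed;
}

/* Right: declare first, check, then dereference (as in the hunk above) */
static int get_speed_fixed(struct port *p)
{
	int speed;

	if (p == NULL)
		return 0;
	speed = p->speed;
	return speed;
}

int main(void)
{
	struct port a = { 8 };

	/* prints "8 8 0"; calling get_speed_buggy(NULL) would crash */
	printf("%d %d %d\n", get_speed_buggy(&a),
	       get_speed_fixed(&a), get_speed_fixed(NULL));
	return 0;
}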
index ce1507023132369d073c89577bedf5e8d36138e3..c91fe6fe8d088323de2aa3833462ee6890d9c061 100644 (file)
@@ -709,14 +709,13 @@ int cxlflash_disk_release(struct scsi_device *sdev,
  * @cfg:       Internal structure associated with the host.
  * @ctxi:      Context to release.
  *
- * This routine is safe to be called with a a non-initialized context
- * and is tolerant of being called with the context's mutex held (it
- * will be unlocked if necessary before freeing). Also note that the
- * routine conditionally checks for the existence of the context control
- * map before clearing the RHT registers and context capabilities because
- * it is possible to destroy a context while the context is in the error
- * state (previous mapping was removed [so there is no need to worry about
- * clearing] and context is waiting for a new mapping).
+ * This routine is safe to be called with a non-initialized context.
+ * Also note that the routine conditionally checks for the existence
+ * of the context control map before clearing the RHT registers and
+ * context capabilities because it is possible to destroy a context
+ * while the context is in the error state (previous mapping was
+ * removed [so there is no need to worry about clearing] and context
+ * is waiting for a new mapping).
  */
 static void destroy_context(struct cxlflash_cfg *cfg,
                            struct ctx_info *ctxi)
@@ -732,9 +731,6 @@ static void destroy_context(struct cxlflash_cfg *cfg,
                        writeq_be(0, &ctxi->ctrl_map->rht_cnt_id);
                        writeq_be(0, &ctxi->ctrl_map->ctx_cap);
                }
-
-               if (mutex_is_locked(&ctxi->mutex))
-                       mutex_unlock(&ctxi->mutex);
        }
 
        /* Free memory associated with context */
@@ -792,32 +788,58 @@ err:
  * @cfg:       Internal structure associated with the host.
  * @ctx:       Previously obtained CXL context reference.
  * @ctxid:     Previously obtained process element associated with CXL context.
- * @adap_fd:   Previously obtained adapter fd associated with CXL context.
  * @file:      Previously obtained file associated with CXL context.
  * @perms:     User-specified permissions.
- *
- * Upon return, the context is marked as initialized and the context's mutex
- * is locked.
  */
 static void init_context(struct ctx_info *ctxi, struct cxlflash_cfg *cfg,
-                        struct cxl_context *ctx, int ctxid, int adap_fd,
-                        struct file *file, u32 perms)
+                        struct cxl_context *ctx, int ctxid, struct file *file,
+                        u32 perms)
 {
        struct afu *afu = cfg->afu;
 
        ctxi->rht_perms = perms;
        ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl;
        ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid);
-       ctxi->lfd = adap_fd;
        ctxi->pid = current->tgid; /* tgid = pid */
        ctxi->ctx = ctx;
+       ctxi->cfg = cfg;
        ctxi->file = file;
        ctxi->initialized = true;
        mutex_init(&ctxi->mutex);
+       kref_init(&ctxi->kref);
        INIT_LIST_HEAD(&ctxi->luns);
        INIT_LIST_HEAD(&ctxi->list); /* initialize for list_empty() */
+}
 
+/**
+ * remove_context() - context kref release handler
+ * @kref:      Kernel reference associated with context to be removed.
+ *
+ * When a context no longer has any references it can safely be removed
+ * from global access and destroyed. Note that it is assumed the thread
+ * relinquishing access to the context holds its mutex.
+ */
+static void remove_context(struct kref *kref)
+{
+       struct ctx_info *ctxi = container_of(kref, struct ctx_info, kref);
+       struct cxlflash_cfg *cfg = ctxi->cfg;
+       u64 ctxid = DECODE_CTXID(ctxi->ctxid);
+
+       /* Remove context from table/error list */
+       WARN_ON(!mutex_is_locked(&ctxi->mutex));
+       ctxi->unavail = true;
+       mutex_unlock(&ctxi->mutex);
+       mutex_lock(&cfg->ctx_tbl_list_mutex);
        mutex_lock(&ctxi->mutex);
+
+       if (!list_empty(&ctxi->list))
+               list_del(&ctxi->list);
+       cfg->ctx_tbl[ctxid] = NULL;
+       mutex_unlock(&cfg->ctx_tbl_list_mutex);
+       mutex_unlock(&ctxi->mutex);
+
+       /* Context now completely uncoupled/unreachable */
+       destroy_context(cfg, ctxi);
 }
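
The hunks above move the cxlflash context lifetime onto reference counting:
the initial attach initializes the kref, each additional LUN attach takes
another reference, and remove_context() runs as the kref release handler once
the final detach drops the count to zero. Below is a minimal userspace sketch
of that release-callback pattern; the kref shim and all names here are toy
stand-ins, not the kernel implementation from <linux/kref.h>.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for the kernel's struct kref */
struct kref { atomic_int refcount; };

static void kref_init(struct kref *k) { atomic_init(&k->refcount, 1); }
static void kref_get(struct kref *k)  { atomic_fetch_add(&k->refcount, 1); }

/* Returns 1 if this put released the object, mirroring the kernel API */
static int kref_put(struct kref *k, void (*release)(struct kref *))
{
	if (atomic_fetch_sub(&k->refcount, 1) == 1) {
		release(k);
		return 1;
	}
	return 0;
}

struct ctx { struct kref kref; };

static void remove_ctx(struct kref *k)
{
	/* the kernel uses container_of(); kref is the first member here */
	struct ctx *c = (struct ctx *)k;

	printf("last reference dropped, destroying context\n");
	free(c);
}

int main(void)
{
	struct ctx *c = malloc(sizeof(*c));

	kref_init(&c->kref);            /* initial attach: count = 1 */
	kref_get(&c->kref);             /* reuse attach:   count = 2 */
	kref_put(&c->kref, remove_ctx); /* first detach:   count = 1 */
	kref_put(&c->kref, remove_ctx); /* final detach: release runs */
	return 0;
}

This mirrors the put_ctx = !kref_put(&ctxi->kref, remove_context) idiom in
_cxlflash_disk_detach(): the caller only drops its context lock if the put
did not just destroy the context.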
 
 /**
@@ -845,7 +867,6 @@ static int _cxlflash_disk_detach(struct scsi_device *sdev,
 
        int i;
        int rc = 0;
-       int lfd;
        u64 ctxid = DECODE_CTXID(detach->context_id),
            rctxid = detach->context_id;
 
@@ -887,40 +908,12 @@ static int _cxlflash_disk_detach(struct scsi_device *sdev,
                        break;
                }
 
-       /* Tear down context following last LUN cleanup */
-       if (list_empty(&ctxi->luns)) {
-               ctxi->unavail = true;
-               mutex_unlock(&ctxi->mutex);
-               mutex_lock(&cfg->ctx_tbl_list_mutex);
-               mutex_lock(&ctxi->mutex);
-
-               /* Might not have been in error list so conditionally remove */
-               if (!list_empty(&ctxi->list))
-                       list_del(&ctxi->list);
-               cfg->ctx_tbl[ctxid] = NULL;
-               mutex_unlock(&cfg->ctx_tbl_list_mutex);
-               mutex_unlock(&ctxi->mutex);
-
-               lfd = ctxi->lfd;
-               destroy_context(cfg, ctxi);
-               ctxi = NULL;
-               put_ctx = false;
-
-               /*
-                * As a last step, clean up external resources when not
-                * already on an external cleanup thread, i.e.: close(adap_fd).
-                *
-                * NOTE: this will free up the context from the CXL services,
-                * allowing it to dole out the same context_id on a future
-                * (or even currently in-flight) disk_attach operation.
-                */
-               if (lfd != -1)
-                       sys_close(lfd);
-       }
-
-       /* Release the sdev reference that bound this LUN to the context */
+       /*
+        * Release the context reference and the sdev reference that
+        * bound this LUN to the context.
+        */
+       put_ctx = !kref_put(&ctxi->kref, remove_context);
        scsi_device_put(sdev);
-
 out:
        if (put_ctx)
                put_context(ctxi);
@@ -941,34 +934,18 @@ static int cxlflash_disk_detach(struct scsi_device *sdev,
  *
  * This routine is the release handler for the fops registered with
  * the CXL services on an initial attach for a context. It is called
- * when a close is performed on the adapter file descriptor returned
- * to the user. Programmatically, the user is not required to perform
- * the close, as it is handled internally via the detach ioctl when
- * a context is being removed. Note that nothing prevents the user
- * from performing a close, but the user should be aware that doing
- * so is considered catastrophic and subsequent usage of the superpipe
- * API with previously saved off tokens will fail.
- *
- * When initiated from an external close (either by the user or via
- * a process tear down), the routine derives the context reference
- * and calls detach for each LUN associated with the context. The
- * final detach operation will cause the context itself to be freed.
- * Note that the saved off lfd is reset prior to calling detach to
- * signify that the final detach should not perform a close.
- *
- * When initiated from a detach operation as part of the tear down
- * of a context, the context is first completely freed and then the
- * close is performed. This routine will fail to derive the context
- * reference (due to the context having already been freed) and then
- * call into the CXL release entry point.
+ * when a close (explicitly by the user or as part of a process tear
+ * down) is performed on the adapter file descriptor returned to the
+ * user. The user should be aware that explicitly performing a close
+ * is considered catastrophic and subsequent usage of the superpipe
+ * API with previously saved off tokens will fail.
  *
- * Thus, with exception to when the CXL process element (context id)
- * lookup fails (a case that should theoretically never occur), every
- * call into this routine results in a complete freeing of a context.
- *
- * As part of the detach, all per-context resources associated with the LUN
- * are cleaned up. When detaching the last LUN for a context, the context
- * itself is cleaned up and released.
+ * This routine derives the context reference and calls detach for
+ * each LUN associated with the context. The final detach operation
+ * causes the context itself to be freed. With exception to when the
+ * CXL process element (context id) lookup fails (a case that should
+ * theoretically never occur), every call into this routine results
+ * in a complete freeing of a context.
  *
  * Return: 0 on success
  */
@@ -1006,11 +983,8 @@ static int cxlflash_cxl_release(struct inode *inode, struct file *file)
                goto out;
        }
 
-       dev_dbg(dev, "%s: close(%d) for context %d\n",
-               __func__, ctxi->lfd, ctxid);
+       dev_dbg(dev, "%s: close for context %d\n", __func__, ctxid);
 
-       /* Reset the file descriptor to indicate we're on a close() thread */
-       ctxi->lfd = -1;
        detach.context_id = ctxi->ctxid;
        list_for_each_entry_safe(lun_access, t, &ctxi->luns, list)
                _cxlflash_disk_detach(lun_access->sdev, ctxi, &detach);
@@ -1110,8 +1084,7 @@ static int cxlflash_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
                goto err;
        }
 
-       dev_dbg(dev, "%s: fault(%d) for context %d\n",
-               __func__, ctxi->lfd, ctxid);
+       dev_dbg(dev, "%s: fault for context %d\n", __func__, ctxid);
 
        if (likely(!ctxi->err_recovery_active)) {
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
@@ -1186,8 +1159,7 @@ static int cxlflash_cxl_mmap(struct file *file, struct vm_area_struct *vma)
                goto out;
        }
 
-       dev_dbg(dev, "%s: mmap(%d) for context %d\n",
-               __func__, ctxi->lfd, ctxid);
+       dev_dbg(dev, "%s: mmap for context %d\n", __func__, ctxid);
 
        rc = cxl_fd_mmap(file, vma);
        if (likely(!rc)) {
@@ -1377,12 +1349,12 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
        lun_access->lli = lli;
        lun_access->sdev = sdev;
 
-       /* Non-NULL context indicates reuse */
+       /* Non-NULL context indicates reuse (another context reference) */
        if (ctxi) {
                dev_dbg(dev, "%s: Reusing context for LUN! (%016llX)\n",
                        __func__, rctxid);
+               kref_get(&ctxi->kref);
                list_add(&lun_access->list, &ctxi->luns);
-               fd = ctxi->lfd;
                goto out_attach;
        }
 
@@ -1430,7 +1402,7 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
        perms = SISL_RHT_PERM(attach->hdr.flags + 1);
 
        /* Context mutex is locked upon return */
-       init_context(ctxi, cfg, ctx, ctxid, fd, file, perms);
+       init_context(ctxi, cfg, ctx, ctxid, file, perms);
 
        rc = afu_attach(cfg, ctxi);
        if (unlikely(rc)) {
@@ -1445,7 +1417,6 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
         * knows about us yet; we can be the only one holding our mutex.
         */
        list_add(&lun_access->list, &ctxi->luns);
-       mutex_unlock(&ctxi->mutex);
        mutex_lock(&cfg->ctx_tbl_list_mutex);
        mutex_lock(&ctxi->mutex);
        cfg->ctx_tbl[ctxid] = ctxi;
@@ -1453,7 +1424,11 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
        fd_install(fd, file);
 
 out_attach:
-       attach->hdr.return_flags = 0;
+       if (fd != -1)
+               attach->hdr.return_flags = DK_CXLFLASH_APP_CLOSE_ADAP_FD;
+       else
+               attach->hdr.return_flags = 0;
+
        attach->context_id = ctxi->ctxid;
        attach->block_size = gli->blk_len;
        attach->mmio_size = sizeof(afu->afu_map->hosts[0].harea);
@@ -1494,7 +1469,7 @@ err:
                file = NULL;
        }
 
-       /* Cleanup our context; safe to call even with mutex locked */
+       /* Cleanup our context */
        if (ctxi) {
                destroy_context(cfg, ctxi);
                ctxi = NULL;
@@ -1509,16 +1484,19 @@ err:
  * recover_context() - recovers a context in error
  * @cfg:       Internal structure associated with the host.
  * @ctxi:      Context to release.
+ * @adap_fd:   Adapter file descriptor associated with new/recovered context.
  *
  * Reestablishes the state for a context-in-error.
  *
  * Return: 0 on success, -errno on failure
  */
-static int recover_context(struct cxlflash_cfg *cfg, struct ctx_info *ctxi)
+static int recover_context(struct cxlflash_cfg *cfg,
+                          struct ctx_info *ctxi,
+                          int *adap_fd)
 {
        struct device *dev = &cfg->dev->dev;
        int rc = 0;
-       int old_fd, fd = -1;
+       int fd = -1;
        int ctxid = -1;
        struct file *file;
        struct cxl_context *ctx;
@@ -1566,9 +1544,7 @@ static int recover_context(struct cxlflash_cfg *cfg, struct ctx_info *ctxi)
         * No error paths after this point. Once the fd is installed it's
         * visible to user space and can't be undone safely on this thread.
         */
-       old_fd = ctxi->lfd;
        ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid);
-       ctxi->lfd = fd;
        ctxi->ctx = ctx;
        ctxi->file = file;
 
@@ -1585,9 +1561,7 @@ static int recover_context(struct cxlflash_cfg *cfg, struct ctx_info *ctxi)
        cfg->ctx_tbl[ctxid] = ctxi;
        mutex_unlock(&cfg->ctx_tbl_list_mutex);
        fd_install(fd, file);
-
-       /* Release the original adapter fd and associated CXL resources */
-       sys_close(old_fd);
+       *adap_fd = fd;
 out:
        dev_dbg(dev, "%s: returning ctxid=%d fd=%d rc=%d\n",
                __func__, ctxid, fd, rc);
@@ -1646,6 +1620,7 @@ static int cxlflash_afu_recover(struct scsi_device *sdev,
            rctxid = recover->context_id;
        long reg;
        int lretry = 20; /* up to 2 seconds */
+       int new_adap_fd = -1;
        int rc = 0;
 
        atomic_inc(&cfg->recovery_threads);
@@ -1675,7 +1650,7 @@ retry:
 
        if (ctxi->err_recovery_active) {
 retry_recover:
-               rc = recover_context(cfg, ctxi);
+               rc = recover_context(cfg, ctxi, &new_adap_fd);
                if (unlikely(rc)) {
                        dev_err(dev, "%s: Recovery failed for context %llu (rc=%d)\n",
                                __func__, ctxid, rc);
@@ -1697,9 +1672,9 @@ retry_recover:
 
                ctxi->err_recovery_active = false;
                recover->context_id = ctxi->ctxid;
-               recover->adap_fd = ctxi->lfd;
+               recover->adap_fd = new_adap_fd;
                recover->mmio_size = sizeof(afu->afu_map->hosts[0].harea);
-               recover->hdr.return_flags |=
+               recover->hdr.return_flags = DK_CXLFLASH_APP_CLOSE_ADAP_FD |
                        DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET;
                goto out;
        }
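
Taken together, these superpipe changes switch cxlflash to an application
close model: the adapter file descriptor is handed out once per context, and
the driver sets DK_CXLFLASH_APP_CLOSE_ADAP_FD in hdr.return_flags when the
application now owns (and must eventually close) that descriptor. A rough
userspace sketch follows; field and macro names are taken on the assumption
that the uapi header from include/uapi/scsi/cxlflash_ioctl.h is installed,
and the flags/interrupt setup a real caller would perform is elided.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <scsi/cxlflash_ioctl.h>	/* installed uapi header (assumed) */

/* Caller supplies the path of a cxlflash scsi disk, e.g. a /dev/sdX node */
int attach_example(const char *sdev_path)
{
	struct dk_cxlflash_attach attach;
	int fd = open(sdev_path, O_RDWR);

	if (fd < 0)
		return -1;

	memset(&attach, 0, sizeof(attach));
	attach.hdr.version = DK_CXLFLASH_VERSION_0;

	if (ioctl(fd, DK_CXLFLASH_ATTACH, &attach) < 0) {
		close(fd);
		return -1;
	}

	/*
	 * With this series the adapter fd comes back only on the initial
	 * attach; the flag says the application now owns it and must
	 * close() it itself once done (i.e. after the final detach).
	 */
	if (attach.hdr.return_flags & DK_CXLFLASH_APP_CLOSE_ADAP_FD)
		printf("own adap_fd %lld, close after detach\n",
		       (long long)attach.adap_fd);

	return fd;
}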
index 5f9a091fda95e341d02f896a6bc5799641e07832..9e62ff304e4b9778b2dd17c743afad43280bf8b6 100644 (file)
@@ -100,13 +100,14 @@ struct ctx_info {
 
        struct cxl_ioctl_start_work work;
        u64 ctxid;
-       int lfd;
        pid_t pid;
        bool initialized;
        bool unavail;
        bool err_recovery_active;
        struct mutex mutex; /* Context protection */
+       struct kref kref;
        struct cxl_context *ctx;
+       struct cxlflash_cfg *cfg;
        struct list_head luns;  /* LUNs attached to this context */
        const struct vm_operations_struct *cxl_mmap_vmops;
        struct file *file;
index 50f8e93007704667fb0c1311834c0a5f0d50ff52..90c5d7f5278e2350c4b65098c6a3124d76fd7f6c 100644 (file)
@@ -1135,14 +1135,13 @@ int cxlflash_disk_clone(struct scsi_device *sdev,
            ctxid_dst = DECODE_CTXID(clone->context_id_dst),
            rctxid_src = clone->context_id_src,
            rctxid_dst = clone->context_id_dst;
-       int adap_fd_src = clone->adap_fd_src;
        int i, j;
        int rc = 0;
        bool found;
        LIST_HEAD(sidecar);
 
-       pr_debug("%s: ctxid_src=%llu ctxid_dst=%llu adap_fd_src=%d\n",
-                __func__, ctxid_src, ctxid_dst, adap_fd_src);
+       pr_debug("%s: ctxid_src=%llu ctxid_dst=%llu\n",
+                __func__, ctxid_src, ctxid_dst);
 
        /* Do not clone yourself */
        if (unlikely(rctxid_src == rctxid_dst)) {
@@ -1166,13 +1165,6 @@ int cxlflash_disk_clone(struct scsi_device *sdev,
                goto out;
        }
 
-       if (unlikely(adap_fd_src != ctxi_src->lfd)) {
-               pr_debug("%s: Invalid source adapter fd! (%d)\n",
-                        __func__, adap_fd_src);
-               rc = -EINVAL;
-               goto out;
-       }
-
        /* Verify there is no open resource handle in the destination context */
        for (i = 0; i < MAX_RHT_PER_CONTEXT; i++)
                if (ctxi_dst->rht_start[i].nmask != 0) {
@@ -1257,7 +1249,6 @@ int cxlflash_disk_clone(struct scsi_device *sdev,
 
 out_success:
        list_splice(&sidecar, &ctxi_dst->luns);
-       sys_close(adap_fd_src);
 
        /* fall through */
 out:
index 78ce4d61a69bbab88e1f90d52ea8c3084c12f0fe..d6e53aee22952190af34ce0f99fd1984a4d6a9c1 100644 (file)
@@ -963,10 +963,6 @@ bool esas2r_init_adapter_struct(struct esas2r_adapter *a,
 
        /* initialize the allocated memory */
        if (test_bit(AF_FIRST_INIT, &a->flags)) {
-               memset(a->req_table, 0,
-                      (num_requests + num_ae_requests +
-                       1) * sizeof(struct esas2r_request *));
-
                esas2r_targ_db_initialize(a);
 
                /* prime parts of the inbound list */
index 2aca4d16f39eba873df26e469c929f6bb0a5140a..5092c821d0887d09c23679bde31e1295eb71d518 100644 (file)
@@ -194,7 +194,7 @@ static ssize_t write_hw(struct file *file, struct kobject *kobj,
        int length = min(sizeof(struct atto_ioctl), count);
 
        if (!a->local_atto_ioctl) {
-               a->local_atto_ioctl = kzalloc(sizeof(struct atto_ioctl),
+               a->local_atto_ioctl = kmalloc(sizeof(struct atto_ioctl),
                                              GFP_KERNEL);
                if (a->local_atto_ioctl == NULL) {
                        esas2r_log(ESAS2R_LOG_WARN,
index 7028dd37e5dd4bc6ffe27d4f57d7f58336d4412c..c164eec5430860a785423544b30fdddf989d6cd9 100644 (file)
@@ -83,6 +83,41 @@ static struct notifier_block libfcoe_notifier = {
        .notifier_call = libfcoe_device_notification,
 };
 
+static const struct {
+       u32 fc_port_speed;
+#define SPEED_2000     2000
+#define SPEED_4000     4000
+#define SPEED_8000     8000
+#define SPEED_16000    16000
+#define SPEED_32000    32000
+       u32 eth_port_speed;
+} fcoe_port_speed_mapping[] = {
+       { FC_PORTSPEED_1GBIT,   SPEED_1000   },
+       { FC_PORTSPEED_2GBIT,   SPEED_2000   },
+       { FC_PORTSPEED_4GBIT,   SPEED_4000   },
+       { FC_PORTSPEED_8GBIT,   SPEED_8000   },
+       { FC_PORTSPEED_10GBIT,  SPEED_10000  },
+       { FC_PORTSPEED_16GBIT,  SPEED_16000  },
+       { FC_PORTSPEED_20GBIT,  SPEED_20000  },
+       { FC_PORTSPEED_25GBIT,  SPEED_25000  },
+       { FC_PORTSPEED_32GBIT,  SPEED_32000  },
+       { FC_PORTSPEED_40GBIT,  SPEED_40000  },
+       { FC_PORTSPEED_50GBIT,  SPEED_50000  },
+       { FC_PORTSPEED_100GBIT, SPEED_100000 },
+};
+
+static inline u32 eth2fc_speed(u32 eth_port_speed)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(fcoe_port_speed_mapping); i++) {
+               if (fcoe_port_speed_mapping[i].eth_port_speed == eth_port_speed)
+                       return fcoe_port_speed_mapping[i].fc_port_speed;
+       }
+
+       return FC_PORTSPEED_UNKNOWN;
+}
+
 /**
  * fcoe_link_speed_update() - Update the supported and actual link speeds
  * @lport: The local port to update speeds for
@@ -126,23 +161,7 @@ int fcoe_link_speed_update(struct fc_lport *lport)
                            SUPPORTED_40000baseLR4_Full))
                        lport->link_supported_speeds |= FC_PORTSPEED_40GBIT;
 
-               switch (ecmd.base.speed) {
-               case SPEED_1000:
-                       lport->link_speed = FC_PORTSPEED_1GBIT;
-                       break;
-               case SPEED_10000:
-                       lport->link_speed = FC_PORTSPEED_10GBIT;
-                       break;
-               case SPEED_20000:
-                       lport->link_speed = FC_PORTSPEED_20GBIT;
-                       break;
-               case SPEED_40000:
-                       lport->link_speed = FC_PORTSPEED_40GBIT;
-                       break;
-               default:
-                       lport->link_speed = FC_PORTSPEED_UNKNOWN;
-                       break;
-               }
+               lport->link_speed = eth2fc_speed(ecmd.base.speed);
                return 0;
        }
        return -1;
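
The new fcoe_port_speed_mapping table replaces the speed switch with a table
lookup, and the loop bound matters: iterating with <= ARRAY_SIZE() would read
one entry past the end of the table, which is why the loop above uses <. A
self-contained sketch of the idiom, with placeholder values rather than the
real FC_PORTSPEED_* bit definitions:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct speed_map { unsigned int eth; unsigned int fc; };

static const struct speed_map map[] = {
	{  1000, 0x1 },		/* placeholder values, not the real bits */
	{ 10000, 0x2 },
	{ 40000, 0x4 },
};

static unsigned int eth2fc(unsigned int eth)
{
	size_t i;

	/* '<' is essential: '<=' would dereference map[ARRAY_SIZE(map)] */
	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (map[i].eth == eth)
			return map[i].fc;

	return 0;		/* cf. FC_PORTSPEED_UNKNOWN */
}

int main(void)
{
	printf("0x%x\n", eth2fc(10000));	/* prints 0x2 */
	return 0;
}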
index 4731d324132361f011221055b8516a58e94b9dab..ca55ec2974e0902a46ab5fca922086975fa2794e 100644 (file)
@@ -23,7 +23,7 @@
 #include <scsi/sas_ata.h>
 #include <scsi/libsas.h>
 
-#define DRV_VERSION "v1.5"
+#define DRV_VERSION "v1.6"
 
 #define HISI_SAS_MAX_PHYS      9
 #define HISI_SAS_MAX_QUEUES    32
@@ -56,6 +56,11 @@ enum dev_status {
        HISI_SAS_DEV_EH,
 };
 
+enum {
+       HISI_SAS_INT_ABT_CMD = 0,
+       HISI_SAS_INT_ABT_DEV = 1,
+};
+
 enum hisi_sas_dev_type {
        HISI_SAS_DEV_TYPE_STP = 0,
        HISI_SAS_DEV_TYPE_SSP,
@@ -146,6 +151,9 @@ struct hisi_sas_hw {
                        struct hisi_sas_slot *slot);
        int (*prep_stp)(struct hisi_hba *hisi_hba,
                        struct hisi_sas_slot *slot);
+       int (*prep_abort)(struct hisi_hba *hisi_hba,
+                         struct hisi_sas_slot *slot,
+                         int device_id, int abort_flag, int tag_to_abort);
        int (*slot_complete)(struct hisi_hba *hisi_hba,
                             struct hisi_sas_slot *slot, int abort);
        void (*phy_enable)(struct hisi_hba *hisi_hba, int phy_no);
index 18dd5ea2c721e021be7214bd83952ce5cce9f4e9..85c73d311e4d75669841be26e2be1ffd9a233d7e 100644 (file)
 
 static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
                                u8 *lun, struct hisi_sas_tmf_task *tmf);
+static int
+hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
+                            struct domain_device *device,
+                            int abort_flag, int tag);
 
 static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device)
 {
@@ -116,6 +120,14 @@ static int hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba,
        return hisi_hba->hw->prep_stp(hisi_hba, slot);
 }
 
+static int hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
+               struct hisi_sas_slot *slot,
+               int device_id, int abort_flag, int tag_to_abort)
+{
+       return hisi_hba->hw->prep_abort(hisi_hba, slot,
+                       device_id, abort_flag, tag_to_abort);
+}
+
 /*
  * This function will issue an abort TMF regardless of whether the
  * task is in the sdev or not. Then it will do the task complete
@@ -192,7 +204,7 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba,
                return rc;
        }
        port = device->port->lldd_port;
-       if (port && !port->port_attached && !tmf) {
+       if (port && !port->port_attached) {
                if (sas_protocol_ata(task->task_proto)) {
                        struct task_status_struct *ts = &task->task_status;
 
@@ -609,6 +621,9 @@ static void hisi_sas_dev_gone(struct domain_device *device)
        dev_info(dev, "found dev[%lld:%x] is gone\n",
                 sas_dev->device_id, sas_dev->dev_type);
 
+       hisi_sas_internal_task_abort(hisi_hba, device,
+                                    HISI_SAS_INT_ABT_DEV, 0);
+
        hisi_hba->hw->free_device(hisi_hba, sas_dev);
        device->lldd_dev = NULL;
        memset(sas_dev, 0, sizeof(*sas_dev));
@@ -728,6 +743,12 @@ static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
                        break;
                }
 
+               if (task->task_status.resp == SAS_TASK_COMPLETE &&
+                       task->task_status.stat == TMF_RESP_FUNC_SUCC) {
+                       res = TMF_RESP_FUNC_SUCC;
+                       break;
+               }
+
                if (task->task_status.resp == SAS_TASK_COMPLETE &&
                      task->task_status.stat == SAS_DATA_UNDERRUN) {
                        /* no error, but return the number of bytes of
@@ -826,18 +847,22 @@ static int hisi_sas_abort_task(struct sas_task *task)
                        }
                }
 
+               hisi_sas_internal_task_abort(hisi_hba, device,
+                                            HISI_SAS_INT_ABT_CMD, tag);
        } else if (task->task_proto & SAS_PROTOCOL_SATA ||
                task->task_proto & SAS_PROTOCOL_STP) {
                if (task->dev->dev_type == SAS_SATA_DEV) {
-                       struct hisi_slot_info *slot = task->lldd_task;
-
-                       dev_notice(dev, "abort task: hba=%p task=%p slot=%p\n",
-                                  hisi_hba, task, slot);
-                       task->task_state_flags |= SAS_TASK_STATE_ABORTED;
+                       hisi_sas_internal_task_abort(hisi_hba, device,
+                                                    HISI_SAS_INT_ABT_DEV, 0);
                        rc = TMF_RESP_FUNC_COMPLETE;
-                       goto out;
                }
+       } else if (task->task_proto & SAS_PROTOCOL_SMP) {
+               /* SMP */
+               struct hisi_sas_slot *slot = task->lldd_task;
+               u32 tag = slot->idx;
 
+               hisi_sas_internal_task_abort(hisi_hba, device,
+                                            HISI_SAS_INT_ABT_CMD, tag);
        }
 
 out:
@@ -954,6 +979,157 @@ static int hisi_sas_query_task(struct sas_task *task)
        return rc;
 }
 
+static int
+hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, u64 device_id,
+                                 struct sas_task *task, int abort_flag,
+                                 int task_tag)
+{
+       struct domain_device *device = task->dev;
+       struct hisi_sas_device *sas_dev = device->lldd_dev;
+       struct device *dev = &hisi_hba->pdev->dev;
+       struct hisi_sas_port *port;
+       struct hisi_sas_slot *slot;
+       struct hisi_sas_cmd_hdr *cmd_hdr_base;
+       int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
+
+       if (!device->port)
+               return -1;
+
+       port = device->port->lldd_port;
+
+       /* simply get a slot and send abort command */
+       rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
+       if (rc)
+               goto err_out;
+       rc = hisi_hba->hw->get_free_slot(hisi_hba, &dlvry_queue,
+                                        &dlvry_queue_slot);
+       if (rc)
+               goto err_out_tag;
+
+       slot = &hisi_hba->slot_info[slot_idx];
+       memset(slot, 0, sizeof(struct hisi_sas_slot));
+
+       slot->idx = slot_idx;
+       slot->n_elem = n_elem;
+       slot->dlvry_queue = dlvry_queue;
+       slot->dlvry_queue_slot = dlvry_queue_slot;
+       cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
+       slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
+       slot->task = task;
+       slot->port = port;
+       task->lldd_task = slot;
+
+       memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
+
+       rc = hisi_sas_task_prep_abort(hisi_hba, slot, device_id,
+                                     abort_flag, task_tag);
+       if (rc)
+               goto err_out_tag;
+
+       /*
+        * The port structure is static for the HBA, so it is safe
+        * to reference even if the port has been deformed.
+        */
+       list_add_tail(&slot->entry, &port->list);
+       spin_lock(&task->task_state_lock);
+       task->task_state_flags |= SAS_TASK_AT_INITIATOR;
+       spin_unlock(&task->task_state_lock);
+
+       hisi_hba->slot_prep = slot;
+
+       sas_dev->running_req++;
+       /* send abort command to our chip */
+       hisi_hba->hw->start_delivery(hisi_hba);
+
+       return 0;
+
+err_out_tag:
+       hisi_sas_slot_index_free(hisi_hba, slot_idx);
+err_out:
+       dev_err(dev, "internal abort task prep: failed[%d]!\n", rc);
+
+       return rc;
+}
+
+/**
+ * hisi_sas_internal_task_abort -- execute an internal
+ * abort command for single IO command or a device
+ * @hisi_hba: host controller struct
+ * @device: domain device
+ * @abort_flag: mode of operation, device or single IO
+ * @tag: tag of IO to be aborted (only relevant to single
+ *       IO mode)
+ */
+static int
+hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
+                            struct domain_device *device,
+                            int abort_flag, int tag)
+{
+       struct sas_task *task;
+       struct hisi_sas_device *sas_dev = device->lldd_dev;
+       struct device *dev = &hisi_hba->pdev->dev;
+       int res;
+       unsigned long flags;
+
+       if (!hisi_hba->hw->prep_abort)
+               return -EOPNOTSUPP;
+
+       task = sas_alloc_slow_task(GFP_KERNEL);
+       if (!task)
+               return -ENOMEM;
+
+       task->dev = device;
+       task->task_proto = device->tproto;
+       task->task_done = hisi_sas_task_done;
+       task->slow_task->timer.data = (unsigned long)task;
+       task->slow_task->timer.function = hisi_sas_tmf_timedout;
+       task->slow_task->timer.expires = jiffies + 20*HZ;
+       add_timer(&task->slow_task->timer);
+
+       /* Lock as we are alloc'ing a slot, which cannot be interrupted */
+       spin_lock_irqsave(&hisi_hba->lock, flags);
+       res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id,
+                                               task, abort_flag, tag);
+       spin_unlock_irqrestore(&hisi_hba->lock, flags);
+       if (res) {
+               del_timer(&task->slow_task->timer);
+               dev_err(dev, "internal task abort: executing internal task failed: %d\n",
+                       res);
+               goto exit;
+       }
+       wait_for_completion(&task->slow_task->completion);
+       res = TMF_RESP_FUNC_FAILED;
+
+       if (task->task_status.resp == SAS_TASK_COMPLETE &&
+               task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
+               res = TMF_RESP_FUNC_COMPLETE;
+               goto exit;
+       }
+
+       /* TMF timed out, return directly. */
+       if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
+               if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
+                       dev_err(dev, "internal task abort: timeout.\n");
+                       if (task->lldd_task) {
+                               struct hisi_sas_slot *slot = task->lldd_task;
+
+                               hisi_sas_slot_task_free(hisi_hba, task, slot);
+                       }
+               }
+       }
+
+exit:
+       dev_info(dev, "internal task abort: task to dev %016llx task=%p "
+               "resp: 0x%x sts 0x%x\n",
+               SAS_ADDR(device->sas_addr),
+               task,
+               task->task_status.resp, /* 0 is complete, -1 is undelivered */
+               task->task_status.stat);
+       sas_free_task(task);
+
+       return res;
+}
+
 static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
 {
        hisi_sas_port_notify_formed(sas_phy);
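
hisi_sas_internal_task_abort() is fully synchronous: it allocates a slow
task, arms a 20 second timer, delivers the abort to the chip, then sleeps in
wait_for_completion() until either the completion path or the timer handler
wakes it. Below is a rough userspace analogue of that wait-with-deadline
shape using POSIX threads; the kernel primitives are completions and timers,
not a condvar, so this is purely illustrative (build with -pthread).

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  done = PTHREAD_COND_INITIALIZER;
static int completed;

/* Stands in for the hardware completion/slot_complete path */
static void *firmware(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	completed = 1;			/* abort command finished */
	pthread_cond_signal(&done);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;
	struct timespec ts;
	int rc = 0;

	pthread_create(&t, NULL, firmware, NULL);

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += 20;		/* 20s, like the slow task timer */

	pthread_mutex_lock(&lock);
	while (!completed && rc != ETIMEDOUT)
		rc = pthread_cond_timedwait(&done, &lock, &ts);
	pthread_mutex_unlock(&lock);

	printf(rc == ETIMEDOUT ? "abort timed out\n" : "abort completed\n");
	pthread_join(t, NULL);
	return 0;
}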
index f96560431cf1081af9ea473e67dbe19c30d8af23..bf9b693a717fb46578c9a680bf5cb1450843c732 100644 (file)
 /* HW dma structures */
 /* Delivery queue header */
 /* dw0 */
+#define CMD_HDR_ABORT_FLAG_OFF         0
+#define CMD_HDR_ABORT_FLAG_MSK         (0x3 << CMD_HDR_ABORT_FLAG_OFF)
+#define CMD_HDR_ABORT_DEVICE_TYPE_OFF  2
+#define CMD_HDR_ABORT_DEVICE_TYPE_MSK  (0x1 << CMD_HDR_ABORT_DEVICE_TYPE_OFF)
 #define CMD_HDR_RESP_REPORT_OFF                5
 #define CMD_HDR_RESP_REPORT_MSK                (0x1 << CMD_HDR_RESP_REPORT_OFF)
 #define CMD_HDR_TLR_CTRL_OFF           6
 #define CMD_HDR_DIF_SGL_LEN_MSK                (0xffff << CMD_HDR_DIF_SGL_LEN_OFF)
 #define CMD_HDR_DATA_SGL_LEN_OFF       16
 #define CMD_HDR_DATA_SGL_LEN_MSK       (0xffff << CMD_HDR_DATA_SGL_LEN_OFF)
+#define CMD_HDR_ABORT_IPTT_OFF         16
+#define CMD_HDR_ABORT_IPTT_MSK         (0xffff << CMD_HDR_ABORT_IPTT_OFF)
 
 /* Completion header */
 /* dw0 */
 #define CMPLT_HDR_RSPNS_XFRD_MSK       (0x1 << CMPLT_HDR_RSPNS_XFRD_OFF)
 #define CMPLT_HDR_ERX_OFF              12
 #define CMPLT_HDR_ERX_MSK              (0x1 << CMPLT_HDR_ERX_OFF)
+#define CMPLT_HDR_ABORT_STAT_OFF       13
+#define CMPLT_HDR_ABORT_STAT_MSK       (0x7 << CMPLT_HDR_ABORT_STAT_OFF)
+/* abort_stat */
+#define STAT_IO_NOT_VALID              0x1
+#define STAT_IO_NO_DEVICE              0x2
+#define STAT_IO_COMPLETE               0x3
+#define STAT_IO_ABORTED                        0x4
 /* dw1 */
 #define CMPLT_HDR_IPTT_OFF             0
 #define CMPLT_HDR_IPTT_MSK             (0xffff << CMPLT_HDR_IPTT_OFF)
@@ -1563,6 +1576,30 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot,
                goto out;
        }
 
+       /* Use SAS+TMF status codes */
+       switch ((complete_hdr->dw0 & CMPLT_HDR_ABORT_STAT_MSK)
+                       >> CMPLT_HDR_ABORT_STAT_OFF) {
+       case STAT_IO_ABORTED:
+               /* this io has been aborted by abort command */
+               ts->stat = SAS_ABORTED_TASK;
+               goto out;
+       case STAT_IO_COMPLETE:
+               /* internal abort command complete */
+               ts->stat = TMF_RESP_FUNC_COMPLETE;
+               goto out;
+       case STAT_IO_NO_DEVICE:
+               ts->stat = TMF_RESP_FUNC_COMPLETE;
+               goto out;
+       case STAT_IO_NOT_VALID:
+               /* single-IO abort: the controller could not
+                * find the IO that was asked to be aborted
+                */
+               ts->stat = TMF_RESP_FUNC_FAILED;
+               goto out;
+       default:
+               break;
+       }
+
        if ((complete_hdr->dw0 & CMPLT_HDR_ERX_MSK) &&
                (!(complete_hdr->dw0 & CMPLT_HDR_RSPNS_XFRD_MSK))) {
 
@@ -1775,6 +1812,32 @@ static int prep_ata_v2_hw(struct hisi_hba *hisi_hba,
        return 0;
 }
 
+static int prep_abort_v2_hw(struct hisi_hba *hisi_hba,
+               struct hisi_sas_slot *slot,
+               int device_id, int abort_flag, int tag_to_abort)
+{
+       struct sas_task *task = slot->task;
+       struct domain_device *dev = task->dev;
+       struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
+       struct hisi_sas_port *port = slot->port;
+
+       /* dw0 */
+       hdr->dw0 = cpu_to_le32((5 << CMD_HDR_CMD_OFF) | /*abort*/
+                              (port->id << CMD_HDR_PORT_OFF) |
+                              ((dev_is_sata(dev) ? 1:0) <<
+                               CMD_HDR_ABORT_DEVICE_TYPE_OFF) |
+                              (abort_flag << CMD_HDR_ABORT_FLAG_OFF));
+
+       /* dw1 */
+       hdr->dw1 = cpu_to_le32(device_id << CMD_HDR_DEV_ID_OFF);
+
+       /* dw7 */
+       hdr->dw7 = cpu_to_le32(tag_to_abort << CMD_HDR_ABORT_IPTT_OFF);
+       hdr->transfer_tags = cpu_to_le32(slot->idx);
+
+       return 0;
+}
+
 static int phy_up_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
 {
        int i, res = 0;
@@ -2239,6 +2302,7 @@ static const struct hisi_sas_hw hisi_sas_v2_hw = {
        .prep_smp = prep_smp_v2_hw,
        .prep_ssp = prep_ssp_v2_hw,
        .prep_stp = prep_ata_v2_hw,
+       .prep_abort = prep_abort_v2_hw,
        .get_free_slot = get_free_slot_v2_hw,
        .start_delivery = start_delivery_v2_hw,
        .slot_complete = slot_complete_v2_hw,
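
prep_abort_v2_hw() packs the abort parameters into the command header using
the dw0/dw1/dw7 fields defined earlier in this file. The sketch below
reproduces only the encodings whose offsets appear in this diff (abort flag,
device type, and the IPTT of the command being aborted); CMD_HDR_CMD_OFF,
CMD_HDR_PORT_OFF and CMD_HDR_DEV_ID_OFF are not shown here and are left out.
U suffixes are added locally to keep the shifts well defined in plain C.

#include <stdint.h>
#include <stdio.h>

/* Offsets/masks as in the v2 hw hunk above (U suffixes added here) */
#define CMD_HDR_ABORT_FLAG_OFF		0
#define CMD_HDR_ABORT_FLAG_MSK		(0x3U << CMD_HDR_ABORT_FLAG_OFF)
#define CMD_HDR_ABORT_DEVICE_TYPE_OFF	2
#define CMD_HDR_ABORT_DEVICE_TYPE_MSK	(0x1U << CMD_HDR_ABORT_DEVICE_TYPE_OFF)
#define CMD_HDR_ABORT_IPTT_OFF		16
#define CMD_HDR_ABORT_IPTT_MSK		(0xffffU << CMD_HDR_ABORT_IPTT_OFF)

/* Abort flavors from hisi_sas.h above */
#define HISI_SAS_INT_ABT_CMD	0U
#define HISI_SAS_INT_ABT_DEV	1U

int main(void)
{
	uint32_t dw0 = 0, dw7 = 0;
	uint32_t is_sata = 1;		/* STP/SATA device in this example */
	uint32_t tag_to_abort = 0x2a;	/* hypothetical IPTT */

	/* dw0: abort a single command on a SATA device */
	dw0 |= (HISI_SAS_INT_ABT_CMD << CMD_HDR_ABORT_FLAG_OFF) &
	       CMD_HDR_ABORT_FLAG_MSK;
	dw0 |= (is_sata << CMD_HDR_ABORT_DEVICE_TYPE_OFF) &
	       CMD_HDR_ABORT_DEVICE_TYPE_MSK;

	/* dw7: IPTT of the command being aborted */
	dw7 |= (tag_to_abort << CMD_HDR_ABORT_IPTT_OFF) &
	       CMD_HDR_ABORT_IPTT_MSK;

	printf("dw0=0x%08x dw7=0x%08x\n", dw0, dw7);	/* 0x00000004 0x002a0000 */
	return 0;
}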
index ba9af4a2bd2ab3159ed415b5acc73328925aa3c3..9ab94adbddb7c2af725aaf67145b6163f3f6ead8 100644 (file)
@@ -246,10 +246,6 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
 
        shost->dma_dev = dma_dev;
 
-       error = device_add(&shost->shost_gendev);
-       if (error)
-               goto out_destroy_freelist;
-
        /*
         * Increase usage count temporarily here so that calling
         * scsi_autopm_put_host() will trigger runtime idle if there is
@@ -260,6 +256,10 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
        pm_runtime_enable(&shost->shost_gendev);
        device_enable_async_suspend(&shost->shost_gendev);
 
+       error = device_add(&shost->shost_gendev);
+       if (error)
+               goto out_destroy_freelist;
+
        scsi_host_set_state(shost, SHOST_RUNNING);
        get_device(shost->shost_gendev.parent);
 
@@ -309,6 +309,10 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
  out_del_gendev:
        device_del(&shost->shost_gendev);
  out_destroy_freelist:
+       device_disable_async_suspend(&shost->shost_gendev);
+       pm_runtime_disable(&shost->shost_gendev);
+       pm_runtime_set_suspended(&shost->shost_gendev);
+       pm_runtime_put_noidle(&shost->shost_gendev);
        scsi_destroy_command_freelist(shost);
  out_destroy_tags:
        if (shost_use_blk_mq(shost))
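
The scsi_add_host_with_dma() change is about ordering: runtime PM is now set
up before device_add() publishes the host, and the error path gained the
matching teardown so each failure label unwinds exactly what succeeded, in
reverse order. A generic sketch of that mirrored goto-unwind pattern (all
function names hypothetical):

#include <stdio.h>

/* Hypothetical init/teardown pairs standing in for the driver calls */
static int  setup_pm(void)    { puts("pm enabled");   return 0; }
static void teardown_pm(void) { puts("pm disabled"); }
static int  add_device(void)  { puts("adding device"); return -1; /* fail */ }

static int host_add(void)
{
	int err;

	err = setup_pm();		/* 1. enable runtime PM first ... */
	if (err)
		goto out;
	err = add_device();		/* 2. ... then publish the device */
	if (err)
		goto out_teardown_pm;	/* unwind only what succeeded */
	return 0;

	/* error labels mirror the setup steps in reverse order */
out_teardown_pm:
	teardown_pm();
out:
	return err;
}

int main(void)
{
	return host_add() ? 1 : 0;
}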
index ab67ec4b6bd632587e18bccc30ae72dceb8fdfd8..6b92169abaebf57e26f5367f50b0ddbe48e84192 100644 (file)
@@ -52,6 +52,7 @@ static unsigned int max_requests = IBMVFC_MAX_REQUESTS_DEFAULT;
 static unsigned int disc_threads = IBMVFC_MAX_DISC_THREADS;
 static unsigned int ibmvfc_debug = IBMVFC_DEBUG;
 static unsigned int log_level = IBMVFC_DEFAULT_LOG_LEVEL;
+static unsigned int cls3_error = IBMVFC_CLS3_ERROR;
 static LIST_HEAD(ibmvfc_head);
 static DEFINE_SPINLOCK(ibmvfc_driver_lock);
 static struct scsi_transport_template *ibmvfc_transport_template;
@@ -86,6 +87,9 @@ MODULE_PARM_DESC(debug, "Enable driver debug information. "
 module_param_named(log_level, log_level, uint, 0);
 MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver. "
                 "[Default=" __stringify(IBMVFC_DEFAULT_LOG_LEVEL) "]");
+module_param_named(cls3_error, cls3_error, uint, 0);
+MODULE_PARM_DESC(cls3_error, "Enable FC Class 3 Error Recovery. "
+                "[Default=" __stringify(IBMVFC_CLS3_ERROR) "]");
 
 static const struct {
        u16 status;
@@ -1335,6 +1339,9 @@ static int ibmvfc_map_sg_data(struct scsi_cmnd *scmd,
        struct srp_direct_buf *data = &vfc_cmd->ioba;
        struct ibmvfc_host *vhost = dev_get_drvdata(dev);
 
+       if (cls3_error)
+               vfc_cmd->flags |= cpu_to_be16(IBMVFC_CLASS_3_ERR);
+
        sg_mapped = scsi_dma_map(scmd);
        if (!sg_mapped) {
                vfc_cmd->flags |= cpu_to_be16(IBMVFC_NO_MEM_DESC);
@@ -3381,6 +3388,10 @@ static void ibmvfc_tgt_send_prli(struct ibmvfc_target *tgt)
        prli->parms.type = IBMVFC_SCSI_FCP_TYPE;
        prli->parms.flags = cpu_to_be16(IBMVFC_PRLI_EST_IMG_PAIR);
        prli->parms.service_parms = cpu_to_be32(IBMVFC_PRLI_INITIATOR_FUNC);
+       prli->parms.service_parms |= cpu_to_be32(IBMVFC_PRLI_READ_FCP_XFER_RDY_DISABLED);
+
+       if (cls3_error)
+               prli->parms.service_parms |= cpu_to_be32(IBMVFC_PRLI_RETRY);
 
        ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
        if (ibmvfc_send_event(evt, vhost, default_timeout)) {
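
The new cls3_error knob is wired up with module_param_named(); note that the
first argument of MODULE_PARM_DESC must name the parameter being described,
or the description attaches to the wrong entry in modinfo output. A minimal
sketch of the pairing (not the ibmvfc sources themselves):

#include <linux/module.h>
#include <linux/moduleparam.h>

/* Sketch only: perm 0 means settable at load time, no sysfs entry */
static unsigned int cls3_error;
module_param_named(cls3_error, cls3_error, uint, 0);
MODULE_PARM_DESC(cls3_error,	/* must match the parameter name above */
		 "Enable FC Class 3 Error Recovery. [Default=0]");

MODULE_LICENSE("GPL");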
index 5c70a52ad3466cdb31a8207ac9b664285ad797c6..9a0696f68f37153a5b7647c0e5c22ac69574f583 100644 (file)
@@ -54,6 +54,7 @@
 #define IBMVFC_DEV_LOSS_TMO            (5 * 60)
 #define IBMVFC_DEFAULT_LOG_LEVEL       2
 #define IBMVFC_MAX_CDB_LEN             16
+#define IBMVFC_CLS3_ERROR              0
 
 /*
  * Ensure we have resources for ERP and initialization:
index b29fef9d0f2763559d8ca1e1cce48f81d46a48ba..4dd8e5effddb928b610d03c05c91d7a97f152406 100644 (file)
@@ -1606,8 +1606,6 @@ static void ibmvscsis_send_messages(struct scsi_info *vscsi)
 
        if (!(vscsi->flags & RESPONSE_Q_DOWN)) {
                list_for_each_entry_safe(cmd, nxt, &vscsi->waiting_rsp, list) {
-                       pr_debug("send_messages cmd %p\n", cmd);
-
                        iue = cmd->iue;
 
                        crq->valid = VALID_CMD_RESP_EL;
@@ -1934,6 +1932,8 @@ static int ibmvscsis_drop_nexus(struct ibmvscsis_tport *tport)
        /*
         * Release the SCSI I_T Nexus to the emulated ibmvscsis Target Port
         */
+       target_wait_for_sess_cmds(se_sess);
+       transport_deregister_session_configfs(se_sess);
        transport_deregister_session(se_sess);
        tport->ibmv_nexus = NULL;
        kfree(nexus);
@@ -1978,7 +1978,7 @@ static long ibmvscsis_srp_login(struct scsi_info *vscsi,
                reason = SRP_LOGIN_REJ_MULTI_CHANNEL_UNSUPPORTED;
        else if (fmt->buffers & (~SUPPORTED_FORMATS))
                reason = SRP_LOGIN_REJ_UNSUPPORTED_DESCRIPTOR_FMT;
-       else if ((fmt->buffers | SUPPORTED_FORMATS) == 0)
+       else if ((fmt->buffers & SUPPORTED_FORMATS) == 0)
                reason = SRP_LOGIN_REJ_UNSUPPORTED_DESCRIPTOR_FMT;
 
        if (vscsi->state == SRP_PROCESSING)
@@ -2554,10 +2554,6 @@ static void ibmvscsis_parse_cmd(struct scsi_info *vscsi,
 
        srp->lun.scsi_lun[0] &= 0x3f;
 
-       pr_debug("calling submit_cmd, se_cmd %p, lun 0x%llx, cdb 0x%x, attr:%d\n",
-                &cmd->se_cmd, scsilun_to_int(&srp->lun), (int)srp->cdb[0],
-                attr);
-
        rc = target_submit_cmd(&cmd->se_cmd, nexus->se_sess, srp->cdb,
                               cmd->sense_buf, scsilun_to_int(&srp->lun),
                               data_len, attr, dir, 0);
@@ -3142,8 +3138,6 @@ static int ibmvscsis_rdma(struct ibmvscsis_cmd *cmd, struct scatterlist *sg,
        long tx_len;
        long rc = 0;
 
-       pr_debug("rdma: dir %d, bytes 0x%x\n", dir, bytes);
-
        if (bytes == 0)
                return 0;
 
@@ -3195,9 +3189,6 @@ static int ibmvscsis_rdma(struct ibmvscsis_cmd *cmd, struct scatterlist *sg,
                        /* write to client */
                        struct srp_cmd *srp = (struct srp_cmd *)iue->sbuf->buf;
 
-                       if (!READ_CMD(srp->cdb))
-                               print_hex_dump_bytes(" data:", DUMP_PREFIX_NONE,
-                                                    sg_virt(sgp), buf_len);
                        /* The h_copy_rdma will cause phyp, running in another
                         * partition, to read memory, so we need to make sure
                         * the data has been written out, hence these syncs.
@@ -3322,12 +3313,9 @@ cmd_work:
                                rc = ibmvscsis_trans_event(vscsi, crq);
                        } else if (vscsi->flags & TRANS_EVENT) {
                                /*
-                                * if a tranport event has occurred leave
+                                * if a transport event has occurred, leave
                                 * everything but transport events on the queue
-                                */
-                               pr_debug("handle_crq, ignoring\n");
-
-                               /*
+                                *
                                 * need to decrement the queue index so we can
                                 * look at the element again
                                 */
@@ -3693,12 +3681,9 @@ static void ibmvscsis_release_cmd(struct se_cmd *se_cmd)
                                                 se_cmd);
        struct scsi_info *vscsi = cmd->adapter;
 
-       pr_debug("release_cmd %p, flags %d\n", se_cmd, cmd->flags);
-
        spin_lock_bh(&vscsi->intr_lock);
        /* Remove from active_q */
-       list_del(&cmd->list);
-       list_add_tail(&cmd->list, &vscsi->waiting_rsp);
+       list_move_tail(&cmd->list, &vscsi->waiting_rsp);
        ibmvscsis_send_messages(vscsi);
        spin_unlock_bh(&vscsi->intr_lock);
 }
@@ -3715,9 +3700,6 @@ static int ibmvscsis_write_pending(struct se_cmd *se_cmd)
        struct iu_entry *iue = cmd->iue;
        int rc;
 
-       pr_debug("write_pending, se_cmd %p, length 0x%x\n",
-                se_cmd, se_cmd->data_length);
-
        rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma,
                               1, 1);
        if (rc) {
@@ -3756,9 +3738,6 @@ static int ibmvscsis_queue_data_in(struct se_cmd *se_cmd)
        uint len = 0;
        int rc;
 
-       pr_debug("queue_data_in, se_cmd %p, length 0x%x\n",
-                se_cmd, se_cmd->data_length);
-
        rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma, 1,
                               1);
        if (rc) {
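
Among the ibmvscsi_tgt fixes above, the login format check changed from
(fmt->buffers | SUPPORTED_FORMATS) == 0 to an AND: OR-ing against a non-zero
mask can never yield zero, so the rejection branch was unreachable. A tiny
demonstration (the mask value here is hypothetical):

#include <assert.h>

#define SUPPORTED_FORMATS 0x6	/* hypothetical mask */

int main(void)
{
	unsigned int buffers = 0;	/* client offered no supported format */

	/* OR with a non-zero mask is never zero, so the old test was dead */
	assert((buffers | SUPPORTED_FORMATS) != 0);

	/* AND correctly detects that no supported bit is set */
	assert((buffers & SUPPORTED_FORMATS) == 0);
	return 0;
}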
index 17d04c702e1ba13a1642d36a1de98fb7831665eb..c7c144f4a4f4a57be711ce9fcaab212007fab31e 100644 (file)
@@ -1473,7 +1473,7 @@ static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
        struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
        u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
 
-       list_del(&hostrcb->queue);
+       list_del_init(&hostrcb->queue);
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
 
        if (ioasc) {
@@ -2552,6 +2552,23 @@ static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
        }
 }
 
+static struct ipr_hostrcb *ipr_get_free_hostrcb(struct ipr_ioa_cfg *ioa)
+{
+       struct ipr_hostrcb *hostrcb;
+
+       hostrcb = list_first_entry_or_null(&ioa->hostrcb_free_q,
+                                       struct ipr_hostrcb, queue);
+
+       if (unlikely(!hostrcb)) {
+               dev_info(&ioa->pdev->dev, "Reclaiming async error buffers.");
+               hostrcb = list_first_entry_or_null(&ioa->hostrcb_report_q,
+                                               struct ipr_hostrcb, queue);
+       }
+
+       list_del_init(&hostrcb->queue);
+       return hostrcb;
+}
+
 /**
  * ipr_process_error - Op done function for an adapter error log.
  * @ipr_cmd:   ipr command struct
@@ -2569,13 +2586,14 @@ static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
        struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
        u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
        u32 fd_ioasc;
+       char *envp[] = { "ASYNC_ERR_LOG=1", NULL };
 
        if (ioa_cfg->sis64)
                fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
        else
                fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
 
-       list_del(&hostrcb->queue);
+       list_del_init(&hostrcb->queue);
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
 
        if (!ioasc) {
@@ -2588,6 +2606,10 @@ static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
                        "Host RCB failed with IOASC: 0x%08X\n", ioasc);
        }
 
+       list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_report_q);
+       hostrcb = ipr_get_free_hostrcb(ioa_cfg);
+       kobject_uevent_env(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE, envp);
+
        ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
 }
 
@@ -4095,6 +4117,64 @@ static struct device_attribute ipr_ioa_fw_type_attr = {
        .show = ipr_show_fw_type
 };
 
+static ssize_t ipr_read_async_err_log(struct file *filep, struct kobject *kobj,
+                               struct bin_attribute *bin_attr, char *buf,
+                               loff_t off, size_t count)
+{
+       struct device *cdev = container_of(kobj, struct device, kobj);
+       struct Scsi_Host *shost = class_to_shost(cdev);
+       struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
+       struct ipr_hostrcb *hostrcb;
+       unsigned long lock_flags = 0;
+       int ret;
+
+       spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+       hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
+                                       struct ipr_hostrcb, queue);
+       if (!hostrcb) {
+               spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+               return 0;
+       }
+       ret = memory_read_from_buffer(buf, count, &off, &hostrcb->hcam,
+                               sizeof(hostrcb->hcam));
+       spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+       return ret;
+}
+
+static ssize_t ipr_next_async_err_log(struct file *filep, struct kobject *kobj,
+                               struct bin_attribute *bin_attr, char *buf,
+                               loff_t off, size_t count)
+{
+       struct device *cdev = container_of(kobj, struct device, kobj);
+       struct Scsi_Host *shost = class_to_shost(cdev);
+       struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
+       struct ipr_hostrcb *hostrcb;
+       unsigned long lock_flags = 0;
+
+       spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+       hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
+                                       struct ipr_hostrcb, queue);
+       if (!hostrcb) {
+               spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+               return count;
+       }
+
+       /* Reclaim hostrcb before exit */
+       list_move_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
+       spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+       return count;
+}
+
+static struct bin_attribute ipr_ioa_async_err_log = {
+       .attr = {
+               .name =         "async_err_log",
+               .mode =         S_IRUGO | S_IWUSR,
+       },
+       .size = 0,
+       .read = ipr_read_async_err_log,
+       .write = ipr_next_async_err_log
+};
+
 static struct device_attribute *ipr_ioa_attrs[] = {
        &ipr_fw_version_attr,
        &ipr_log_level_attr,
@@ -7026,8 +7106,7 @@ static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
 {
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
        struct ipr_resource_entry *res;
-       struct ipr_hostrcb *hostrcb, *temp;
-       int i = 0, j;
+       int j;
 
        ENTER;
        ioa_cfg->in_reset_reload = 0;
@@ -7048,12 +7127,16 @@ static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
        }
        schedule_work(&ioa_cfg->work_q);
 
-       list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
-               list_del(&hostrcb->queue);
-               if (i++ < IPR_NUM_LOG_HCAMS)
-                       ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
+       for (j = 0; j < IPR_NUM_HCAMS; j++) {
+               list_del_init(&ioa_cfg->hostrcb[j]->queue);
+               if (j < IPR_NUM_LOG_HCAMS)
+                       ipr_send_hcam(ioa_cfg,
+                               IPR_HCAM_CDB_OP_CODE_LOG_DATA,
+                               ioa_cfg->hostrcb[j]);
                else
-                       ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
+                       ipr_send_hcam(ioa_cfg,
+                               IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
+                               ioa_cfg->hostrcb[j]);
        }
 
        scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
@@ -8335,7 +8418,7 @@ static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
 
        hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
                             struct ipr_hostrcb, queue);
-       list_del(&hostrcb->queue);
+       list_del_init(&hostrcb->queue);
        memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
 
        rc = ipr_get_ldump_data_section(ioa_cfg,
@@ -9332,7 +9415,7 @@ static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
        dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size,
                          ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
 
-       for (i = 0; i < IPR_NUM_HCAMS; i++) {
+       for (i = 0; i < IPR_MAX_HCAMS; i++) {
                dma_free_coherent(&ioa_cfg->pdev->dev,
                                  sizeof(struct ipr_hostrcb),
                                  ioa_cfg->hostrcb[i],
@@ -9572,7 +9655,7 @@ static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
        if (!ioa_cfg->u.cfg_table)
                goto out_free_host_rrq;
 
-       for (i = 0; i < IPR_NUM_HCAMS; i++) {
+       for (i = 0; i < IPR_MAX_HCAMS; i++) {
                ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev,
                                                         sizeof(struct ipr_hostrcb),
                                                         &ioa_cfg->hostrcb_dma[i],
@@ -9714,6 +9797,7 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
 
        INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
        INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
+       INIT_LIST_HEAD(&ioa_cfg->hostrcb_report_q);
        INIT_LIST_HEAD(&ioa_cfg->free_res_q);
        INIT_LIST_HEAD(&ioa_cfg->used_res_q);
        INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
@@ -10352,6 +10436,8 @@ static void ipr_remove(struct pci_dev *pdev)
                              &ipr_trace_attr);
        ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
                             &ipr_dump_attr);
+       sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
+                       &ipr_ioa_async_err_log);
        scsi_remove_host(ioa_cfg->host);
 
        __ipr_remove(pdev);
@@ -10400,10 +10486,25 @@ static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
                return rc;
        }
 
+       rc = sysfs_create_bin_file(&ioa_cfg->host->shost_dev.kobj,
+                       &ipr_ioa_async_err_log);
+
+       if (rc) {
+               ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
+                               &ipr_dump_attr);
+               ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
+                               &ipr_trace_attr);
+               scsi_remove_host(ioa_cfg->host);
+               __ipr_remove(pdev);
+               return rc;
+       }
+
        rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
                                   &ipr_dump_attr);
 
        if (rc) {
+               sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
+                                     &ipr_ioa_async_err_log);
                ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
                                      &ipr_trace_attr);
                scsi_remove_host(ioa_cfg->host);
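
The new ipr async_err_log attribute queues reported HCAMs on
hostrcb_report_q and emits a KOBJ_CHANGE uevent (ASYNC_ERR_LOG=1) when one
arrives; reading the binary file returns the oldest entry, and any write
reclaims it so the next one becomes readable. A hedged userspace sketch,
assuming the attribute appears under /sys/class/scsi_host/hostN/ for the
ipr host (host number below is a placeholder):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#define ERR_LOG "/sys/class/scsi_host/host0/async_err_log"

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open(ERR_LOG, O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Read returns the oldest queued HCAM buffer (0 bytes if empty) */
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, (size_t)n, stdout);

	/* Any write reclaims that entry and exposes the next one */
	if (write(fd, "1", 1) < 0)
		perror("write");

	close(fd);
	return 0;
}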
index cdb51960b53c6d1c5f7687a64226f60ba79cbe1e..4dbeaafa0ba2a8893a0c86c4d466a8a569560432 100644 (file)
 #define IPR_DEFAULT_MAX_ERROR_DUMP                     984
 #define IPR_NUM_LOG_HCAMS                              2
 #define IPR_NUM_CFG_CHG_HCAMS                          2
+#define IPR_NUM_HCAM_QUEUE                             12
 #define IPR_NUM_HCAMS  (IPR_NUM_LOG_HCAMS + IPR_NUM_CFG_CHG_HCAMS)
+#define IPR_MAX_HCAMS  (IPR_NUM_HCAMS + IPR_NUM_HCAM_QUEUE)
 
 #define IPR_MAX_SIS64_TARGETS_PER_BUS                  1024
 #define IPR_MAX_SIS64_LUNS_PER_TARGET                  0xffffffff
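
For reference, the arithmetic behind the new limits: IPR_NUM_LOG_HCAMS and IPR_NUM_CFG_CHG_HCAMS are both 2, so IPR_NUM_HCAMS stays at 4 and IPR_MAX_HCAMS becomes 4 + 12 = 16. The allocation and free loops switched from IPR_NUM_HCAMS to IPR_MAX_HCAMS earlier in this patch therefore manage 12 extra host RCBs, which back the new hostrcb_report_q below.
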
@@ -1532,10 +1534,11 @@ struct ipr_ioa_cfg {
 
        char ipr_hcam_label[8];
 #define IPR_HCAM_LABEL                 "hcams"
-       struct ipr_hostrcb *hostrcb[IPR_NUM_HCAMS];
-       dma_addr_t hostrcb_dma[IPR_NUM_HCAMS];
+       struct ipr_hostrcb *hostrcb[IPR_MAX_HCAMS];
+       dma_addr_t hostrcb_dma[IPR_MAX_HCAMS];
        struct list_head hostrcb_free_q;
        struct list_head hostrcb_pending_q;
+       struct list_head hostrcb_report_q;
 
        struct ipr_hrr_queue hrrq[IPR_MAX_HRRQ_NUM];
        u32 hrrq_num;
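
A plausible lifecycle for a host RCB across the three queues, sketched with the generic list API (the transitions are inferred from the fields above, not copied from ipr.c). Note how list_del_init(), adopted at the top of this patch, leaves the node self-linked, so a later unlink of an already-removed entry stays safe:

#include <linux/list.h>

/* Inferred hostrcb flow: free -> pending -> report -> free. */
static void hostrcb_flow_sketch(struct list_head *free_q,
                                struct list_head *pending_q,
                                struct list_head *report_q,
                                struct list_head *rcb)
{
        list_move_tail(rcb, pending_q); /* HCAM posted to the adapter */
        list_move_tail(rcb, report_q);  /* completed; held for the sysfs log */
        list_del_init(rcb);             /* consumed by the async err log read */
        list_add_tail(rcb, free_q);     /* recycled for the next HCAM */
}
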
index e72673b0a8fb4ba810097a4c57b8cc3d3b8d199f..16ca31ad5ec0abba2d0b08ad1d337ff8afcbbd9a 100644 (file)
@@ -1837,7 +1837,6 @@ static void fc_exch_reset(struct fc_exch *ep)
        int rc = 1;
 
        spin_lock_bh(&ep->ex_lock);
-       fc_exch_abort_locked(ep, 0);
        ep->state |= FC_EX_RST_CLEANUP;
        fc_exch_timer_cancel(ep);
        if (ep->esb_stat & ESB_ST_REC_QUAL)
index 93f5961821452b50d0094413658c34bc37cbc9ec..97aeaddd600d42c03feb79f3fa11bf6a80ce3229 100644 (file)
@@ -457,6 +457,9 @@ static void fc_rport_enter_delete(struct fc_rport_priv *rdata,
  */
 static int fc_rport_logoff(struct fc_rport_priv *rdata)
 {
+       struct fc_lport *lport = rdata->local_port;
+       u32 port_id = rdata->ids.port_id;
+
        mutex_lock(&rdata->rp_mutex);
 
        FC_RPORT_DBG(rdata, "Remove port\n");
@@ -466,6 +469,15 @@ static int fc_rport_logoff(struct fc_rport_priv *rdata)
                FC_RPORT_DBG(rdata, "Port in Delete state, not removing\n");
                goto out;
        }
+       /*
+        * FC-LS states:
+        * To explicitly Logout, the initiating Nx_Port shall terminate
+        * other open Sequences that it initiated with the destination
+        * Nx_Port prior to performing Logout.
+        */
+       lport->tt.exch_mgr_reset(lport, 0, port_id);
+       lport->tt.exch_mgr_reset(lport, port_id, 0);
+
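
As a reading aid: lport->tt.exch_mgr_reset() (fc_exch_mgr_reset() in libfc) drops every cached exchange whose source and destination IDs match its (sid, did) arguments, with 0 acting as a wildcard. Issuing it for (0, port_id) and then (port_id, 0) therefore clears exchanges in both directions with the remote port before the LOGO goes out, which is what the FC-LS requirement quoted above demands.
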
        fc_rport_enter_logo(rdata);
 
        /*
@@ -547,16 +559,24 @@ static void fc_rport_timeout(struct work_struct *work)
  */
 static void fc_rport_error(struct fc_rport_priv *rdata, struct fc_frame *fp)
 {
+       struct fc_lport *lport = rdata->local_port;
+
        FC_RPORT_DBG(rdata, "Error %ld in state %s, retries %d\n",
                     IS_ERR(fp) ? -PTR_ERR(fp) : 0,
                     fc_rport_state(rdata), rdata->retries);
 
        switch (rdata->rp_state) {
        case RPORT_ST_FLOGI:
-       case RPORT_ST_PLOGI:
                rdata->flags &= ~FC_RP_STARTED;
                fc_rport_enter_delete(rdata, RPORT_EV_FAILED);
                break;
+       case RPORT_ST_PLOGI:
+               if (lport->point_to_multipoint) {
+                       rdata->flags &= ~FC_RP_STARTED;
+                       fc_rport_enter_delete(rdata, RPORT_EV_FAILED);
+               } else
+                       fc_rport_enter_logo(rdata);
+               break;
        case RPORT_ST_RTV:
                fc_rport_enter_ready(rdata);
                break;
@@ -1877,7 +1897,7 @@ static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata,
        spp->spp_type_ext = rspp->spp_type_ext;
        spp->spp_flags = FC_SPP_RESP_ACK;
 
-       fc_rport_enter_delete(rdata, RPORT_EV_LOGO);
+       fc_rport_enter_prli(rdata);
 
        fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
        lport->tt.frame_send(lport, fp);
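
Answering PRLO with an accept and then re-entering PRLI, instead of tearing the rport down, matches the distinction between process login and port login: PRLO ends only the process-level image pairing, so the PLOGI session survives and PRLI can renegotiate service parameters.
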
@@ -1915,7 +1935,7 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
                FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n",
                             fc_rport_state(rdata));
 
-               fc_rport_enter_delete(rdata, RPORT_EV_LOGO);
+               fc_rport_enter_delete(rdata, RPORT_EV_STOP);
                mutex_unlock(&rdata->rp_mutex);
                kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
        } else
index c1ed25adb17ec1f11b039854ffa62e590032f9d6..2d62d71782fabea6c7fca5736e4e339c2cde1078 100644 (file)
@@ -5036,7 +5036,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
 
        /* Find first memory bar */
        bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM);
-       instance->bar = find_first_bit(&bar_list, sizeof(unsigned long));
+       instance->bar = find_first_bit(&bar_list, BITS_PER_LONG);
        if (pci_request_selected_regions(instance->pdev, 1<<instance->bar,
                                         "megasas: LSI")) {
                dev_printk(KERN_DEBUG, &instance->pdev->dev, "IO memory region busy!\n");
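
The second argument of find_first_bit() is a size in bits, not bytes: sizeof(unsigned long) is 8 on a 64-bit build, so the old call only ever scanned bits 0-7. PCI BAR masks happen to fit in that range, so this was more a latent hazard than an observed failure, but the bound was still wrong. A self-contained illustration (userspace C modelling the kernel helper's contract):

#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

/* Minimal model of the kernel's find_first_bit(): returns the index
 * of the first set bit, or nbits if none is found within the bound. */
static unsigned long find_first_bit(const unsigned long *word,
                                    unsigned long nbits)
{
        unsigned long i;

        for (i = 0; i < nbits; i++)
                if (*word & (1UL << i))
                        return i;
        return nbits;
}

int main(void)
{
        unsigned long mask = 1UL << 9;  /* illustrative: first set bit is 9 */

        /* bytes as the bound (old code): bit 9 lies beyond bits 0-7 */
        printf("bound=8:  %lu\n", find_first_bit(&mask, sizeof(unsigned long)));
        /* bits as the bound (fixed code): bit 9 is found */
        printf("bound=64: %lu\n", find_first_bit(&mask, BITS_PER_LONG));
        return 0;
}
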
@@ -6711,14 +6711,9 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
        unsigned long flags;
        u32 wait_time = MEGASAS_RESET_WAIT_TIME;
 
-       ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
-       if (!ioc)
-               return -ENOMEM;
-
-       if (copy_from_user(ioc, user_ioc, sizeof(*ioc))) {
-               error = -EFAULT;
-               goto out_kfree_ioc;
-       }
+       ioc = memdup_user(user_ioc, sizeof(*ioc));
+       if (IS_ERR(ioc))
+               return PTR_ERR(ioc);
 
        instance = megasas_lookup_instance(ioc->host_no);
        if (!instance) {
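
memdup_user() folds the kmalloc()/copy_from_user() pair into a single call and signals failure through ERR_PTR() instead of NULL, which is why the caller now tests IS_ERR() and returns PTR_ERR() rather than juggling -ENOMEM and -EFAULT itself. Its approximate shape (a sketch; see mm/util.c for the real implementation):

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

/* Roughly what memdup_user() does internally. */
static void *memdup_user_sketch(const void __user *src, size_t len)
{
        void *p;

        p = kmalloc(len, GFP_KERNEL);   /* the real one uses GFP_USER */
        if (!p)
                return ERR_PTR(-ENOMEM);
        if (copy_from_user(p, src, len)) {
                kfree(p);
                return ERR_PTR(-EFAULT);
        }
        return p;
}
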
index 750f82c339d4d6c1d07a11d1bba7d14aa0a56b10..a1a5ceb42ce6d3280dadd5a521e24e4af9a2aff8 100644 (file)
@@ -98,7 +98,7 @@ MODULE_PARM_DESC(mpt3sas_fwfault_debug,
        " enable detection of firmware fault and halt firmware - (default=0)");
 
 static int
-_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc, int sleep_flag);
+_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc);
 
 /**
  * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
@@ -218,8 +218,7 @@ _base_fault_reset_work(struct work_struct *work)
        ioc->non_operational_loop = 0;
 
        if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) {
-               rc = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
-                   FORCE_BIG_HAMMER);
+               rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
                pr_warn(MPT3SAS_FMT "%s: hard reset: %s\n", ioc->name,
                    __func__, (rc == 0) ? "success" : "failed");
                doorbell = mpt3sas_base_get_iocstate(ioc, 0);
@@ -2040,7 +2039,7 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
  * mpt3sas_base_unmap_resources - free controller resources
  * @ioc: per adapter object
  */
-void
+static void
 mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc)
 {
        struct pci_dev *pdev = ioc->pdev;
@@ -2145,7 +2144,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
 
        _base_mask_interrupts(ioc);
 
-       r = _base_get_ioc_facts(ioc, CAN_SLEEP);
+       r = _base_get_ioc_facts(ioc);
        if (r)
                goto out_fail;
 
@@ -3183,12 +3182,11 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 /**
  * _base_allocate_memory_pools - allocate start of day memory pools
  * @ioc: per adapter object
- * @sleep_flag: CAN_SLEEP or NO_SLEEP
  *
  * Returns 0 success, anything else error
  */
 static int
-_base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc,  int sleep_flag)
+_base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 {
        struct mpt3sas_facts *facts;
        u16 max_sge_elements;
@@ -3658,29 +3656,25 @@ mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked)
  * _base_wait_on_iocstate - waiting on a particular ioc state
  * @ioc_state: controller state { READY, OPERATIONAL, or RESET }
  * @timeout: timeout in seconds
- * @sleep_flag: CAN_SLEEP or NO_SLEEP
  *
  * Returns 0 for success, non-zero for failure.
  */
 static int
-_base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout,
-       int sleep_flag)
+_base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout)
 {
        u32 count, cntdn;
        u32 current_state;
 
        count = 0;
-       cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
+       cntdn = 1000 * timeout;
        do {
                current_state = mpt3sas_base_get_iocstate(ioc, 1);
                if (current_state == ioc_state)
                        return 0;
                if (count && current_state == MPI2_IOC_STATE_FAULT)
                        break;
-               if (sleep_flag == CAN_SLEEP)
-                       usleep_range(1000, 1500);
-               else
-                       udelay(500);
+
+               usleep_range(1000, 1500);
                count++;
        } while (--cntdn);
 
@@ -3692,24 +3686,22 @@ _base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout,
  * a write to the doorbell)
  * @ioc: per adapter object
  * @timeout: timeout in seconds
- * @sleep_flag: CAN_SLEEP or NO_SLEEP
  *
  * Returns 0 for success, non-zero for failure.
  *
  * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell.
  */
 static int
-_base_diag_reset(struct MPT3SAS_ADAPTER *ioc, int sleep_flag);
+_base_diag_reset(struct MPT3SAS_ADAPTER *ioc);
 
 static int
-_base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout,
-       int sleep_flag)
+_base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
 {
        u32 cntdn, count;
        u32 int_status;
 
        count = 0;
-       cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
+       cntdn = 1000 * timeout;
        do {
                int_status = readl(&ioc->chip->HostInterruptStatus);
                if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
@@ -3718,10 +3710,35 @@ _base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout,
                                ioc->name, __func__, count, timeout));
                        return 0;
                }
-               if (sleep_flag == CAN_SLEEP)
-                       usleep_range(1000, 1500);
-               else
-                       udelay(500);
+
+               usleep_range(1000, 1500);
+               count++;
+       } while (--cntdn);
+
+       pr_err(MPT3SAS_FMT
+               "%s: failed due to timeout count(%d), int_status(%x)!\n",
+               ioc->name, __func__, count, int_status);
+       return -EFAULT;
+}
+
+static int
+_base_spin_on_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
+{
+       u32 cntdn, count;
+       u32 int_status;
+
+       count = 0;
+       cntdn = 2000 * timeout;
+       do {
+               int_status = readl(&ioc->chip->HostInterruptStatus);
+               if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
+                       dhsprintk(ioc, pr_info(MPT3SAS_FMT
+                               "%s: successful count(%d), timeout(%d)\n",
+                               ioc->name, __func__, count, timeout));
+                       return 0;
+               }
+
+               udelay(500);
                count++;
        } while (--cntdn);
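
The spinning variant above preserves the old NO_SLEEP behaviour for the handshake path that still busy-waits (see the _base_spin_on_doorbell_int() call replacing the NO_SLEEP wait further down): 2000 * timeout iterations of udelay(500) bound the spin to the same ~timeout seconds as the sleeping variant, since 2000 x 500 us = 1 s per timeout unit.
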
 
@@ -3729,13 +3746,13 @@ _base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout,
                "%s: failed due to timeout count(%d), int_status(%x)!\n",
                ioc->name, __func__, count, int_status);
        return -EFAULT;
+
 }
 
 /**
  * _base_wait_for_doorbell_ack - waiting for controller to read the doorbell.
  * @ioc: per adapter object
  * @timeout: timeout in seconds
- * @sleep_flag: CAN_SLEEP or NO_SLEEP
  *
  * Returns 0 for success, non-zero for failure.
  *
@@ -3743,15 +3760,14 @@ _base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout,
  * doorbell.
  */
 static int
-_base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout,
-       int sleep_flag)
+_base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout)
 {
        u32 cntdn, count;
        u32 int_status;
        u32 doorbell;
 
        count = 0;
-       cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
+       cntdn = 1000 * timeout;
        do {
                int_status = readl(&ioc->chip->HostInterruptStatus);
                if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
@@ -3769,10 +3785,7 @@ _base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout,
                } else if (int_status == 0xFFFFFFFF)
                        goto out;
 
-               if (sleep_flag == CAN_SLEEP)
-                       usleep_range(1000, 1500);
-               else
-                       udelay(500);
+               usleep_range(1000, 1500);
                count++;
        } while (--cntdn);
 
@@ -3787,20 +3800,18 @@ _base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout,
  * _base_wait_for_doorbell_not_used - waiting for doorbell to not be in use
  * @ioc: per adapter object
  * @timeout: timeout in seconds
- * @sleep_flag: CAN_SLEEP or NO_SLEEP
  *
  * Returns 0 for success, non-zero for failure.
  *
  */
 static int
-_base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout,
-       int sleep_flag)
+_base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout)
 {
        u32 cntdn, count;
        u32 doorbell_reg;
 
        count = 0;
-       cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
+       cntdn = 1000 * timeout;
        do {
                doorbell_reg = readl(&ioc->chip->Doorbell);
                if (!(doorbell_reg & MPI2_DOORBELL_USED)) {
@@ -3809,10 +3820,8 @@ _base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout,
                                ioc->name, __func__, count, timeout));
                        return 0;
                }
-               if (sleep_flag == CAN_SLEEP)
-                       usleep_range(1000, 1500);
-               else
-                       udelay(500);
+
+               usleep_range(1000, 1500);
                count++;
        } while (--cntdn);
 
@@ -3827,13 +3836,11 @@ _base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout,
  * @ioc: per adapter object
  * @reset_type: currently only supports: MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET
  * @timeout: timeout in seconds
- * @sleep_flag: CAN_SLEEP or NO_SLEEP
  *
  * Returns 0 for success, non-zero for failure.
  */
 static int
-_base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout,
-       int sleep_flag)
+_base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout)
 {
        u32 ioc_state;
        int r = 0;
@@ -3852,12 +3859,11 @@ _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout,
 
        writel(reset_type << MPI2_DOORBELL_FUNCTION_SHIFT,
            &ioc->chip->Doorbell);
-       if ((_base_wait_for_doorbell_ack(ioc, 15, sleep_flag))) {
+       if ((_base_wait_for_doorbell_ack(ioc, 15))) {
                r = -EFAULT;
                goto out;
        }
-       ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY,
-           timeout, sleep_flag);
+       ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
        if (ioc_state) {
                pr_err(MPT3SAS_FMT
                        "%s: failed going to ready state (ioc_state=0x%x)\n",
@@ -3879,18 +3885,16 @@ _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout,
  * @reply_bytes: reply length
  * @reply: pointer to reply payload
  * @timeout: timeout in seconds
- * @sleep_flag: CAN_SLEEP or NO_SLEEP
  *
  * Returns 0 for success, non-zero for failure.
  */
 static int
 _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
-       u32 *request, int reply_bytes, u16 *reply, int timeout, int sleep_flag)
+       u32 *request, int reply_bytes, u16 *reply, int timeout)
 {
        MPI2DefaultReply_t *default_reply = (MPI2DefaultReply_t *)reply;
        int i;
        u8 failed;
-       u16 dummy;
        __le32 *mfp;
 
        /* make sure doorbell is not in use */
@@ -3911,7 +3915,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
            ((request_bytes/4)<<MPI2_DOORBELL_ADD_DWORDS_SHIFT)),
            &ioc->chip->Doorbell);
 
-       if ((_base_wait_for_doorbell_int(ioc, 5, NO_SLEEP))) {
+       if ((_base_spin_on_doorbell_int(ioc, 5))) {
                pr_err(MPT3SAS_FMT
                        "doorbell handshake int failed (line=%d)\n",
                        ioc->name, __LINE__);
@@ -3919,7 +3923,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
        }
        writel(0, &ioc->chip->HostInterruptStatus);
 
-       if ((_base_wait_for_doorbell_ack(ioc, 5, sleep_flag))) {
+       if ((_base_wait_for_doorbell_ack(ioc, 5))) {
                pr_err(MPT3SAS_FMT
                        "doorbell handshake ack failed (line=%d)\n",
                        ioc->name, __LINE__);
@@ -3929,7 +3933,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
        /* send message 32-bits at a time */
        for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) {
                writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell);
-               if ((_base_wait_for_doorbell_ack(ioc, 5, sleep_flag)))
+               if ((_base_wait_for_doorbell_ack(ioc, 5)))
                        failed = 1;
        }
 
@@ -3941,7 +3945,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
        }
 
        /* now wait for the reply */
-       if ((_base_wait_for_doorbell_int(ioc, timeout, sleep_flag))) {
+       if ((_base_wait_for_doorbell_int(ioc, timeout))) {
                pr_err(MPT3SAS_FMT
                        "doorbell handshake int failed (line=%d)\n",
                        ioc->name, __LINE__);
@@ -3952,7 +3956,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
        reply[0] = le16_to_cpu(readl(&ioc->chip->Doorbell)
            & MPI2_DOORBELL_DATA_MASK);
        writel(0, &ioc->chip->HostInterruptStatus);
-       if ((_base_wait_for_doorbell_int(ioc, 5, sleep_flag))) {
+       if ((_base_wait_for_doorbell_int(ioc, 5))) {
                pr_err(MPT3SAS_FMT
                        "doorbell handshake int failed (line=%d)\n",
                        ioc->name, __LINE__);
@@ -3963,22 +3967,22 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
        writel(0, &ioc->chip->HostInterruptStatus);
 
        for (i = 2; i < default_reply->MsgLength * 2; i++)  {
-               if ((_base_wait_for_doorbell_int(ioc, 5, sleep_flag))) {
+               if ((_base_wait_for_doorbell_int(ioc, 5))) {
                        pr_err(MPT3SAS_FMT
                                "doorbell handshake int failed (line=%d)\n",
                                ioc->name, __LINE__);
                        return -EFAULT;
                }
                if (i >=  reply_bytes/2) /* overflow case */
-                       dummy = readl(&ioc->chip->Doorbell);
+                       readl(&ioc->chip->Doorbell);
                else
                        reply[i] = le16_to_cpu(readl(&ioc->chip->Doorbell)
                            & MPI2_DOORBELL_DATA_MASK);
                writel(0, &ioc->chip->HostInterruptStatus);
        }
 
-       _base_wait_for_doorbell_int(ioc, 5, sleep_flag);
-       if (_base_wait_for_doorbell_not_used(ioc, 5, sleep_flag) != 0) {
+       _base_wait_for_doorbell_int(ioc, 5);
+       if (_base_wait_for_doorbell_not_used(ioc, 5) != 0) {
                dhsprintk(ioc, pr_info(MPT3SAS_FMT
                        "doorbell is in use (line=%d)\n", ioc->name, __LINE__));
        }
@@ -4015,7 +4019,6 @@ mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
 {
        u16 smid;
        u32 ioc_state;
-       unsigned long timeleft;
        bool issue_reset = false;
        int rc;
        void *request;
@@ -4068,7 +4071,7 @@ mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
                ioc->ioc_link_reset_in_progress = 1;
        init_completion(&ioc->base_cmds.done);
        mpt3sas_base_put_smid_default(ioc, smid);
-       timeleft = wait_for_completion_timeout(&ioc->base_cmds.done,
+       wait_for_completion_timeout(&ioc->base_cmds.done,
            msecs_to_jiffies(10000));
        if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
            mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) &&
@@ -4093,8 +4096,7 @@ mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
 
  issue_host_reset:
        if (issue_reset)
-               mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
-                   FORCE_BIG_HAMMER);
+               mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
        ioc->base_cmds.status = MPT3_CMD_NOT_USED;
        rc = -EFAULT;
  out:
@@ -4119,7 +4121,6 @@ mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
 {
        u16 smid;
        u32 ioc_state;
-       unsigned long timeleft;
        bool issue_reset = false;
        int rc;
        void *request;
@@ -4170,7 +4171,7 @@ mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
        memcpy(request, mpi_request, sizeof(Mpi2SepReply_t));
        init_completion(&ioc->base_cmds.done);
        mpt3sas_base_put_smid_default(ioc, smid);
-       timeleft = wait_for_completion_timeout(&ioc->base_cmds.done,
+       wait_for_completion_timeout(&ioc->base_cmds.done,
            msecs_to_jiffies(10000));
        if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
                pr_err(MPT3SAS_FMT "%s: timeout\n",
@@ -4191,8 +4192,7 @@ mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
 
  issue_host_reset:
        if (issue_reset)
-               mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
-                   FORCE_BIG_HAMMER);
+               mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
        ioc->base_cmds.status = MPT3_CMD_NOT_USED;
        rc = -EFAULT;
  out:
@@ -4203,12 +4203,11 @@ mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
 /**
  * _base_get_port_facts - obtain port facts reply and save in ioc
  * @ioc: per adapter object
- * @sleep_flag: CAN_SLEEP or NO_SLEEP
  *
  * Returns 0 for success, non-zero for failure.
  */
 static int
-_base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port, int sleep_flag)
+_base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port)
 {
        Mpi2PortFactsRequest_t mpi_request;
        Mpi2PortFactsReply_t mpi_reply;
@@ -4224,7 +4223,7 @@ _base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port, int sleep_flag)
        mpi_request.Function = MPI2_FUNCTION_PORT_FACTS;
        mpi_request.PortNumber = port;
        r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
-           (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5, CAN_SLEEP);
+           (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);
 
        if (r != 0) {
                pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
@@ -4247,13 +4246,11 @@ _base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port, int sleep_flag)
  * _base_wait_for_iocstate - Wait until the card is in READY or OPERATIONAL
  * @ioc: per adapter object
  * @timeout:
- * @sleep_flag: CAN_SLEEP or NO_SLEEP
  *
  * Returns 0 for success, non-zero for failure.
  */
 static int
-_base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout,
-       int sleep_flag)
+_base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout)
 {
        u32 ioc_state;
        int rc;
@@ -4287,8 +4284,7 @@ _base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout,
                goto issue_diag_reset;
        }
 
-       ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY,
-           timeout, sleep_flag);
+       ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
        if (ioc_state) {
                dfailprintk(ioc, printk(MPT3SAS_FMT
                    "%s: failed going to ready state (ioc_state=0x%x)\n",
@@ -4297,19 +4293,18 @@ _base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout,
        }
 
  issue_diag_reset:
-       rc = _base_diag_reset(ioc, sleep_flag);
+       rc = _base_diag_reset(ioc);
        return rc;
 }
 
 /**
  * _base_get_ioc_facts - obtain ioc facts reply and save in ioc
  * @ioc: per adapter object
- * @sleep_flag: CAN_SLEEP or NO_SLEEP
  *
  * Returns 0 for success, non-zero for failure.
  */
 static int
-_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
+_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc)
 {
        Mpi2IOCFactsRequest_t mpi_request;
        Mpi2IOCFactsReply_t mpi_reply;
@@ -4319,7 +4314,7 @@ _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
        dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
            __func__));
 
-       r = _base_wait_for_iocstate(ioc, 10, sleep_flag);
+       r = _base_wait_for_iocstate(ioc, 10);
        if (r) {
                dfailprintk(ioc, printk(MPT3SAS_FMT
                    "%s: failed getting to correct state\n",
@@ -4331,7 +4326,7 @@ _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
        memset(&mpi_request, 0, mpi_request_sz);
        mpi_request.Function = MPI2_FUNCTION_IOC_FACTS;
        r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
-           (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5, CAN_SLEEP);
+           (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);
 
        if (r != 0) {
                pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
@@ -4391,12 +4386,11 @@ _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
 /**
  * _base_send_ioc_init - send ioc_init to firmware
  * @ioc: per adapter object
- * @sleep_flag: CAN_SLEEP or NO_SLEEP
  *
  * Returns 0 for success, non-zero for failure.
  */
 static int
-_base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
+_base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
 {
        Mpi2IOCInitRequest_t mpi_request;
        Mpi2IOCInitReply_t mpi_reply;
@@ -4479,8 +4473,7 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
 
        r = _base_handshake_req_reply_wait(ioc,
            sizeof(Mpi2IOCInitRequest_t), (u32 *)&mpi_request,
-           sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 10,
-           sleep_flag);
+           sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 10);
 
        if (r != 0) {
                pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
@@ -4555,16 +4548,14 @@ mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
 /**
  * _base_send_port_enable - send port_enable(discovery stuff) to firmware
  * @ioc: per adapter object
- * @sleep_flag: CAN_SLEEP or NO_SLEEP
  *
  * Returns 0 for success, non-zero for failure.
  */
 static int
-_base_send_port_enable(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
+_base_send_port_enable(struct MPT3SAS_ADAPTER *ioc)
 {
        Mpi2PortEnableRequest_t *mpi_request;
        Mpi2PortEnableReply_t *mpi_reply;
-       unsigned long timeleft;
        int r = 0;
        u16 smid;
        u16 ioc_status;
@@ -4592,8 +4583,7 @@ _base_send_port_enable(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
 
        init_completion(&ioc->port_enable_cmds.done);
        mpt3sas_base_put_smid_default(ioc, smid);
-       timeleft = wait_for_completion_timeout(&ioc->port_enable_cmds.done,
-           300*HZ);
+       wait_for_completion_timeout(&ioc->port_enable_cmds.done, 300*HZ);
        if (!(ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE)) {
                pr_err(MPT3SAS_FMT "%s: timeout\n",
                    ioc->name, __func__);
@@ -4737,15 +4727,13 @@ _base_unmask_events(struct MPT3SAS_ADAPTER *ioc, u16 event)
 /**
  * _base_event_notification - send event notification
  * @ioc: per adapter object
- * @sleep_flag: CAN_SLEEP or NO_SLEEP
  *
  * Returns 0 for success, non-zero for failure.
  */
 static int
-_base_event_notification(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
+_base_event_notification(struct MPT3SAS_ADAPTER *ioc)
 {
        Mpi2EventNotificationRequest_t *mpi_request;
-       unsigned long timeleft;
        u16 smid;
        int r = 0;
        int i;
@@ -4777,7 +4765,7 @@ _base_event_notification(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
                    cpu_to_le32(ioc->event_masks[i]);
        init_completion(&ioc->base_cmds.done);
        mpt3sas_base_put_smid_default(ioc, smid);
-       timeleft = wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
+       wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
        if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
                pr_err(MPT3SAS_FMT "%s: timeout\n",
                    ioc->name, __func__);
@@ -4827,19 +4815,18 @@ mpt3sas_base_validate_event_type(struct MPT3SAS_ADAPTER *ioc, u32 *event_type)
                return;
 
        mutex_lock(&ioc->base_cmds.mutex);
-       _base_event_notification(ioc, CAN_SLEEP);
+       _base_event_notification(ioc);
        mutex_unlock(&ioc->base_cmds.mutex);
 }
 
 /**
  * _base_diag_reset - the "big hammer" start of day reset
  * @ioc: per adapter object
- * @sleep_flag: CAN_SLEEP or NO_SLEEP
  *
  * Returns 0 for success, non-zero for failure.
  */
 static int
-_base_diag_reset(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
+_base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
 {
        u32 host_diagnostic;
        u32 ioc_state;
@@ -4867,10 +4854,7 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
                writel(MPI2_WRSEQ_6TH_KEY_VALUE, &ioc->chip->WriteSequence);
 
                /* wait 100 msec */
-               if (sleep_flag == CAN_SLEEP)
-                       msleep(100);
-               else
-                       mdelay(100);
+               msleep(100);
 
                if (count++ > 20)
                        goto out;
@@ -4890,10 +4874,7 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
             &ioc->chip->HostDiagnostic);
 
        /* This delay allows the chip PCIe hardware time to finish reset tasks */
-       if (sleep_flag == CAN_SLEEP)
-               msleep(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000);
-       else
-               mdelay(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000);
+       msleep(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000);
 
        /* Approximately 300 second max wait */
        for (count = 0; count < (300000000 /
@@ -4906,13 +4887,7 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
                if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER))
                        break;
 
-               /* Wait to pass the second read delay window */
-               if (sleep_flag == CAN_SLEEP)
-                       msleep(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC
-                                                               / 1000);
-               else
-                       mdelay(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC
-                                                               / 1000);
+               msleep(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC / 1000);
        }
 
        if (host_diagnostic & MPI2_DIAG_HCB_MODE) {
@@ -4941,8 +4916,7 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
 
        drsprintk(ioc, pr_info(MPT3SAS_FMT
                "Wait for FW to go to the READY state\n", ioc->name));
-       ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20,
-           sleep_flag);
+       ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20);
        if (ioc_state) {
                pr_err(MPT3SAS_FMT
                        "%s: failed going to ready state (ioc_state=0x%x)\n",
@@ -4961,14 +4935,12 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
 /**
  * _base_make_ioc_ready - put controller in READY state
  * @ioc: per adapter object
- * @sleep_flag: CAN_SLEEP or NO_SLEEP
  * @type: FORCE_BIG_HAMMER or SOFT_RESET
  *
  * Returns 0 for success, non-zero for failure.
  */
 static int
-_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
-       enum reset_type type)
+_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type)
 {
        u32 ioc_state;
        int rc;
@@ -4995,10 +4967,7 @@ _base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
                                    ioc->name, __func__, ioc_state);
                                return -EFAULT;
                        }
-                       if (sleep_flag == CAN_SLEEP)
-                               ssleep(1);
-                       else
-                               mdelay(1000);
+                       ssleep(1);
                        ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
                }
        }
@@ -5024,24 +4993,23 @@ _base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
 
        if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
                if (!(_base_send_ioc_reset(ioc,
-                   MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET, 15, CAN_SLEEP))) {
+                   MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET, 15))) {
                        return 0;
        }
 
  issue_diag_reset:
-       rc = _base_diag_reset(ioc, CAN_SLEEP);
+       rc = _base_diag_reset(ioc);
        return rc;
 }
 
 /**
  * _base_make_ioc_operational - put controller in OPERATIONAL state
  * @ioc: per adapter object
- * @sleep_flag: CAN_SLEEP or NO_SLEEP
  *
  * Returns 0 for success, non-zero for failure.
  */
 static int
-_base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
+_base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
 {
        int r, i, index;
        unsigned long   flags;
@@ -5160,7 +5128,7 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
        }
  skip_init_reply_post_free_queue:
 
-       r = _base_send_ioc_init(ioc, sleep_flag);
+       r = _base_send_ioc_init(ioc);
        if (r)
                return r;
 
@@ -5186,13 +5154,11 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
  skip_init_reply_post_host_index:
 
        _base_unmask_interrupts(ioc);
-       r = _base_event_notification(ioc, sleep_flag);
+       r = _base_event_notification(ioc);
        if (r)
                return r;
 
-       if (sleep_flag == CAN_SLEEP)
-               _base_static_config_pages(ioc);
-
+       _base_static_config_pages(ioc);
 
        if (ioc->is_driver_loading) {
 
@@ -5211,7 +5177,7 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
                return r; /* scan_start and scan_finished support */
        }
 
-       r = _base_send_port_enable(ioc, sleep_flag);
+       r = _base_send_port_enable(ioc);
        if (r)
                return r;
 
@@ -5235,7 +5201,7 @@ mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
        if (ioc->chip_phys && ioc->chip) {
                _base_mask_interrupts(ioc);
                ioc->shost_recovery = 1;
-               _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
+               _base_make_ioc_ready(ioc, SOFT_RESET);
                ioc->shost_recovery = 0;
        }
 
@@ -5292,7 +5258,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
                goto out_free_resources;
 
        pci_set_drvdata(ioc->pdev, ioc->shost);
-       r = _base_get_ioc_facts(ioc, CAN_SLEEP);
+       r = _base_get_ioc_facts(ioc);
        if (r)
                goto out_free_resources;
 
@@ -5326,7 +5292,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
        ioc->build_sg_mpi = &_base_build_sg;
        ioc->build_zero_len_sge_mpi = &_base_build_zero_len_sge;
 
-       r = _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
+       r = _base_make_ioc_ready(ioc, SOFT_RESET);
        if (r)
                goto out_free_resources;
 
@@ -5338,12 +5304,12 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
        }
 
        for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) {
-               r = _base_get_port_facts(ioc, i, CAN_SLEEP);
+               r = _base_get_port_facts(ioc, i);
                if (r)
                        goto out_free_resources;
        }
 
-       r = _base_allocate_memory_pools(ioc, CAN_SLEEP);
+       r = _base_allocate_memory_pools(ioc);
        if (r)
                goto out_free_resources;
 
@@ -5429,7 +5395,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
        if (ioc->hba_mpi_version_belonged == MPI26_VERSION)
                _base_unmask_events(ioc, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
 
-       r = _base_make_ioc_operational(ioc, CAN_SLEEP);
+       r = _base_make_ioc_operational(ioc);
        if (r)
                goto out_free_resources;
 
@@ -5565,21 +5531,18 @@ _base_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
 /**
  * _wait_for_commands_to_complete - reset controller
  * @ioc: Pointer to MPT_ADAPTER structure
- * @sleep_flag: CAN_SLEEP or NO_SLEEP
  *
  * This function waits up to 3 seconds for all pending commands to complete
  * prior to putting controller in reset.
  */
 static void
-_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
+_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc)
 {
        u32 ioc_state;
        unsigned long flags;
        u16 i;
 
        ioc->pending_io_count = 0;
-       if (sleep_flag != CAN_SLEEP)
-               return;
 
        ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
        if ((ioc_state & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL)
@@ -5602,13 +5565,12 @@ _wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
 /**
  * mpt3sas_base_hard_reset_handler - reset controller
  * @ioc: Pointer to MPT_ADAPTER structure
- * @sleep_flag: CAN_SLEEP or NO_SLEEP
  * @type: FORCE_BIG_HAMMER or SOFT_RESET
  *
  * Returns 0 for success, non-zero for failure.
  */
 int
-mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
+mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
        enum reset_type type)
 {
        int r;
@@ -5629,13 +5591,6 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
        if (mpt3sas_fwfault_debug)
                mpt3sas_halt_firmware(ioc);
 
-       /* TODO - What we really should be doing is pulling
-        * out all the code associated with NO_SLEEP; its never used.
-        * That is legacy code from mpt fusion driver, ported over.
-        * I will leave this BUG_ON here for now till its been resolved.
-        */
-       BUG_ON(sleep_flag == NO_SLEEP);
-
        /* wait for an active reset in progress to complete */
        if (!mutex_trylock(&ioc->reset_in_progress_mutex)) {
                do {
@@ -5660,9 +5615,9 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
                        is_fault = 1;
        }
        _base_reset_handler(ioc, MPT3_IOC_PRE_RESET);
-       _wait_for_commands_to_complete(ioc, sleep_flag);
+       _wait_for_commands_to_complete(ioc);
        _base_mask_interrupts(ioc);
-       r = _base_make_ioc_ready(ioc, sleep_flag, type);
+       r = _base_make_ioc_ready(ioc, type);
        if (r)
                goto out;
        _base_reset_handler(ioc, MPT3_IOC_AFTER_RESET);
@@ -5675,7 +5630,7 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
                r = -EFAULT;
                goto out;
        }
-       r = _base_get_ioc_facts(ioc, CAN_SLEEP);
+       r = _base_get_ioc_facts(ioc);
        if (r)
                goto out;
 
@@ -5684,7 +5639,7 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
                      "Please reboot the system and ensure that the correct"
                      " firmware version is running\n", ioc->name);
 
-       r = _base_make_ioc_operational(ioc, sleep_flag);
+       r = _base_make_ioc_operational(ioc);
        if (!r)
                _base_reset_handler(ioc, MPT3_IOC_DONE_RESET);
 
index 892c9be008b504b5b7eb57438496549bdd034ead..3e71bc1b4a80604cca3488b7ce1d07acffb24445 100644 (file)
 
 #define MPT_MAX_CALLBACKS              32
 
-
-#define         CAN_SLEEP                      1
-#define  NO_SLEEP                      0
-
 #define INTERNAL_CMDS_COUNT            10      /* reserved cmds */
 /* reserved for issuing internally framed scsi io cmds */
 #define INTERNAL_SCSIIO_CMDS_COUNT     3
@@ -478,7 +474,7 @@ struct _sas_device {
        u8      pfa_led_on;
        u8      pend_sas_rphy_add;
        u8      enclosure_level;
-       u8      connector_name[4];
+       u8      connector_name[5];
        struct kref refcount;
 };
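
The extra byte in connector_name is presumably for a terminating NUL: the SAS device page supplies four name characters, and a 4-byte array printed with %s would read past its end. A self-contained illustration of the off-by-one being avoided:

#include <stdio.h>
#include <string.h>

int main(void)
{
        char connector_name[5] = { 0 };         /* 4 name bytes + NUL */

        memcpy(connector_name, "A1B2", 4);      /* as copied from firmware */
        printf("connector: %s\n", connector_name); /* index 4 is '\0' */
        return 0;
}
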
 
@@ -794,16 +790,6 @@ struct reply_post_struct {
        dma_addr_t                      reply_post_free_dma;
 };
 
-/**
- * enum mutex_type - task management mutex type
- * @TM_MUTEX_OFF: mutex is not required becuase calling function is acquiring it
- * @TM_MUTEX_ON: mutex is required
- */
-enum mutex_type {
-       TM_MUTEX_OFF = 0,
-       TM_MUTEX_ON = 1,
-};
-
 typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
 /**
  * struct MPT3SAS_ADAPTER - per adapter struct
@@ -1229,7 +1215,7 @@ int mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc);
 void mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc);
 int mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc);
 void mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc);
-int mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
+int mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
        enum reset_type type);
 
 void *mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid);
@@ -1291,7 +1277,11 @@ void mpt3sas_scsih_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase);
 
 int mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
        uint channel, uint id, uint lun, u8 type, u16 smid_task,
-       ulong timeout, enum mutex_type m_type);
+       ulong timeout);
+int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+       uint channel, uint id, uint lun, u8 type, u16 smid_task,
+       ulong timeout);
+
 void mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle);
 void mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle);
 void mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address);
index a6914ec99cc047ecb492e107988a1f5bb9551c1e..cebfd734fd769c78a36fb66c1805f5fb89e37ac2 100644 (file)
@@ -285,7 +285,6 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
 {
        u16 smid;
        u32 ioc_state;
-       unsigned long timeleft;
        Mpi2ConfigRequest_t *config_request;
        int r;
        u8 retry_count, issue_host_reset = 0;
@@ -386,8 +385,7 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
        _config_display_some_debug(ioc, smid, "config_request", NULL);
        init_completion(&ioc->config_cmds.done);
        mpt3sas_base_put_smid_default(ioc, smid);
-       timeleft = wait_for_completion_timeout(&ioc->config_cmds.done,
-           timeout*HZ);
+       wait_for_completion_timeout(&ioc->config_cmds.done, timeout*HZ);
        if (!(ioc->config_cmds.status & MPT3_CMD_COMPLETE)) {
                pr_err(MPT3SAS_FMT "%s: timeout\n",
                    ioc->name, __func__);
@@ -491,8 +489,7 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
        mutex_unlock(&ioc->config_cmds.mutex);
 
        if (issue_host_reset)
-               mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
-                   FORCE_BIG_HAMMER);
+               mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
        return r;
 }
 
index 7d00f09666b6302f0c3ebf351cd237d6b822d083..26cdc127ac89cf022490e8d87584f0542babfa04 100644 (file)
@@ -518,7 +518,7 @@ mpt3sas_ctl_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
  *
  * Called when an application requests the fasync callback handler.
  */
-int
+static int
 _ctl_fasync(int fd, struct file *filep, int mode)
 {
        return fasync_helper(fd, filep, mode, &async_queue);
@@ -530,7 +530,7 @@ _ctl_fasync(int fd, struct file *filep, int mode)
  * @wait -
  *
  */
-unsigned int
+static unsigned int
 _ctl_poll(struct file *filep, poll_table *wait)
 {
        struct MPT3SAS_ADAPTER *ioc;
@@ -641,9 +641,8 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
        MPI2RequestHeader_t *mpi_request = NULL, *request;
        MPI2DefaultReply_t *mpi_reply;
        u32 ioc_state;
-       u16 ioc_status;
        u16 smid;
-       unsigned long timeout, timeleft;
+       unsigned long timeout;
        u8 issue_reset;
        u32 sz;
        void *psge;
@@ -914,8 +913,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
                timeout = MPT3_IOCTL_DEFAULT_TIMEOUT;
        else
                timeout = karg.timeout;
-       timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
-           timeout*HZ);
+       wait_for_completion_timeout(&ioc->ctl_cmds.done, timeout*HZ);
        if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
                Mpi2SCSITaskManagementRequest_t *tm_request =
                    (Mpi2SCSITaskManagementRequest_t *)mpi_request;
@@ -938,7 +936,6 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
        }
 
        mpi_reply = ioc->ctl_cmds.reply;
-       ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
 
        if (mpi_reply->Function == MPI2_FUNCTION_SCSI_TASK_MGMT &&
            (ioc->logging_level & MPT_DEBUG_TM)) {
@@ -1001,13 +998,11 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
                                ioc->name,
                                le16_to_cpu(mpi_request->FunctionDependent1));
                        mpt3sas_halt_firmware(ioc);
-                       mpt3sas_scsih_issue_tm(ioc,
+                       mpt3sas_scsih_issue_locked_tm(ioc,
                            le16_to_cpu(mpi_request->FunctionDependent1), 0, 0,
-                           0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 30,
-                           TM_MUTEX_ON);
+                           0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 30);
                } else
-                       mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
-                           FORCE_BIG_HAMMER);
+                       mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
        }
 
  out:
@@ -1220,8 +1215,7 @@ _ctl_do_reset(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
        dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
            __func__));
 
-       retval = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
-           FORCE_BIG_HAMMER);
+       retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
        pr_info(MPT3SAS_FMT "host reset: %s\n",
            ioc->name, ((!retval) ? "SUCCESS" : "FAILED"));
        return 0;
@@ -1381,7 +1375,6 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
        Mpi2DiagBufferPostRequest_t *mpi_request;
        Mpi2DiagBufferPostReply_t *mpi_reply;
        u8 buffer_type;
-       unsigned long timeleft;
        u16 smid;
        u16 ioc_status;
        u32 ioc_state;
@@ -1499,7 +1492,7 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
 
        init_completion(&ioc->ctl_cmds.done);
        mpt3sas_base_put_smid_default(ioc, smid);
-       timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
+       wait_for_completion_timeout(&ioc->ctl_cmds.done,
            MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
 
        if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
@@ -1538,8 +1531,7 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
 
  issue_host_reset:
        if (issue_reset)
-               mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
-                   FORCE_BIG_HAMMER);
+               mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
 
  out:
 
@@ -1800,7 +1792,6 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
        u16 ioc_status;
        u32 ioc_state;
        int rc;
-       unsigned long timeleft;
 
        dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
            __func__));
@@ -1848,7 +1839,7 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
 
        init_completion(&ioc->ctl_cmds.done);
        mpt3sas_base_put_smid_default(ioc, smid);
-       timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
+       wait_for_completion_timeout(&ioc->ctl_cmds.done,
            MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
 
        if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
@@ -1974,8 +1965,7 @@ _ctl_diag_release(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
        rc = mpt3sas_send_diag_release(ioc, buffer_type, &issue_reset);
 
        if (issue_reset)
-               mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
-                   FORCE_BIG_HAMMER);
+               mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
 
        return rc;
 }
@@ -1995,7 +1985,7 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
        Mpi2DiagBufferPostReply_t *mpi_reply;
        int rc, i;
        u8 buffer_type;
-       unsigned long timeleft, request_size, copy_size;
+       unsigned long request_size, copy_size;
        u16 smid;
        u16 ioc_status;
        u8 issue_reset = 0;
@@ -2116,7 +2106,7 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
 
        init_completion(&ioc->ctl_cmds.done);
        mpt3sas_base_put_smid_default(ioc, smid);
-       timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
+       wait_for_completion_timeout(&ioc->ctl_cmds.done,
            MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
 
        if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
@@ -2155,8 +2145,7 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
 
  issue_host_reset:
        if (issue_reset)
-               mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
-                   FORCE_BIG_HAMMER);
+               mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
 
  out:
 
@@ -2352,7 +2341,7 @@ out_unlock_pciaccess:
  * @cmd - ioctl opcode
  * @arg -
  */
-long
+static long
 _ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
        long ret;
@@ -2372,7 +2361,7 @@ _ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
  * @cmd - ioctl opcode
  * @arg -
  */
-long
+static long
 _ctl_mpt2_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
        long ret;
@@ -2392,7 +2381,7 @@ _ctl_mpt2_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
  *
  * This routine handles 32 bit applications in 64bit os.
  */
-long
+static long
 _ctl_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg)
 {
        long ret;
@@ -2410,7 +2399,7 @@ _ctl_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg)
  *
  * This routine handles 32 bit applications in 64bit os.
  */
-long
+static long
 _ctl_mpt2_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg)
 {
        long ret;
index cd91a684c945a2c62e8c3626f6e1001c96935d35..209a969a979d8768fa5d0dc15bc5f921f2c4ec1c 100644 (file)
@@ -1195,7 +1195,7 @@ _scsih_scsi_lookup_find_by_lun(struct MPT3SAS_ADAPTER *ioc, int id,
  *
  * Returns queue depth.
  */
-int
+static int
 scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
 {
        struct Scsi_Host *shost = sdev->host;
@@ -1244,7 +1244,7 @@ scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
  * Returns 0 if ok. Any other return is assumed to be an error and
  * the device is ignored.
  */
-int
+static int
 scsih_target_alloc(struct scsi_target *starget)
 {
        struct Scsi_Host *shost = dev_to_shost(&starget->dev);
@@ -1311,7 +1311,7 @@ scsih_target_alloc(struct scsi_target *starget)
  *
  * Returns nothing.
  */
-void
+static void
 scsih_target_destroy(struct scsi_target *starget)
 {
        struct Scsi_Host *shost = dev_to_shost(&starget->dev);
@@ -1320,7 +1320,6 @@ scsih_target_destroy(struct scsi_target *starget)
        struct _sas_device *sas_device;
        struct _raid_device *raid_device;
        unsigned long flags;
-       struct sas_rphy *rphy;
 
        sas_target_priv_data = starget->hostdata;
        if (!sas_target_priv_data)
@@ -1339,7 +1338,6 @@ scsih_target_destroy(struct scsi_target *starget)
        }
 
        spin_lock_irqsave(&ioc->sas_device_lock, flags);
-       rphy = dev_to_rphy(starget->dev.parent);
        sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
        if (sas_device && (sas_device->starget == starget) &&
            (sas_device->id == starget->id) &&
@@ -1369,7 +1367,7 @@ scsih_target_destroy(struct scsi_target *starget)
  * Returns 0 if ok. Any other return is assumed to be an error and
  * the device is ignored.
  */
-int
+static int
 scsih_slave_alloc(struct scsi_device *sdev)
 {
        struct Scsi_Host *shost;
@@ -1434,7 +1432,7 @@ scsih_slave_alloc(struct scsi_device *sdev)
  *
  * Returns nothing.
  */
-void
+static void
 scsih_slave_destroy(struct scsi_device *sdev)
 {
        struct MPT3SAS_TARGET *sas_target_priv_data;
@@ -1527,7 +1525,7 @@ _scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER *ioc,
  * scsih_is_raid - return boolean indicating device is raid volume
  * @dev the device struct object
  */
-int
+static int
 scsih_is_raid(struct device *dev)
 {
        struct scsi_device *sdev = to_scsi_device(dev);
@@ -1542,7 +1540,7 @@ scsih_is_raid(struct device *dev)
  * scsih_get_resync - get raid volume resync percent complete
  * @dev the device struct object
  */
-void
+static void
 scsih_get_resync(struct device *dev)
 {
        struct scsi_device *sdev = to_scsi_device(dev);
@@ -1603,7 +1601,7 @@ scsih_get_resync(struct device *dev)
  * scsih_get_state - get raid volume level
  * @dev the device struct object
  */
-void
+static void
 scsih_get_state(struct device *dev)
 {
        struct scsi_device *sdev = to_scsi_device(dev);
@@ -1805,7 +1803,7 @@ _scsih_enable_tlr(struct MPT3SAS_ADAPTER *ioc, struct scsi_device *sdev)
  * Returns 0 if ok. Any other return is assumed to be an error and
  * the device is ignored.
  */
-int
+static int
 scsih_slave_configure(struct scsi_device *sdev)
 {
        struct Scsi_Host *shost = sdev->host;
@@ -2021,7 +2019,7 @@ scsih_slave_configure(struct scsi_device *sdev)
  *
  * Return nothing.
  */
-int
+static int
 scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev,
        sector_t capacity, int params[])
 {
@@ -2201,7 +2199,6 @@ mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
  * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
  * @smid_task: smid assigned to the task
  * @timeout: timeout in seconds
- * @m_type: TM_MUTEX_ON or TM_MUTEX_OFF
  * Context: user
  *
  * A generic API for sending task management requests to firmware.
@@ -2212,60 +2209,51 @@ mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
  */
 int
 mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
-       uint id, uint lun, u8 type, u16 smid_task, ulong timeout,
-       enum mutex_type m_type)
+       uint id, uint lun, u8 type, u16 smid_task, ulong timeout)
 {
        Mpi2SCSITaskManagementRequest_t *mpi_request;
        Mpi2SCSITaskManagementReply_t *mpi_reply;
        u16 smid = 0;
        u32 ioc_state;
-       unsigned long timeleft;
        struct scsiio_tracker *scsi_lookup = NULL;
        int rc;
        u16 msix_task = 0;
 
-       if (m_type == TM_MUTEX_ON)
-               mutex_lock(&ioc->tm_cmds.mutex);
+       lockdep_assert_held(&ioc->tm_cmds.mutex);
+
        if (ioc->tm_cmds.status != MPT3_CMD_NOT_USED) {
                pr_info(MPT3SAS_FMT "%s: tm_cmd busy!!!\n",
                    __func__, ioc->name);
-               rc = FAILED;
-               goto err_out;
+               return FAILED;
        }
 
        if (ioc->shost_recovery || ioc->remove_host ||
            ioc->pci_error_recovery) {
                pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
                    __func__, ioc->name);
-               rc = FAILED;
-               goto err_out;
+               return FAILED;
        }
 
        ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
        if (ioc_state & MPI2_DOORBELL_USED) {
                dhsprintk(ioc, pr_info(MPT3SAS_FMT
                        "unexpected doorbell active!\n", ioc->name));
-               rc = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
-                   FORCE_BIG_HAMMER);
-               rc = (!rc) ? SUCCESS : FAILED;
-               goto err_out;
+               rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
+               return (!rc) ? SUCCESS : FAILED;
        }
 
        if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
                mpt3sas_base_fault_info(ioc, ioc_state &
                    MPI2_DOORBELL_DATA_MASK);
-               rc = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
-                   FORCE_BIG_HAMMER);
-               rc = (!rc) ? SUCCESS : FAILED;
-               goto err_out;
+               rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
+               return (!rc) ? SUCCESS : FAILED;
        }
 
        smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_cb_idx);
        if (!smid) {
                pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
                    ioc->name, __func__);
-               rc = FAILED;
-               goto err_out;
+               return FAILED;
        }
 
        if (type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK)
@@ -2292,19 +2280,17 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
        else
                msix_task = 0;
        mpt3sas_base_put_smid_hi_priority(ioc, smid, msix_task);
-       timeleft = wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
+       wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
        if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
                pr_err(MPT3SAS_FMT "%s: timeout\n",
                    ioc->name, __func__);
                _debug_dump_mf(mpi_request,
                    sizeof(Mpi2SCSITaskManagementRequest_t)/4);
                if (!(ioc->tm_cmds.status & MPT3_CMD_RESET)) {
-                       rc = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
-                           FORCE_BIG_HAMMER);
+                       rc = mpt3sas_base_hard_reset_handler(ioc,
+                                       FORCE_BIG_HAMMER);
                        rc = (!rc) ? SUCCESS : FAILED;
-                       ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
-                       mpt3sas_scsih_clear_tm_flag(ioc, handle);
-                       goto err_out;
+                       goto out;
                }
        }
 
@@ -2356,17 +2342,23 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
                break;
        }
 
+out:
        mpt3sas_scsih_clear_tm_flag(ioc, handle);
        ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
-       if (m_type == TM_MUTEX_ON)
-               mutex_unlock(&ioc->tm_cmds.mutex);
-
        return rc;
+}
 
- err_out:
-       if (m_type == TM_MUTEX_ON)
-               mutex_unlock(&ioc->tm_cmds.mutex);
-       return rc;
+int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+       uint channel, uint id, uint lun, u8 type, u16 smid_task, ulong timeout)
+{
+       int ret;
+
+       mutex_lock(&ioc->tm_cmds.mutex);
+       ret = mpt3sas_scsih_issue_tm(ioc, handle, channel, id, lun, type,
+                       smid_task, timeout);
+       mutex_unlock(&ioc->tm_cmds.mutex);
+
+       return ret;
 }
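The hunk above replaces the old TM_MUTEX_ON/TM_MUTEX_OFF parameter by splitting the function into an unlocked core plus a locking wrapper. A minimal sketch of that pattern follows; struct foo and the foo_do_work*() names are illustrative, not from this patch:

/*
 * Locked-wrapper sketch: the unlocked variant states its locking
 * contract with lockdep_assert_held() instead of a mutex flag, so it
 * may return early on errors without unbalancing the mutex.
 */
#include <linux/mutex.h>
#include <linux/lockdep.h>

struct foo {
	struct mutex lock;
};

static int foo_do_work(struct foo *f)
{
	lockdep_assert_held(&f->lock);	/* caller must hold f->lock */

	/* ... the actual work; early returns are now safe ... */
	return 0;
}

static int foo_do_work_locked(struct foo *f)
{
	int ret;

	mutex_lock(&f->lock);
	ret = foo_do_work(f);
	mutex_unlock(&f->lock);

	return ret;
}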
 
 /**
@@ -2439,7 +2431,7 @@ _scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd)
  *
  * Returns SUCCESS if command aborted else FAILED
  */
-int
+static int
 scsih_abort(struct scsi_cmnd *scmd)
 {
        struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
@@ -2482,9 +2474,9 @@ scsih_abort(struct scsi_cmnd *scmd)
        mpt3sas_halt_firmware(ioc);
 
        handle = sas_device_priv_data->sas_target->handle;
-       r = mpt3sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
+       r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
            scmd->device->id, scmd->device->lun,
-           MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30, TM_MUTEX_ON);
+           MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30);
 
  out:
        sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n",
@@ -2498,7 +2490,7 @@ scsih_abort(struct scsi_cmnd *scmd)
  *
  * Returns SUCCESS if command aborted else FAILED
  */
-int
+static int
 scsih_dev_reset(struct scsi_cmnd *scmd)
 {
        struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
@@ -2541,9 +2533,9 @@ scsih_dev_reset(struct scsi_cmnd *scmd)
                goto out;
        }
 
-       r = mpt3sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
+       r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
            scmd->device->id, scmd->device->lun,
-           MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30, TM_MUTEX_ON);
+           MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30);
 
  out:
        sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(%p)\n",
@@ -2561,7 +2553,7 @@ scsih_dev_reset(struct scsi_cmnd *scmd)
  *
  * Returns SUCCESS if command aborted else FAILED
  */
-int
+static int
 scsih_target_reset(struct scsi_cmnd *scmd)
 {
        struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
@@ -2603,9 +2595,9 @@ scsih_target_reset(struct scsi_cmnd *scmd)
                goto out;
        }
 
-       r = mpt3sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
+       r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
            scmd->device->id, 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
-           30, TM_MUTEX_ON);
+           30);
 
  out:
        starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n",
@@ -2624,7 +2616,7 @@ scsih_target_reset(struct scsi_cmnd *scmd)
  *
  * Returns SUCCESS if command aborted else FAILED
  */
-int
+static int
 scsih_host_reset(struct scsi_cmnd *scmd)
 {
        struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
@@ -2641,8 +2633,7 @@ scsih_host_reset(struct scsi_cmnd *scmd)
                goto out;
        }
 
-       retval = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
-           FORCE_BIG_HAMMER);
+       retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
        r = (retval < 0) ? FAILED : SUCCESS;
 out:
        pr_info(MPT3SAS_FMT "host reset: %s scmd(%p)\n",
@@ -3455,7 +3446,7 @@ _scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
  *
  * Context - processed in interrupt context.
  */
-void
+static void
 _scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 event,
                                u32 event_context)
 {
@@ -3494,7 +3485,7 @@ _scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 event,
  *
  * Context - processed in interrupt context.
  */
-void
+static void
 _scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
                                        u16 smid, u16 handle)
 {
@@ -4032,7 +4023,7 @@ _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
  * SCSI_MLQUEUE_DEVICE_BUSY if the device queue is full, or
  * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full
  */
-int
+static int
 scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
 {
        struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
@@ -4701,7 +4692,7 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
                            le16_to_cpu(mpi_reply->DevHandle));
                mpt3sas_trigger_scsi(ioc, data.skey, data.asc, data.ascq);
 
-               if (!(ioc->logging_level & MPT_DEBUG_REPLY) &&
+               if ((ioc->logging_level & MPT_DEBUG_REPLY) &&
                     ((scmd->sense_buffer[2] == UNIT_ATTENTION) ||
                     (scmd->sense_buffer[2] == MEDIUM_ERROR) ||
                     (scmd->sense_buffer[2] == HARDWARE_ERROR)))
@@ -5380,8 +5371,9 @@ _scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
                     MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
                        sas_device->enclosure_level =
                                le16_to_cpu(sas_device_pg0.EnclosureLevel);
-                       memcpy(&sas_device->connector_name[0],
-                               &sas_device_pg0.ConnectorName[0], 4);
+                       memcpy(sas_device->connector_name,
+                               sas_device_pg0.ConnectorName, 4);
+                       sas_device->connector_name[4] = '\0';
                } else {
                        sas_device->enclosure_level = 0;
                        sas_device->connector_name[0] = '\0';
@@ -5508,8 +5500,9 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
        if (sas_device_pg0.Flags & MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
                sas_device->enclosure_level =
                        le16_to_cpu(sas_device_pg0.EnclosureLevel);
-               memcpy(&sas_device->connector_name[0],
-                       &sas_device_pg0.ConnectorName[0], 4);
+               memcpy(sas_device->connector_name,
+                       sas_device_pg0.ConnectorName, 4);
+               sas_device->connector_name[4] = '\0';
        } else {
                sas_device->enclosure_level = 0;
                sas_device->connector_name[0] = '\0';
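Both connector_name hunks copy the fixed 4-byte ConnectorName field and now explicitly NUL-terminate the destination, so the field is safe to format with "%s". A minimal standalone sketch of the same idiom, with hypothetical names:

#include <string.h>

#define CONNECTOR_NAME_LEN 4	/* mirrors the 4-byte firmware field */

/* copy_connector_name() is illustrative, not from this patch */
static void copy_connector_name(char dst[CONNECTOR_NAME_LEN + 1],
				const char src[CONNECTOR_NAME_LEN])
{
	/* src is fixed-width and not necessarily NUL-terminated */
	memcpy(dst, src, CONNECTOR_NAME_LEN);
	dst[CONNECTOR_NAME_LEN] = '\0';	/* guarantee termination */
}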
@@ -6087,8 +6080,7 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
 
                spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
                r = mpt3sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
-                   MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30,
-                   TM_MUTEX_OFF);
+                   MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30);
                if (r == FAILED) {
                        sdev_printk(KERN_WARNING, sdev,
                            "mpt3sas_scsih_issue_tm: FAILED when sending "
@@ -6128,8 +6120,8 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
                        goto out_no_lock;
 
                r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id,
-                   sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30,
-                   TM_MUTEX_OFF);
+                   sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid,
+                   30);
                if (r == FAILED) {
                        sdev_printk(KERN_WARNING, sdev,
                            "mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : "
@@ -6297,8 +6289,7 @@ _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
        mutex_unlock(&ioc->scsih_cmds.mutex);
 
        if (issue_reset)
-               mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
-                   FORCE_BIG_HAMMER);
+               mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
        return rc;
 }
 
@@ -6311,11 +6302,10 @@ _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
 static void
 _scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach)
 {
-       int rc;
        sdev->no_uld_attach = no_uld_attach ? 1 : 0;
        sdev_printk(KERN_INFO, sdev, "%s raid component\n",
            sdev->no_uld_attach ? "hidding" : "exposing");
-       rc = scsi_device_reprobe(sdev);
+       WARN_ON(scsi_device_reprobe(sdev));
 }
 
 /**
@@ -8137,7 +8127,7 @@ _scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
  * Routine called when unloading the driver.
  * Return nothing.
  */
-void scsih_remove(struct pci_dev *pdev)
+static void scsih_remove(struct pci_dev *pdev)
 {
        struct Scsi_Host *shost = pci_get_drvdata(pdev);
        struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
@@ -8210,7 +8200,7 @@ void scsih_remove(struct pci_dev *pdev)
  *
  * Return nothing.
  */
-void
+static void
 scsih_shutdown(struct pci_dev *pdev)
 {
        struct Scsi_Host *shost = pci_get_drvdata(pdev);
@@ -8451,7 +8441,7 @@ _scsih_probe_devices(struct MPT3SAS_ADAPTER *ioc)
  * of scanning the entire bus.  In our implementation, we will kick off
  * firmware discovery.
  */
-void
+static void
 scsih_scan_start(struct Scsi_Host *shost)
 {
        struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
@@ -8478,7 +8468,7 @@ scsih_scan_start(struct Scsi_Host *shost)
  * scsi_host and the elapsed time of the scan in jiffies. In our implementation,
  * we wait for firmware discovery to complete, then return 1.
  */
-int
+static int
 scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
 {
        struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
@@ -8608,7 +8598,7 @@ static struct raid_function_template mpt3sas_raid_functions = {
  *     MPI25_VERSION for SAS 3.0 HBA devices, and
  *     MPI26_VERSION for Cutlass & Invader SAS 3.0 HBA devices
  */
-u16
+static u16
 _scsih_determine_hba_mpi_version(struct pci_dev *pdev)
 {
 
@@ -8660,7 +8650,7 @@ _scsih_determine_hba_mpi_version(struct pci_dev *pdev)
  *
  * Returns 0 success, anything else error.
  */
-int
+static int
 _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
        struct MPT3SAS_ADAPTER *ioc;
@@ -8869,7 +8859,7 @@ out_add_shost_fail:
  *
  * Returns 0 success, anything else error.
  */
-int
+static int
 scsih_suspend(struct pci_dev *pdev, pm_message_t state)
 {
        struct Scsi_Host *shost = pci_get_drvdata(pdev);
@@ -8896,7 +8886,7 @@ scsih_suspend(struct pci_dev *pdev, pm_message_t state)
  *
  * Returns 0 success, anything else error.
  */
-int
+static int
 scsih_resume(struct pci_dev *pdev)
 {
        struct Scsi_Host *shost = pci_get_drvdata(pdev);
@@ -8916,7 +8906,7 @@ scsih_resume(struct pci_dev *pdev)
        if (r)
                return r;
 
-       mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, SOFT_RESET);
+       mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET);
        scsi_unblock_requests(shost);
        mpt3sas_base_start_watchdog(ioc);
        return 0;
@@ -8933,7 +8923,7 @@ scsih_resume(struct pci_dev *pdev)
  * Return value:
  *      PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
  */
-pci_ers_result_t
+static pci_ers_result_t
 scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
 {
        struct Scsi_Host *shost = pci_get_drvdata(pdev);
@@ -8970,7 +8960,7 @@ scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
  * code after the PCI slot has been reset, just before we
  * should resume normal operations.
  */
-pci_ers_result_t
+static pci_ers_result_t
 scsih_pci_slot_reset(struct pci_dev *pdev)
 {
        struct Scsi_Host *shost = pci_get_drvdata(pdev);
@@ -8987,8 +8977,7 @@ scsih_pci_slot_reset(struct pci_dev *pdev)
        if (rc)
                return PCI_ERS_RESULT_DISCONNECT;
 
-       rc = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
-           FORCE_BIG_HAMMER);
+       rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
 
        pr_warn(MPT3SAS_FMT "hard reset: %s\n", ioc->name,
            (rc == 0) ? "success" : "failed");
@@ -9007,7 +8996,7 @@ scsih_pci_slot_reset(struct pci_dev *pdev)
  * OK to resume normal operation. Use completion to allow
  * halted scsi ops to resume.
  */
-void
+static void
 scsih_pci_resume(struct pci_dev *pdev)
 {
        struct Scsi_Host *shost = pci_get_drvdata(pdev);
@@ -9024,7 +9013,7 @@ scsih_pci_resume(struct pci_dev *pdev)
  * scsih_pci_mmio_enabled - Enable MMIO and dump debug registers
  * @pdev: pointer to PCI device
  */
-pci_ers_result_t
+static pci_ers_result_t
 scsih_pci_mmio_enabled(struct pci_dev *pdev)
 {
        struct Scsi_Host *shost = pci_get_drvdata(pdev);
@@ -9152,7 +9141,7 @@ static struct pci_driver mpt3sas_driver = {
  *
  * Returns 0 success, anything else error.
  */
-int
+static int
 scsih_init(void)
 {
        mpt2_ids = 0;
@@ -9202,7 +9191,7 @@ scsih_init(void)
  *
  * Returns 0 success, anything else error.
  */
-void
+static void
 scsih_exit(void)
 {
 
index ff93286bc32f0ada8e034cf69d8c0d505e3640f1..b74faf1a69b27426a5d1ae01b2df7634ac4dba7b 100644 (file)
@@ -300,7 +300,6 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
        int rc;
        u16 smid;
        u32 ioc_state;
-       unsigned long timeleft;
        void *psge;
        u8 issue_reset = 0;
        void *data_out = NULL;
@@ -394,8 +393,7 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
                ioc->name, (unsigned long long)sas_address));
        init_completion(&ioc->transport_cmds.done);
        mpt3sas_base_put_smid_default(ioc, smid);
-       timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
-           10*HZ);
+       wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ);
 
        if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
                pr_err(MPT3SAS_FMT "%s: timeout\n",
@@ -446,8 +444,7 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
 
  issue_host_reset:
        if (issue_reset)
-               mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
-                   FORCE_BIG_HAMMER);
+               mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
  out:
        ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
        if (data_out)
@@ -1107,7 +1104,6 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
        int rc;
        u16 smid;
        u32 ioc_state;
-       unsigned long timeleft;
        void *psge;
        u8 issue_reset = 0;
        void *data_out = NULL;
@@ -1203,8 +1199,7 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
                phy->number));
        init_completion(&ioc->transport_cmds.done);
        mpt3sas_base_put_smid_default(ioc, smid);
-       timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
-           10*HZ);
+       wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ);
 
        if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
                pr_err(MPT3SAS_FMT "%s: timeout\n",
@@ -1253,8 +1248,7 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
 
  issue_host_reset:
        if (issue_reset)
-               mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
-                   FORCE_BIG_HAMMER);
+               mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
  out:
        ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
        if (data_out)
@@ -1421,7 +1415,6 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
        int rc;
        u16 smid;
        u32 ioc_state;
-       unsigned long timeleft;
        void *psge;
        u8 issue_reset = 0;
        void *data_out = NULL;
@@ -1522,8 +1515,7 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
                phy->number, phy_operation));
        init_completion(&ioc->transport_cmds.done);
        mpt3sas_base_put_smid_default(ioc, smid);
-       timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
-           10*HZ);
+       wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ);
 
        if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
                pr_err(MPT3SAS_FMT "%s: timeout\n",
@@ -1564,8 +1556,7 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
 
  issue_host_reset:
        if (issue_reset)
-               mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
-                   FORCE_BIG_HAMMER);
+               mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
  out:
        ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
        if (data_out)
@@ -1899,7 +1890,6 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
        int rc;
        u16 smid;
        u32 ioc_state;
-       unsigned long timeleft;
        void *psge;
        u8 issue_reset = 0;
        dma_addr_t dma_addr_in = 0;
@@ -2043,8 +2033,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
 
        init_completion(&ioc->transport_cmds.done);
        mpt3sas_base_put_smid_default(ioc, smid);
-       timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
-           10*HZ);
+       wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ);
 
        if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
                pr_err(MPT3SAS_FMT "%s : timeout\n",
@@ -2103,8 +2092,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
 
  issue_host_reset:
        if (issue_reset) {
-               mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
-                   FORCE_BIG_HAMMER);
+               mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
                rc = -ETIMEDOUT;
        }
 
index b2a88200fe546b2243574b90422cb52da73b10fb..68a5c347fae9a578fdcacbf6c618a0c15f4f6bb6 100644 (file)
@@ -306,7 +306,7 @@ static int pmcraid_change_queue_depth(struct scsi_device *scsi_dev, int depth)
  * Return Value
  *      None
  */
-void pmcraid_init_cmdblk(struct pmcraid_cmd *cmd, int index)
+static void pmcraid_init_cmdblk(struct pmcraid_cmd *cmd, int index)
 {
        struct pmcraid_ioarcb *ioarcb = &(cmd->ioa_cb->ioarcb);
        dma_addr_t dma_addr = cmd->ioa_cb_bus_addr;
@@ -401,7 +401,7 @@ static struct pmcraid_cmd *pmcraid_get_free_cmd(
  * Return Value:
  *     nothing
  */
-void pmcraid_return_cmd(struct pmcraid_cmd *cmd)
+static void pmcraid_return_cmd(struct pmcraid_cmd *cmd)
 {
        struct pmcraid_instance *pinstance = cmd->drv_inst;
        unsigned long lock_flags;
@@ -1710,7 +1710,7 @@ static struct pmcraid_ioasc_error *pmcraid_get_error_info(u32 ioasc)
  * @ioasc: ioasc code
  * @cmd: pointer to command that resulted in 'ioasc'
  */
-void pmcraid_ioasc_logger(u32 ioasc, struct pmcraid_cmd *cmd)
+static void pmcraid_ioasc_logger(u32 ioasc, struct pmcraid_cmd *cmd)
 {
        struct pmcraid_ioasc_error *error_info = pmcraid_get_error_info(ioasc);
 
@@ -3137,7 +3137,7 @@ static int pmcraid_eh_host_reset_handler(struct scsi_cmnd *scmd)
  *   returns pointer pmcraid_ioadl_desc, initialized to point to internal
  *   or external IOADLs
  */
-struct pmcraid_ioadl_desc *
+static struct pmcraid_ioadl_desc *
 pmcraid_init_ioadls(struct pmcraid_cmd *cmd, int sgcount)
 {
        struct pmcraid_ioadl_desc *ioadl;
index 2674f4c16bc319da17b016783aa5ef10d7c914bb..ace65db1d2a25becd6dc3c4a158d932f7dab8fff 100644 (file)
@@ -899,12 +899,12 @@ qla2x00_wait_for_hba_ready(scsi_qla_host_t *vha)
        struct qla_hw_data *ha = vha->hw;
        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
 
-       while (((qla2x00_reset_active(vha)) || ha->dpc_active ||
-           ha->flags.mbox_busy) ||
-               test_bit(FX00_RESET_RECOVERY, &vha->dpc_flags) ||
-               test_bit(FX00_TARGET_SCAN, &vha->dpc_flags)) {
-                       if (test_bit(UNLOADING, &base_vha->dpc_flags))
-                               break;
+       while ((qla2x00_reset_active(vha) || ha->dpc_active ||
+               ha->flags.mbox_busy) ||
+              test_bit(FX00_RESET_RECOVERY, &vha->dpc_flags) ||
+              test_bit(FX00_TARGET_SCAN, &vha->dpc_flags)) {
+               if (test_bit(UNLOADING, &base_vha->dpc_flags))
+                       break;
                msleep(1000);
        }
 }
@@ -4694,7 +4694,7 @@ retry_unlock:
                        qla83xx_wait_logic();
                        retry++;
                        ql_dbg(ql_dbg_p3p, base_vha, 0xb064,
-                           "Failed to release IDC lock, retyring=%d\n", retry);
+                           "Failed to release IDC lock, retrying=%d\n", retry);
                        goto retry_unlock;
                }
        } else if (retry < 10) {
@@ -4702,7 +4702,7 @@ retry_unlock:
                qla83xx_wait_logic();
                retry++;
                ql_dbg(ql_dbg_p3p, base_vha, 0xb065,
-                   "Failed to read drv-lockid, retyring=%d\n", retry);
+                   "Failed to read drv-lockid, retrying=%d\n", retry);
                goto retry_unlock;
        }
 
@@ -4718,7 +4718,7 @@ retry_unlock2:
                        qla83xx_wait_logic();
                        retry++;
                        ql_dbg(ql_dbg_p3p, base_vha, 0xb066,
-                           "Failed to release IDC lock, retyring=%d\n", retry);
+                           "Failed to release IDC lock, retrying=%d\n", retry);
                        goto retry_unlock2;
                }
        }
index ae87d6c19f17034448037732c39c633b1a645096..06ddd13cb7ccb9d4872e651eed2fdddfd55f797a 100644 (file)
@@ -1843,7 +1843,7 @@ static uint32_t ql4_84xx_poll_wait_for_ready(struct scsi_qla_host *ha,
        return rval;
 }
 
-uint32_t ql4_84xx_ipmdio_rd_reg(struct scsi_qla_host *ha, uint32_t addr1,
+static uint32_t ql4_84xx_ipmdio_rd_reg(struct scsi_qla_host *ha, uint32_t addr1,
                                uint32_t addr3, uint32_t mask, uint32_t addr,
                                uint32_t *data_ptr)
 {
index 57a4b9973320f159d13730214f3e1f7c6d7b24eb..ff3cde397a8d93e9f4a0adf9c4d4a49ed53dbf3e 100644 (file)
@@ -85,12 +85,14 @@ extern void scsi_device_unbusy(struct scsi_device *sdev);
 extern void scsi_queue_insert(struct scsi_cmnd *cmd, int reason);
 extern void scsi_io_completion(struct scsi_cmnd *, unsigned int);
 extern void scsi_run_host_queues(struct Scsi_Host *shost);
+extern void scsi_requeue_run_queue(struct work_struct *work);
 extern struct request_queue *scsi_alloc_queue(struct scsi_device *sdev);
 extern struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev);
 extern int scsi_mq_setup_tags(struct Scsi_Host *shost);
 extern void scsi_mq_destroy_tags(struct Scsi_Host *shost);
 extern int scsi_init_queue(void);
 extern void scsi_exit_queue(void);
+extern void scsi_evt_thread(struct work_struct *work);
 struct request_queue;
 struct request;
 extern struct kmem_cache *scsi_sdb_cache;
index e0a78f53d80960f04672d162c842a93b2128172f..212e98d940bc222885d8ece9a675df4782cc8be1 100644 (file)
@@ -217,8 +217,6 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
        struct scsi_device *sdev;
        int display_failure_msg = 1, ret;
        struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
-       extern void scsi_evt_thread(struct work_struct *work);
-       extern void scsi_requeue_run_queue(struct work_struct *work);
 
        sdev = kzalloc(sizeof(*sdev) + shost->transportt->device_size,
                       GFP_ATOMIC);
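The two hunks above move the scsi_evt_thread()/scsi_requeue_run_queue() prototypes out of a function body in scsi_scan.c and into scsi_priv.h. A tiny sketch of the rationale, with hypothetical bar.h/do_thing() names: a single declaration in the shared private header lets the compiler check every caller and the definition against the same prototype:

/* bar.h -- shared private header (hypothetical) */
#ifndef BAR_H
#define BAR_H

struct work_struct;

void do_thing(struct work_struct *work);	/* one authoritative prototype */

#endif /* BAR_H */

/*
 * Callers and the definition both #include "bar.h" instead of each
 * carrying a local "extern void do_thing(...);" inside a function.
 */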
index ae7d9bdf409c816d014ac2d668a1e449fce0c8e9..070332eb41f33de2c765bedc724f50f404af62a9 100644 (file)
@@ -79,18 +79,7 @@ static void sg_proc_cleanup(void);
  */
 #define SG_MAX_CDB_SIZE 252
 
-/*
- * Suppose you want to calculate the formula muldiv(x,m,d)=int(x * m / d)
- * Then when using 32 bit integers x * m may overflow during the calculation.
- * Replacing muldiv(x) by muldiv(x)=((x % d) * m) / d + int(x / d) * m
- * calculates the same, but prevents the overflow when both m and d
- * are "small" numbers (like HZ and USER_HZ).
- * Of course an overflow is inavoidable if the result of muldiv doesn't fit
- * in 32 bits.
- */
-#define MULDIV(X,MUL,DIV) ((((X % DIV) * MUL) / DIV) + ((X / DIV) * MUL))
-
-#define SG_DEFAULT_TIMEOUT MULDIV(SG_DEFAULT_TIMEOUT_USER, HZ, USER_HZ)
+#define SG_DEFAULT_TIMEOUT mult_frac(SG_DEFAULT_TIMEOUT_USER, HZ, USER_HZ)
 
 int sg_big_buff = SG_DEF_RESERVED_SIZE;
 /* N.B. This variable is readable and writeable via
@@ -884,10 +873,11 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
                        return result;
                if (val < 0)
                        return -EIO;
-               if (val >= MULDIV (INT_MAX, USER_HZ, HZ))
-                   val = MULDIV (INT_MAX, USER_HZ, HZ);
+               if (val >= mult_frac((s64)INT_MAX, USER_HZ, HZ))
+                       val = min_t(s64, mult_frac((s64)INT_MAX, USER_HZ, HZ),
+                                   INT_MAX);
                sfp->timeout_user = val;
-               sfp->timeout = MULDIV (val, HZ, USER_HZ);
+               sfp->timeout = mult_frac(val, HZ, USER_HZ);
 
                return 0;
        case SG_GET_TIMEOUT:    /* N.B. User receives timeout as return value */
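SG_DEFAULT_TIMEOUT and the SG_SET_TIMEOUT path now use mult_frac() from linux/kernel.h, which performs the same split multiply/divide that the removed MULDIV macro open-coded. A small userspace demo of why the split matters, assuming 32-bit arithmetic (names and values are illustrative):

#include <stdio.h>
#include <stdint.h>

/* split x into quotient and remainder by d so the intermediate
 * products stay small; exact whenever the final result fits */
static uint32_t mult_frac32(uint32_t x, uint32_t m, uint32_t d)
{
	uint32_t quot = x / d;
	uint32_t rem  = x % d;

	return quot * m + (rem * m) / d;
}

int main(void)
{
	uint32_t x = 3000000000u;	/* x * 100 exceeds 32 bits */

	/* unsigned multiply wraps (defined but wrong): prints 3647256 */
	printf("naive: %u\n", x * 100u / 1000u);
	/* split form prints the exact result: 300000000 */
	printf("split: %u\n", mult_frac32(x, 100, 1000));
	return 0;
}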
diff --git a/drivers/scsi/smartpqi/Kconfig b/drivers/scsi/smartpqi/Kconfig
new file mode 100644 (file)
index 0000000..97e159c
--- /dev/null
@@ -0,0 +1,54 @@
+#
+# Kernel configuration file for the SMARTPQI
+#
+# Copyright (c) 2016 Microsemi Corporation
+# Copyright (c) 2016 PMC-Sierra, Inc.
+#  (mailto:esc.storagedev@microsemi.com)
+
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; version 2
+# of the License.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# NO WARRANTY
+# THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+# CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+# LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+# solely responsible for determining the appropriateness of using and
+# distributing the Program and assumes all risks associated with its
+# exercise of rights under this Agreement, including but not limited to
+# the risks and costs of program errors, damage to or loss of data,
+# programs or equipment, and unavailability or interruption of operations.
+
+# DISCLAIMER OF LIABILITY
+# NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+# HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+config SCSI_SMARTPQI
+       tristate "Microsemi PQI Driver"
+       depends on PCI && SCSI && !S390
+       select SCSI_SAS_ATTRS
+       select RAID_ATTRS
+       ---help---
+       This driver supports Microsemi PQI controllers.
+
+       <http://www.microsemi.com>
+
+       To compile this driver as a module, choose M here: the
+       module will be called smartpqi.
+
+        Note: the aacraid driver will not manage a smartpqi
+              controller. You need to enable smartpqi for smartpqi
+              controllers. For more information, please see
+              Documentation/scsi/smartpqi.txt
diff --git a/drivers/scsi/smartpqi/Makefile b/drivers/scsi/smartpqi/Makefile
new file mode 100644 (file)
index 0000000..0f42a22
--- /dev/null
@@ -0,0 +1,3 @@
+ccflags-y += -I.
+obj-$(CONFIG_SCSI_SMARTPQI) += smartpqi.o
+smartpqi-objs := smartpqi_init.o smartpqi_sis.o smartpqi_sas_transport.o
diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
new file mode 100644 (file)
index 0000000..07b6444
--- /dev/null
@@ -0,0 +1,1136 @@
+/*
+ *    driver for Microsemi PQI-based storage controllers
+ *    Copyright (c) 2016 Microsemi Corporation
+ *    Copyright (c) 2016 PMC-Sierra, Inc.
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; version 2 of the License.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *    NON INFRINGEMENT.  See the GNU General Public License for more details.
+ *
+ *    Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
+ *
+ */
+
+#if !defined(_SMARTPQI_H)
+#define _SMARTPQI_H
+
+#pragma pack(1)
+
+#define PQI_DEVICE_SIGNATURE   "PQI DREG"
+
+/* This structure is defined by the PQI specification. */
+struct pqi_device_registers {
+       __le64  signature;
+       u8      function_and_status_code;
+       u8      reserved[7];
+       u8      max_admin_iq_elements;
+       u8      max_admin_oq_elements;
+       u8      admin_iq_element_length;        /* in 16-byte units */
+       u8      admin_oq_element_length;        /* in 16-byte units */
+       __le16  max_reset_timeout;              /* in 100-millisecond units */
+       u8      reserved1[2];
+       __le32  legacy_intx_status;
+       __le32  legacy_intx_mask_set;
+       __le32  legacy_intx_mask_clear;
+       u8      reserved2[28];
+       __le32  device_status;
+       u8      reserved3[4];
+       __le64  admin_iq_pi_offset;
+       __le64  admin_oq_ci_offset;
+       __le64  admin_iq_element_array_addr;
+       __le64  admin_oq_element_array_addr;
+       __le64  admin_iq_ci_addr;
+       __le64  admin_oq_pi_addr;
+       u8      admin_iq_num_elements;
+       u8      admin_oq_num_elements;
+       __le16  admin_queue_int_msg_num;
+       u8      reserved4[4];
+       __le32  device_error;
+       u8      reserved5[4];
+       __le64  error_details;
+       __le32  device_reset;
+       __le32  power_action;
+       u8      reserved6[104];
+};
+
+/*
+ * controller registers
+ *
+ * These are defined by the PMC implementation.
+ *
+ * Some registers (those named sis_*) are only used when in
+ * legacy SIS mode before we transition the controller into
+ * PQI mode.  There are a number of other SIS mode registers,
+ * but we don't use them, so only the SIS registers that we
+ * care about are defined here.  The offsets mentioned in the
+ * comments are the offsets from the PCIe BAR 0.
+ */
+struct pqi_ctrl_registers {
+       u8      reserved[0x20];
+       __le32  sis_host_to_ctrl_doorbell;              /* 20h */
+       u8      reserved1[0x34 - (0x20 + sizeof(__le32))];
+       __le32  sis_interrupt_mask;                     /* 34h */
+       u8      reserved2[0x9c - (0x34 + sizeof(__le32))];
+       __le32  sis_ctrl_to_host_doorbell;              /* 9Ch */
+       u8      reserved3[0xa0 - (0x9c + sizeof(__le32))];
+       __le32  sis_ctrl_to_host_doorbell_clear;        /* A0h */
+       u8      reserved4[0xb0 - (0xa0 + sizeof(__le32))];
+       __le32  sis_driver_scratch;                     /* B0h */
+       u8      reserved5[0xbc - (0xb0 + sizeof(__le32))];
+       __le32  sis_firmware_status;                    /* BCh */
+       u8      reserved6[0x1000 - (0xbc + sizeof(__le32))];
+       __le32  sis_mailbox[8];                         /* 1000h */
+       u8      reserved7[0x4000 - (0x1000 + (sizeof(__le32) * 8))];
+       /*
+        * The PQI spec states that the PQI registers should be at
+        * offset 0 from the PCIe BAR 0.  However, we can't map
+        * them at offset 0 because that would break compatibility
+        * with the SIS registers.  So we map them at offset 4000h.
+        */
+       struct pqi_device_registers pqi_registers;      /* 4000h */
+};
+
+#define PQI_DEVICE_REGISTERS_OFFSET    0x4000
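The reserved[] members in struct pqi_ctrl_registers are sized by offset arithmetic so each SIS register lands at the BAR 0 offset named in the comments. A hypothetical compile-time check (not part of the patch) that could sit next to the structure, assuming smartpqi.h is includable and C11 _Static_assert is available:

#include <stddef.h>	/* offsetof() */
#include "smartpqi.h"	/* assumed includable here */

_Static_assert(offsetof(struct pqi_ctrl_registers,
			sis_host_to_ctrl_doorbell) == 0x20,
	       "SIS host-to-ctrl doorbell must sit at BAR0 + 20h");
_Static_assert(offsetof(struct pqi_ctrl_registers,
			sis_firmware_status) == 0xbc,
	       "SIS firmware status must sit at BAR0 + BCh");
_Static_assert(offsetof(struct pqi_ctrl_registers, pqi_registers) ==
	       PQI_DEVICE_REGISTERS_OFFSET,
	       "PQI registers must sit at BAR0 + 4000h");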
+
+enum pqi_io_path {
+       RAID_PATH = 0,
+       AIO_PATH = 1
+};
+
+struct pqi_sg_descriptor {
+       __le64  address;
+       __le32  length;
+       __le32  flags;
+};
+
+/* manifest constants for the flags field of pqi_sg_descriptor */
+#define CISS_SG_LAST   0x40000000
+#define CISS_SG_CHAIN  0x80000000
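CISS_SG_LAST marks the final descriptor of a scatter-gather list and CISS_SG_CHAIN marks an element that points at a continuation. An illustrative, self-contained sketch (not from the patch) of filling a descriptor array and flagging the last entry; endian conversion is elided by using plain integers:

#include <stddef.h>
#include <stdint.h>

#define CISS_SG_LAST	0x40000000u	/* mirrors the definition above */

struct sg_desc {			/* stand-in for pqi_sg_descriptor */
	uint64_t address;
	uint32_t length;
	uint32_t flags;
};

static void fill_sg_list(struct sg_desc *sg, const uint64_t *addr,
			 const uint32_t *len, size_t count)
{
	size_t i;

	for (i = 0; i < count; i++) {
		sg[i].address = addr[i];
		sg[i].length = len[i];
		sg[i].flags = 0;
	}
	if (count)
		sg[count - 1].flags |= CISS_SG_LAST;	/* terminate list */
}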
+
+struct pqi_iu_header {
+       u8      iu_type;
+       u8      reserved;
+       __le16  iu_length;      /* in bytes - does not include the length */
+                               /* of this header */
+       __le16  response_queue_id;      /* specifies the OQ where the */
+                                       /*   response IU is to be delivered */
+       u8      work_area[2];   /* reserved for driver use */
+};
+
+/*
+ * According to the PQI spec, the IU header is only the first 4 bytes of our
+ * pqi_iu_header structure.
+ */
+#define PQI_REQUEST_HEADER_LENGTH      4
+
+struct pqi_general_admin_request {
+       struct pqi_iu_header header;
+       __le16  request_id;
+       u8      function_code;
+       union {
+               struct {
+                       u8      reserved[33];
+                       __le32  buffer_length;
+                       struct pqi_sg_descriptor sg_descriptor;
+               } report_device_capability;
+
+               struct {
+                       u8      reserved;
+                       __le16  queue_id;
+                       u8      reserved1[2];
+                       __le64  element_array_addr;
+                       __le64  ci_addr;
+                       __le16  num_elements;
+                       __le16  element_length;
+                       u8      queue_protocol;
+                       u8      reserved2[23];
+                       __le32  vendor_specific;
+               } create_operational_iq;
+
+               struct {
+                       u8      reserved;
+                       __le16  queue_id;
+                       u8      reserved1[2];
+                       __le64  element_array_addr;
+                       __le64  pi_addr;
+                       __le16  num_elements;
+                       __le16  element_length;
+                       u8      queue_protocol;
+                       u8      reserved2[3];
+                       __le16  int_msg_num;
+                       __le16  coalescing_count;
+                       __le32  min_coalescing_time;
+                       __le32  max_coalescing_time;
+                       u8      reserved3[8];
+                       __le32  vendor_specific;
+               } create_operational_oq;
+
+               struct {
+                       u8      reserved;
+                       __le16  queue_id;
+                       u8      reserved1[50];
+               } delete_operational_queue;
+
+               struct {
+                       u8      reserved;
+                       __le16  queue_id;
+                       u8      reserved1[46];
+                       __le32  vendor_specific;
+               } change_operational_iq_properties;
+
+       } data;
+};
+
+struct pqi_general_admin_response {
+       struct pqi_iu_header header;
+       __le16  request_id;
+       u8      function_code;
+       u8      status;
+       union {
+               struct {
+                       u8      status_descriptor[4];
+                       __le64  iq_pi_offset;
+                       u8      reserved[40];
+               } create_operational_iq;
+
+               struct {
+                       u8      status_descriptor[4];
+                       __le64  oq_ci_offset;
+                       u8      reserved[40];
+               } create_operational_oq;
+       } data;
+};
+
+struct pqi_iu_layer_descriptor {
+       u8      inbound_spanning_supported : 1;
+       u8      reserved : 7;
+       u8      reserved1[5];
+       __le16  max_inbound_iu_length;
+       u8      outbound_spanning_supported : 1;
+       u8      reserved2 : 7;
+       u8      reserved3[5];
+       __le16  max_outbound_iu_length;
+};
+
+struct pqi_device_capability {
+       __le16  data_length;
+       u8      reserved[6];
+       u8      iq_arbitration_priority_support_bitmask;
+       u8      maximum_aw_a;
+       u8      maximum_aw_b;
+       u8      maximum_aw_c;
+       u8      max_arbitration_burst : 3;
+       u8      reserved1 : 4;
+       u8      iqa : 1;
+       u8      reserved2[2];
+       u8      iq_freeze : 1;
+       u8      reserved3 : 7;
+       __le16  max_inbound_queues;
+       __le16  max_elements_per_iq;
+       u8      reserved4[4];
+       __le16  max_iq_element_length;
+       __le16  min_iq_element_length;
+       u8      reserved5[2];
+       __le16  max_outbound_queues;
+       __le16  max_elements_per_oq;
+       __le16  intr_coalescing_time_granularity;
+       __le16  max_oq_element_length;
+       __le16  min_oq_element_length;
+       u8      reserved6[24];
+       struct pqi_iu_layer_descriptor iu_layer_descriptors[32];
+};
+
+#define PQI_MAX_EMBEDDED_SG_DESCRIPTORS                4
+
+struct pqi_raid_path_request {
+       struct pqi_iu_header header;
+       __le16  request_id;
+       __le16  nexus_id;
+       __le32  buffer_length;
+       u8      lun_number[8];
+       __le16  protocol_specific;
+       u8      data_direction : 2;
+       u8      partial : 1;
+       u8      reserved1 : 4;
+       u8      fence : 1;
+       __le16  error_index;
+       u8      reserved2;
+       u8      task_attribute : 3;
+       u8      command_priority : 4;
+       u8      reserved3 : 1;
+       u8      reserved4 : 2;
+       u8      additional_cdb_bytes_usage : 3;
+       u8      reserved5 : 3;
+       u8      cdb[32];
+       struct pqi_sg_descriptor
+               sg_descriptors[PQI_MAX_EMBEDDED_SG_DESCRIPTORS];
+};
+
+struct pqi_aio_path_request {
+       struct pqi_iu_header header;
+       __le16  request_id;
+       u8      reserved1[2];
+       __le32  nexus_id;
+       __le32  buffer_length;
+       u8      data_direction : 2;
+       u8      partial : 1;
+       u8      memory_type : 1;
+       u8      fence : 1;
+       u8      encryption_enable : 1;
+       u8      reserved2 : 2;
+       u8      task_attribute : 3;
+       u8      command_priority : 4;
+       u8      reserved3 : 1;
+       __le16  data_encryption_key_index;
+       __le32  encrypt_tweak_lower;
+       __le32  encrypt_tweak_upper;
+       u8      cdb[16];
+       __le16  error_index;
+       u8      num_sg_descriptors;
+       u8      cdb_length;
+       u8      lun_number[8];
+       u8      reserved4[4];
+       struct pqi_sg_descriptor
+               sg_descriptors[PQI_MAX_EMBEDDED_SG_DESCRIPTORS];
+};
+
+struct pqi_io_response {
+       struct pqi_iu_header header;
+       __le16  request_id;
+       __le16  error_index;
+       u8      reserved2[4];
+};
+
+struct pqi_general_management_request {
+       struct pqi_iu_header header;
+       __le16  request_id;
+       union {
+               struct {
+                       u8      reserved[2];
+                       __le32  buffer_length;
+                       struct pqi_sg_descriptor sg_descriptors[3];
+               } report_event_configuration;
+
+               struct {
+                       __le16  global_event_oq_id;
+                       __le32  buffer_length;
+                       struct pqi_sg_descriptor sg_descriptors[3];
+               } set_event_configuration;
+       } data;
+};
+
+struct pqi_event_descriptor {
+       u8      event_type;
+       u8      reserved;
+       __le16  oq_id;
+};
+
+struct pqi_event_config {
+       u8      reserved[2];
+       u8      num_event_descriptors;
+       u8      reserved1;
+       struct pqi_event_descriptor descriptors[1];
+};
+
+#define PQI_MAX_EVENT_DESCRIPTORS      255
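Because struct pqi_event_config ends in an old-style descriptors[1] trailing array, a buffer for n descriptors (1 <= n <= PQI_MAX_EVENT_DESCRIPTORS) is the base structure plus n - 1 extra entries. A hypothetical sizing helper (not in the patch), assuming smartpqi.h is available:

#include <stddef.h>
#include "smartpqi.h"	/* assumed includable here */

/* size of a buffer holding n event descriptors, n >= 1 */
static inline size_t pqi_event_config_buffer_size(unsigned int n)
{
	return sizeof(struct pqi_event_config) +
		(n - 1) * sizeof(struct pqi_event_descriptor);
}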
+
+struct pqi_event_response {
+       struct pqi_iu_header header;
+       u8      event_type;
+       u8      reserved2 : 7;
+       u8      request_acknowlege : 1;
+       __le16  event_id;
+       __le32  additional_event_id;
+       u8      data[16];
+};
+
+struct pqi_event_acknowledge_request {
+       struct pqi_iu_header header;
+       u8      event_type;
+       u8      reserved2;
+       __le16  event_id;
+       __le32  additional_event_id;
+};
+
+struct pqi_task_management_request {
+       struct pqi_iu_header header;
+       __le16  request_id;
+       __le16  nexus_id;
+       u8      reserved[4];
+       u8      lun_number[8];
+       __le16  protocol_specific;
+       __le16  outbound_queue_id_to_manage;
+       __le16  request_id_to_manage;
+       u8      task_management_function;
+       u8      reserved2 : 7;
+       u8      fence : 1;
+};
+
+#define SOP_TASK_MANAGEMENT_LUN_RESET  0x8
+
+struct pqi_task_management_response {
+       struct pqi_iu_header header;
+       __le16  request_id;
+       __le16  nexus_id;
+       u8      additional_response_info[3];
+       u8      response_code;
+};
+
+struct pqi_aio_error_info {
+       u8      status;
+       u8      service_response;
+       u8      data_present;
+       u8      reserved;
+       __le32  residual_count;
+       __le16  data_length;
+       __le16  reserved1;
+       u8      data[256];
+};
+
+struct pqi_raid_error_info {
+       u8      data_in_result;
+       u8      data_out_result;
+       u8      reserved[3];
+       u8      status;
+       __le16  status_qualifier;
+       __le16  sense_data_length;
+       __le16  response_data_length;
+       __le32  data_in_transferred;
+       __le32  data_out_transferred;
+       u8      data[256];
+};
+
+#define PQI_REQUEST_IU_TASK_MANAGEMENT                 0x13
+#define PQI_REQUEST_IU_RAID_PATH_IO                    0x14
+#define PQI_REQUEST_IU_AIO_PATH_IO                     0x15
+#define PQI_REQUEST_IU_GENERAL_ADMIN                   0x60
+#define PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG      0x72
+#define PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG         0x73
+#define PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT                0xf6
+
+#define PQI_RESPONSE_IU_GENERAL_MANAGEMENT             0x81
+#define PQI_RESPONSE_IU_TASK_MANAGEMENT                        0x93
+#define PQI_RESPONSE_IU_GENERAL_ADMIN                  0xe0
+#define PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS           0xf0
+#define PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS            0xf1
+#define PQI_RESPONSE_IU_RAID_PATH_IO_ERROR             0xf2
+#define PQI_RESPONSE_IU_AIO_PATH_IO_ERROR              0xf3
+#define PQI_RESPONSE_IU_AIO_PATH_DISABLED              0xf4
+#define PQI_RESPONSE_IU_VENDOR_EVENT                   0xf5
+
+#define PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY    0x0
+#define PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ                   0x10
+#define PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ                   0x11
+#define PQI_GENERAL_ADMIN_FUNCTION_DELETE_IQ                   0x12
+#define PQI_GENERAL_ADMIN_FUNCTION_DELETE_OQ                   0x13
+#define PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY          0x14
+
+#define PQI_GENERAL_ADMIN_STATUS_SUCCESS       0x0
+
+#define PQI_IQ_PROPERTY_IS_AIO_QUEUE   0x1
+
+#define PQI_GENERAL_ADMIN_IU_LENGTH            0x3c
+#define PQI_PROTOCOL_SOP                       0x0
+
+#define PQI_DATA_IN_OUT_GOOD                                   0x0
+#define PQI_DATA_IN_OUT_UNDERFLOW                              0x1
+#define PQI_DATA_IN_OUT_BUFFER_ERROR                           0x40
+#define PQI_DATA_IN_OUT_BUFFER_OVERFLOW                                0x41
+#define PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA                0x42
+#define PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE                 0x43
+#define PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR                      0x60
+#define PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT                        0x61
+#define PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED          0x62
+#define PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED      0x63
+#define PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED                 0x64
+#define PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST               0x65
+#define PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION                     0x66
+#define PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED                        0x67
+#define PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ              0x6F
+#define PQI_DATA_IN_OUT_ERROR                                  0xf0
+#define PQI_DATA_IN_OUT_PROTOCOL_ERROR                         0xf1
+#define PQI_DATA_IN_OUT_HARDWARE_ERROR                         0xf2
+#define PQI_DATA_IN_OUT_UNSOLICITED_ABORT                      0xf3
+#define PQI_DATA_IN_OUT_ABORTED                                        0xf4
+#define PQI_DATA_IN_OUT_TIMEOUT                                        0xf5
+
+#define CISS_CMD_STATUS_SUCCESS                        0x0
+#define CISS_CMD_STATUS_TARGET_STATUS          0x1
+#define CISS_CMD_STATUS_DATA_UNDERRUN          0x2
+#define CISS_CMD_STATUS_DATA_OVERRUN           0x3
+#define CISS_CMD_STATUS_INVALID                        0x4
+#define CISS_CMD_STATUS_PROTOCOL_ERROR         0x5
+#define CISS_CMD_STATUS_HARDWARE_ERROR         0x6
+#define CISS_CMD_STATUS_CONNECTION_LOST                0x7
+#define CISS_CMD_STATUS_ABORTED                        0x8
+#define CISS_CMD_STATUS_ABORT_FAILED           0x9
+#define CISS_CMD_STATUS_UNSOLICITED_ABORT      0xa
+#define CISS_CMD_STATUS_TIMEOUT                        0xb
+#define CISS_CMD_STATUS_UNABORTABLE            0xc
+#define CISS_CMD_STATUS_TMF                    0xd
+#define CISS_CMD_STATUS_AIO_DISABLED           0xe
+
+#define PQI_NUM_EVENT_QUEUE_ELEMENTS   32
+#define PQI_EVENT_OQ_ELEMENT_LENGTH    sizeof(struct pqi_event_response)
+
+#define PQI_EVENT_TYPE_HOTPLUG                 0x1
+#define PQI_EVENT_TYPE_HARDWARE                        0x2
+#define PQI_EVENT_TYPE_PHYSICAL_DEVICE         0x4
+#define PQI_EVENT_TYPE_LOGICAL_DEVICE          0x5
+#define PQI_EVENT_TYPE_AIO_STATE_CHANGE                0xfd
+#define PQI_EVENT_TYPE_AIO_CONFIG_CHANGE       0xfe
+#define PQI_EVENT_TYPE_HEARTBEAT               0xff
+
+#pragma pack()
+
+#define PQI_ERROR_BUFFER_ELEMENT_LENGTH                \
+       sizeof(struct pqi_raid_error_info)
+
+/* these values are based on our implementation */
+#define PQI_ADMIN_IQ_NUM_ELEMENTS              8
+#define PQI_ADMIN_OQ_NUM_ELEMENTS              20
+#define PQI_ADMIN_IQ_ELEMENT_LENGTH            64
+#define PQI_ADMIN_OQ_ELEMENT_LENGTH            64
+
+#define PQI_OPERATIONAL_IQ_ELEMENT_LENGTH      128
+#define PQI_OPERATIONAL_OQ_ELEMENT_LENGTH      16
+
+#define PQI_MIN_MSIX_VECTORS           1
+#define PQI_MAX_MSIX_VECTORS           64
+
+/* these values are defined by the PQI spec */
+#define PQI_MAX_NUM_ELEMENTS_ADMIN_QUEUE       255
+#define PQI_MAX_NUM_ELEMENTS_OPERATIONAL_QUEUE 65535
+#define PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT      64
+#define PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT     16
+#define PQI_ADMIN_INDEX_ALIGNMENT              64
+#define PQI_OPERATIONAL_INDEX_ALIGNMENT                4
+
+#define PQI_MIN_OPERATIONAL_QUEUE_ID           1
+#define PQI_MAX_OPERATIONAL_QUEUE_ID           65535
+
+#define PQI_AIO_SERV_RESPONSE_COMPLETE         0
+#define PQI_AIO_SERV_RESPONSE_FAILURE          1
+#define PQI_AIO_SERV_RESPONSE_TMF_COMPLETE     2
+#define PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED    3
+#define PQI_AIO_SERV_RESPONSE_TMF_REJECTED     4
+#define PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN        5
+
+#define PQI_AIO_STATUS_IO_ERROR                        0x1
+#define PQI_AIO_STATUS_IO_ABORTED              0x2
+#define PQI_AIO_STATUS_NO_PATH_TO_DEVICE       0x3
+#define PQI_AIO_STATUS_INVALID_DEVICE          0x4
+#define PQI_AIO_STATUS_AIO_PATH_DISABLED       0xe
+#define PQI_AIO_STATUS_UNDERRUN                        0x51
+#define PQI_AIO_STATUS_OVERRUN                 0x75
+
+typedef u32 pqi_index_t;
+
+/* SOP data direction flags */
+#define SOP_NO_DIRECTION_FLAG  0
+#define SOP_WRITE_FLAG         1       /* host writes data to Data-Out */
+                                       /* buffer */
+#define SOP_READ_FLAG          2       /* host receives data from Data-In */
+                                       /* buffer */
+#define SOP_BIDIRECTIONAL      3       /* data is transferred from the */
+                                       /* Data-Out buffer and data is */
+                                       /* transferred to the Data-In buffer */
+
+#define SOP_TASK_ATTRIBUTE_SIMPLE              0
+#define SOP_TASK_ATTRIBUTE_HEAD_OF_QUEUE       1
+#define SOP_TASK_ATTRIBUTE_ORDERED             2
+#define SOP_TASK_ATTRIBUTE_ACA                 4
+
+#define SOP_TMF_COMPLETE               0x0
+#define SOP_TMF_FUNCTION_SUCCEEDED     0x8
+
+/* additional CDB bytes usage field codes */
+#define SOP_ADDITIONAL_CDB_BYTES_0     0       /* 16-byte CDB */
+#define SOP_ADDITIONAL_CDB_BYTES_4     1       /* 20-byte CDB */
+#define SOP_ADDITIONAL_CDB_BYTES_8     2       /* 24-byte CDB */
+#define SOP_ADDITIONAL_CDB_BYTES_12    3       /* 28-byte CDB */
+#define SOP_ADDITIONAL_CDB_BYTES_16    4       /* 32-byte CDB */
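The additional-CDB-bytes codes advance in 4-byte steps from a 16-byte CDB, so code n covers a CDB of 16 + 4 * n bytes. A hypothetical mapping helper (not in the patch):

/* caller guarantees 16 <= cdb_len <= 32 and cdb_len % 4 == 0 */
static inline unsigned int sop_additional_cdb_code(unsigned int cdb_len)
{
	return (cdb_len - 16) / 4;	/* 16 -> 0, 20 -> 1, ..., 32 -> 4 */
}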
+
+/*
+ * The purpose of this structure is to obtain proper alignment of objects in
+ * an admin queue pair.
+ */
+struct pqi_admin_queues_aligned {
+       __aligned(PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT)
+               u8      iq_element_array[PQI_ADMIN_IQ_ELEMENT_LENGTH]
+                                       [PQI_ADMIN_IQ_NUM_ELEMENTS];
+       __aligned(PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT)
+               u8      oq_element_array[PQI_ADMIN_OQ_ELEMENT_LENGTH]
+                                       [PQI_ADMIN_OQ_NUM_ELEMENTS];
+       __aligned(PQI_ADMIN_INDEX_ALIGNMENT) pqi_index_t iq_ci;
+       __aligned(PQI_ADMIN_INDEX_ALIGNMENT) pqi_index_t oq_pi;
+};
+
+struct pqi_admin_queues {
+       void            *iq_element_array;
+       void            *oq_element_array;
+       volatile pqi_index_t *iq_ci;
+       volatile pqi_index_t *oq_pi;
+       dma_addr_t      iq_element_array_bus_addr;
+       dma_addr_t      oq_element_array_bus_addr;
+       dma_addr_t      iq_ci_bus_addr;
+       dma_addr_t      oq_pi_bus_addr;
+       __le32 __iomem  *iq_pi;
+       pqi_index_t     iq_pi_copy;
+       __le32 __iomem  *oq_ci;
+       pqi_index_t     oq_ci_copy;
+       struct task_struct *task;
+       u16             int_msg_num;
+};
+
+struct pqi_queue_group {
+       struct pqi_ctrl_info *ctrl_info;        /* backpointer */
+       u16             iq_id[2];
+       u16             oq_id;
+       u16             int_msg_num;
+       void            *iq_element_array[2];
+       void            *oq_element_array;
+       dma_addr_t      iq_element_array_bus_addr[2];
+       dma_addr_t      oq_element_array_bus_addr;
+       __le32 __iomem  *iq_pi[2];
+       pqi_index_t     iq_pi_copy[2];
+       volatile pqi_index_t *iq_ci[2];
+       volatile pqi_index_t *oq_pi;
+       dma_addr_t      iq_ci_bus_addr[2];
+       dma_addr_t      oq_pi_bus_addr;
+       __le32 __iomem  *oq_ci;
+       pqi_index_t     oq_ci_copy;
+       spinlock_t      submit_lock[2]; /* protect submission queue */
+       struct list_head request_list[2];
+};
+
+struct pqi_event_queue {
+       u16             oq_id;
+       u16             int_msg_num;
+       void            *oq_element_array;
+       volatile pqi_index_t *oq_pi;
+       dma_addr_t      oq_element_array_bus_addr;
+       dma_addr_t      oq_pi_bus_addr;
+       __le32 __iomem  *oq_ci;
+       pqi_index_t     oq_ci_copy;
+};
+
+#define PQI_DEFAULT_QUEUE_GROUP                0
+#define PQI_MAX_QUEUE_GROUPS           PQI_MAX_MSIX_VECTORS
+
+struct pqi_encryption_info {
+       u16     data_encryption_key_index;
+       u32     encrypt_tweak_lower;
+       u32     encrypt_tweak_upper;
+};
+
+#define PQI_MAX_OUTSTANDING_REQUESTS   ((u32)~0)
+#define PQI_MAX_TRANSFER_SIZE          (4 * 1024U * 1024U)
+
+#define RAID_MAP_MAX_ENTRIES           1024
+
+#define PQI_PHYSICAL_DEVICE_BUS                0
+#define PQI_RAID_VOLUME_BUS            1
+#define PQI_HBA_BUS                    2
+#define PQI_MAX_BUS                    PQI_HBA_BUS
+
+#pragma pack(1)
+
+struct report_lun_header {
+       __be32  list_length;
+       u8      extended_response;
+       u8      reserved[3];
+};
+
+struct report_log_lun_extended_entry {
+       u8      lunid[8];
+       u8      volume_id[16];
+};
+
+struct report_log_lun_extended {
+       struct report_lun_header header;
+       struct report_log_lun_extended_entry lun_entries[1];
+};
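A hedged helper (not in the patch) for counting entries in the extended logical-LUN report, assuming the CISS convention that the big-endian list_length counts the bytes of lun_entries[], and that kernel byte-order helpers plus smartpqi.h are available:

#include <asm/byteorder.h>	/* be32_to_cpu() */
#include "smartpqi.h"		/* assumed includable here */

static inline unsigned int
report_log_lun_entry_count(const struct report_log_lun_extended *report)
{
	/* assumption: list_length counts the bytes of lun_entries[] */
	return be32_to_cpu(report->header.list_length) /
		sizeof(struct report_log_lun_extended_entry);
}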
+
+struct report_phys_lun_extended_entry {
+       u8      lunid[8];
+       __be64  wwid;
+       u8      device_type;
+       u8      device_flags;
+       u8      lun_count;      /* number of LUNs in a multi-LUN device */
+       u8      redundant_paths;
+       u32     aio_handle;
+};
+
+/* for device_flags field of struct report_phys_lun_extended_entry */
+#define REPORT_PHYS_LUN_DEV_FLAG_NON_DISK      0x1
+#define REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED   0x8
+
+struct report_phys_lun_extended {
+       struct report_lun_header header;
+       struct report_phys_lun_extended_entry lun_entries[1];
+};
+
+struct raid_map_disk_data {
+       u32     aio_handle;
+       u8      xor_mult[2];
+       u8      reserved[2];
+};
+
+/* constants for flags field of RAID map */
+#define RAID_MAP_ENCRYPTION_ENABLED    0x1
+
+struct raid_map {
+       __le32  structure_size;         /* size of entire structure in bytes */
+       __le32  volume_blk_size;        /* bytes / block in the volume */
+       __le64  volume_blk_cnt;         /* logical blocks on the volume */
+       u8      phys_blk_shift;         /* shift factor to convert between */
+                                       /* units of logical blocks and */
+                                       /* physical disk blocks */
+       u8      parity_rotation_shift;  /* shift factor to convert between */
+                                       /* units of logical stripes and */
+                                       /* physical stripes */
+       __le16  strip_size;             /* blocks used on each disk / stripe */
+       __le64  disk_starting_blk;      /* first disk block used in volume */
+       __le64  disk_blk_cnt;           /* disk blocks used by volume / disk */
+       __le16  data_disks_per_row;     /* data disk entries / row in the map */
+       __le16  metadata_disks_per_row; /* mirror/parity disk entries / row */
+                                       /* in the map */
+       __le16  row_cnt;                /* rows in each layout map */
+       __le16  layout_map_count;       /* layout maps (1 map per */
+                                       /* mirror parity group) */
+       __le16  flags;
+       __le16  data_encryption_key_index;
+       u8      reserved[16];
+       struct raid_map_disk_data disk_data[RAID_MAP_MAX_ENTRIES];
+};
+
+#pragma pack()
+
+#define RAID_CTLR_LUNID                "\0\0\0\0\0\0\0\0"
+
+struct pqi_scsi_dev {
+       int     devtype;                /* as reported by INQUIRY command */
+       u8      device_type;            /* as reported by */
+                                       /* BMIC_IDENTIFY_PHYSICAL_DEVICE */
+                                       /* only valid for devtype = TYPE_DISK */
+       int     bus;
+       int     target;
+       int     lun;
+       u8      scsi3addr[8];
+       __be64  wwid;
+       u8      volume_id[16];
+       u8      is_physical_device : 1;
+       u8      target_lun_valid : 1;
+       u8      expose_device : 1;
+       u8      no_uld_attach : 1;
+       u8      aio_enabled : 1;        /* only valid for physical disks */
+       u8      device_gone : 1;
+       u8      new_device : 1;
+       u8      keep_device : 1;
+       u8      volume_offline : 1;
+       u8      vendor[8];              /* bytes 8-15 of inquiry data */
+       u8      model[16];              /* bytes 16-31 of inquiry data */
+       u64     sas_address;
+       u8      raid_level;
+       u16     queue_depth;            /* max. queue_depth for this device */
+       u16     advertised_queue_depth;
+       u32     aio_handle;
+       u8      volume_status;
+       u8      active_path_index;
+       u8      path_map;
+       u8      bay;
+       u8      box[8];
+       u16     phys_connector[8];
+       int     offload_configured;     /* I/O accel RAID offload configured */
+       int     offload_enabled;        /* I/O accel RAID offload enabled */
+       int     offload_enabled_pending;
+       int     offload_to_mirror;      /* Send next I/O accelerator RAID */
+                                       /* offload request to mirror drive. */
+       struct raid_map *raid_map;      /* I/O accelerator RAID map */
+
+       struct pqi_sas_port *sas_port;
+       struct scsi_device *sdev;
+
+       struct list_head scsi_device_list_entry;
+       struct list_head new_device_list_entry;
+       struct list_head add_list_entry;
+       struct list_head delete_list_entry;
+};
+
+/* VPD inquiry pages */
+#define SCSI_VPD_SUPPORTED_PAGES       0x0     /* standard page */
+#define SCSI_VPD_DEVICE_ID             0x83    /* standard page */
+#define CISS_VPD_LV_DEVICE_GEOMETRY    0xc1    /* vendor-specific page */
+#define CISS_VPD_LV_OFFLOAD_STATUS     0xc2    /* vendor-specific page */
+#define CISS_VPD_LV_STATUS             0xc3    /* vendor-specific page */
+
+#define VPD_PAGE       (1 << 8)
+
+#pragma pack(1)
+
+/* structure for CISS_VPD_LV_STATUS */
+struct ciss_vpd_logical_volume_status {
+       u8      peripheral_info;
+       u8      page_code;
+       u8      reserved;
+       u8      page_length;
+       u8      volume_status;
+       u8      reserved2[3];
+       __be32  flags;
+};
+
+#pragma pack()
+
+/* constants for volume_status field of ciss_vpd_logical_volume_status */
+#define CISS_LV_OK                                     0
+#define CISS_LV_FAILED                                 1
+#define CISS_LV_NOT_CONFIGURED                         2
+#define CISS_LV_DEGRADED                               3
+#define CISS_LV_READY_FOR_RECOVERY                     4
+#define CISS_LV_UNDERGOING_RECOVERY                    5
+#define CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED          6
+#define CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM      7
+#define CISS_LV_HARDWARE_OVERHEATING                   8
+#define CISS_LV_HARDWARE_HAS_OVERHEATED                        9
+#define CISS_LV_UNDERGOING_EXPANSION                   10
+#define CISS_LV_NOT_AVAILABLE                          11
+#define CISS_LV_QUEUED_FOR_EXPANSION                   12
+#define CISS_LV_DISABLED_SCSI_ID_CONFLICT              13
+#define CISS_LV_EJECTED                                        14
+#define CISS_LV_UNDERGOING_ERASE                       15
+/* state 16 not used */
+#define CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD     17
+#define CISS_LV_UNDERGOING_RPI                         18
+#define CISS_LV_PENDING_RPI                            19
+#define CISS_LV_ENCRYPTED_NO_KEY                       20
+/* state 21 not used */
+#define CISS_LV_UNDERGOING_ENCRYPTION                  22
+#define CISS_LV_UNDERGOING_ENCRYPTION_REKEYING         23
+#define CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER  24
+#define CISS_LV_PENDING_ENCRYPTION                     25
+#define CISS_LV_PENDING_ENCRYPTION_REKEYING            26
+#define CISS_LV_NOT_SUPPORTED                          27
+#define CISS_LV_STATUS_UNAVAILABLE                     255
+
+/* constants for flags field of ciss_vpd_logical_volume_status */
+#define CISS_LV_FLAGS_NO_HOST_IO       0x1     /* volume not available for */
+                                               /* host I/O */
+
+/* for SAS hosts and SAS expanders */
+struct pqi_sas_node {
+       struct device *parent_dev;
+       struct list_head port_list_head;
+};
+
+struct pqi_sas_port {
+       struct list_head port_list_entry;
+       u64     sas_address;
+       struct sas_port *port;
+       int     next_phy_index;
+       struct list_head phy_list_head;
+       struct pqi_sas_node *parent_node;
+       struct sas_rphy *rphy;
+};
+
+struct pqi_sas_phy {
+       struct list_head phy_list_entry;
+       struct sas_phy *phy;
+       struct pqi_sas_port *parent_port;
+       bool    added_to_port;
+};
+
+struct pqi_io_request {
+       atomic_t        refcount;
+       u16             index;
+       void (*io_complete_callback)(struct pqi_io_request *io_request,
+               void *context);
+       void            *context;
+       int             status;
+       struct scsi_cmnd *scmd;
+       void            *error_info;
+       struct pqi_sg_descriptor *sg_chain_buffer;
+       dma_addr_t      sg_chain_buffer_dma_handle;
+       void            *iu;
+       struct list_head request_list_entry;
+};
+
+/* for indexing into the pending_events[] field of struct pqi_ctrl_info */
+#define PQI_EVENT_HEARTBEAT            0
+#define PQI_EVENT_HOTPLUG              1
+#define PQI_EVENT_HARDWARE             2
+#define PQI_EVENT_PHYSICAL_DEVICE      3
+#define PQI_EVENT_LOGICAL_DEVICE       4
+#define PQI_EVENT_AIO_STATE_CHANGE     5
+#define PQI_EVENT_AIO_CONFIG_CHANGE    6
+#define PQI_NUM_SUPPORTED_EVENTS       7
+
+struct pqi_event {
+       bool    pending;
+       u8      event_type;
+       __le16  event_id;
+       __le32  additional_event_id;
+};
+
+#define PQI_RESERVED_IO_SLOTS_LUN_RESET                        1
+#define PQI_RESERVED_IO_SLOTS_EVENT_ACK                        PQI_NUM_SUPPORTED_EVENTS
+#define PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS     3
+#define PQI_RESERVED_IO_SLOTS                          \
+       (PQI_RESERVED_IO_SLOTS_LUN_RESET + PQI_RESERVED_IO_SLOTS_EVENT_ACK + \
+       PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS)
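+
+/*
+ * With the values above (1 LUN-reset slot, PQI_NUM_SUPPORTED_EVENTS == 7
+ * event-acknowledge slots and 3 synchronous-request slots),
+ * PQI_RESERVED_IO_SLOTS evaluates to 11.
+ */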
+
+struct pqi_ctrl_info {
+       unsigned int    ctrl_id;
+       struct pci_dev  *pci_dev;
+       char            firmware_version[11];
+       void __iomem    *iomem_base;
+       struct pqi_ctrl_registers __iomem *registers;
+       struct pqi_device_registers __iomem *pqi_registers;
+       u32             max_sg_entries;
+       u32             config_table_offset;
+       u32             config_table_length;
+       u16             max_inbound_queues;
+       u16             max_elements_per_iq;
+       u16             max_iq_element_length;
+       u16             max_outbound_queues;
+       u16             max_elements_per_oq;
+       u16             max_oq_element_length;
+       u32             max_transfer_size;
+       u32             max_outstanding_requests;
+       u32             max_io_slots;
+       unsigned int    scsi_ml_can_queue;
+       unsigned short  sg_tablesize;
+       unsigned int    max_sectors;
+       u32             error_buffer_length;
+       void            *error_buffer;
+       dma_addr_t      error_buffer_dma_handle;
+       size_t          sg_chain_buffer_length;
+       unsigned int    num_queue_groups;
+       unsigned int    num_active_queue_groups;
+       u16             num_elements_per_iq;
+       u16             num_elements_per_oq;
+       u16             max_inbound_iu_length_per_firmware;
+       u16             max_inbound_iu_length;
+       unsigned int    max_sg_per_iu;
+       void            *admin_queue_memory_base;
+       u32             admin_queue_memory_length;
+       dma_addr_t      admin_queue_memory_base_dma_handle;
+       void            *queue_memory_base;
+       u32             queue_memory_length;
+       dma_addr_t      queue_memory_base_dma_handle;
+       struct pqi_admin_queues admin_queues;
+       struct pqi_queue_group queue_groups[PQI_MAX_QUEUE_GROUPS];
+       struct pqi_event_queue event_queue;
+       int             max_msix_vectors;
+       int             num_msix_vectors_enabled;
+       int             num_msix_vectors_initialized;
+       u32             msix_vectors[PQI_MAX_MSIX_VECTORS];
+       void            *intr_data[PQI_MAX_MSIX_VECTORS];
+       int             event_irq;
+       struct Scsi_Host *scsi_host;
+
+       struct mutex    scan_mutex;
+       u8              inbound_spanning_supported : 1;
+       u8              outbound_spanning_supported : 1;
+       u8              pqi_mode_enabled : 1;
+       u8              controller_online : 1;
+       u8              heartbeat_timer_started : 1;
+
+       struct list_head scsi_device_list;
+       spinlock_t      scsi_device_list_lock;
+
+       struct delayed_work rescan_work;
+       struct delayed_work update_time_work;
+
+       struct pqi_sas_node *sas_host;
+       u64             sas_address;
+
+       struct pqi_io_request *io_request_pool;
+       u16             next_io_request_slot;
+
+       struct pqi_event pending_events[PQI_NUM_SUPPORTED_EVENTS];
+       struct work_struct event_work;
+
+       atomic_t        num_interrupts;
+       int             previous_num_interrupts;
+       unsigned int    num_heartbeats_requested;
+       struct timer_list heartbeat_timer;
+
+       struct semaphore sync_request_sem;
+       struct semaphore lun_reset_sem;
+};
+
+enum pqi_ctrl_mode {
+       UNKNOWN,
+       PQI_MODE
+};
+
+/*
+ * assume worst case: SATA queue depth of 31 minus 4 internal firmware commands
+ */
+#define PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH      27
+
+/* 0 = no limit */
+#define PQI_LOGICAL_DRIVE_DEFAULT_MAX_QUEUE_DEPTH      0
+
+/* CISS commands */
+#define CISS_READ              0xc0
+#define CISS_REPORT_LOG                0xc2    /* Report Logical LUNs */
+#define CISS_REPORT_PHYS       0xc3    /* Report Physical LUNs */
+#define CISS_GET_RAID_MAP      0xc8
+
+/* constants for CISS_REPORT_LOG/CISS_REPORT_PHYS commands */
+#define CISS_REPORT_LOG_EXTENDED               0x1
+#define CISS_REPORT_PHYS_EXTENDED              0x2
+
+/* BMIC commands */
+#define BMIC_IDENTIFY_CONTROLLER               0x11
+#define BMIC_IDENTIFY_PHYSICAL_DEVICE          0x15
+#define BMIC_READ                              0x26
+#define BMIC_WRITE                             0x27
+#define BMIC_SENSE_CONTROLLER_PARAMETERS       0x64
+#define BMIC_SENSE_SUBSYSTEM_INFORMATION       0x66
+#define BMIC_WRITE_HOST_WELLNESS               0xa5
+#define BMIC_CACHE_FLUSH                       0xc2
+
+#define SA_CACHE_FLUSH                         0x01
+
+#define MASKED_DEVICE(lunid)                   ((lunid)[3] & 0xc0)
+#define CISS_GET_BUS(lunid)                    ((lunid)[7] & 0x3f)
+#define CISS_GET_LEVEL_2_TARGET(lunid)         ((lunid)[6])
+#define CISS_GET_DRIVE_NUMBER(lunid)           \
+       (((CISS_GET_BUS((lunid)) - 1) << 8) +   \
+       CISS_GET_LEVEL_2_TARGET((lunid)))
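+
+/*
+ * Worked example (hypothetical lunid): with bus 2 encoded in byte 7 and
+ * level-2 target 5 in byte 6, CISS_GET_DRIVE_NUMBER() yields
+ * ((2 - 1) << 8) + 5 = 261.
+ */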
+
+#define NO_TIMEOUT             ((unsigned long) -1)
+
+#pragma pack(1)
+
+struct bmic_identify_controller {
+       u8      configured_logical_drive_count;
+       __le32  configuration_signature;
+       u8      firmware_version[4];
+       u8      reserved[145];
+       __le16  extended_logical_unit_count;
+       u8      reserved1[34];
+       __le16  firmware_build_number;
+       u8      reserved2[100];
+       u8      controller_mode;
+       u8      reserved3[32];
+};
+
+struct bmic_identify_physical_device {
+       u8      scsi_bus;               /* SCSI Bus number on controller */
+       u8      scsi_id;                /* SCSI ID on this bus */
+       __le16  block_size;             /* sector size in bytes */
+       __le32  total_blocks;           /* number of sectors on drive */
+       __le32  reserved_blocks;        /* controller reserved (RIS) */
+       u8      model[40];              /* Physical Drive Model */
+       u8      serial_number[40];      /* Drive Serial Number */
+       u8      firmware_revision[8];   /* drive firmware revision */
+       u8      scsi_inquiry_bits;      /* inquiry byte 7 bits */
+       u8      compaq_drive_stamp;     /* 0 means drive not stamped */
+       u8      last_failure_reason;
+       u8      flags;
+       u8      more_flags;
+       u8      scsi_lun;               /* SCSI LUN for phys drive */
+       u8      yet_more_flags;
+       u8      even_more_flags;
+       __le32  spi_speed_rules;
+       u8      phys_connector[2];      /* connector number on controller */
+       u8      phys_box_on_bus;        /* phys enclosure this drive resides in */
+       u8      phys_bay_in_box;        /* phys drv bay this drive resides in */
+       __le32  rpm;                    /* drive rotational speed in RPM */
+       u8      device_type;            /* type of drive */
+       u8      sata_version;           /* only valid when device_type = */
+                                       /* BMIC_DEVICE_TYPE_SATA */
+       __le64  big_total_block_count;
+       __le64  ris_starting_lba;
+       __le32  ris_size;
+       u8      wwid[20];
+       u8      controller_phy_map[32];
+       __le16  phy_count;
+       u8      phy_connected_dev_type[256];
+       u8      phy_to_drive_bay_num[256];
+       __le16  phy_to_attached_dev_index[256];
+       u8      box_index;
+       u8      reserved;
+       __le16  extra_physical_drive_flags;
+       u8      negotiated_link_rate[256];
+       u8      phy_to_phy_map[256];
+       u8      redundant_path_present_map;
+       u8      redundant_path_failure_map;
+       u8      active_path_number;
+       __le16  alternate_paths_phys_connector[8];
+       u8      alternate_paths_phys_box_on_port[8];
+       u8      multi_lun_device_lun_count;
+       u8      minimum_good_fw_revision[8];
+       u8      unique_inquiry_bytes[20];
+       u8      current_temperature_degreesC;
+       u8      temperature_threshold_degreesC;
+       u8      max_temperature_degreesC;
+       u8      logical_blocks_per_phys_block_exp;
+       __le16  current_queue_depth_limit;
+       u8      switch_name[10];
+       __le16  switch_port;
+       u8      alternate_paths_switch_name[40];
+       u8      alternate_paths_switch_port[8];
+       __le16  power_on_hours;
+       __le16  percent_endurance_used;
+       u8      drive_authentication;
+       u8      smart_carrier_authentication;
+       u8      smart_carrier_app_fw_version;
+       u8      smart_carrier_bootloader_fw_version;
+       u8      encryption_key_name[64];
+       __le32  misc_drive_flags;
+       __le16  dek_index;
+       u8      padding[112];
+};
+
+#pragma pack()
+
+int pqi_add_sas_host(struct Scsi_Host *shost, struct pqi_ctrl_info *ctrl_info);
+void pqi_delete_sas_host(struct pqi_ctrl_info *ctrl_info);
+int pqi_add_sas_device(struct pqi_sas_node *pqi_sas_node,
+       struct pqi_scsi_dev *device);
+void pqi_remove_sas_device(struct pqi_scsi_dev *device);
+struct pqi_scsi_dev *pqi_find_device_by_sas_rphy(
+       struct pqi_ctrl_info *ctrl_info, struct sas_rphy *rphy);
+
+extern struct sas_function_template pqi_sas_transport_functions;
+
+#if !defined(readq)
+#define readq readq
+static inline u64 readq(const volatile void __iomem *addr)
+{
+       u32 lower32;
+       u32 upper32;
+
+       lower32 = readl(addr);
+       upper32 = readl(addr + 4);
+
+       return ((u64)upper32 << 32) | lower32;
+}
+#endif
+
+#if !defined(writeq)
+#define writeq writeq
+static inline void writeq(u64 value, volatile void __iomem *addr)
+{
+       u32 lower32;
+       u32 upper32;
+
+       lower32 = lower_32_bits(value);
+       upper32 = upper_32_bits(value);
+
+       writel(lower32, addr);
+       writel(upper32, addr + 4);
+}
+#endif
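+
+/*
+ * Note: the readq()/writeq() fallbacks above split each 64-bit MMIO access
+ * into two 32-bit accesses, so they are not atomic with respect to the
+ * device; this assumes the registers accessed this way tolerate the two
+ * halves arriving separately.
+ */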
+
+#endif /* _SMARTPQI_H */
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
new file mode 100644 (file)
index 0000000..52cfa26
--- /dev/null
@@ -0,0 +1,6302 @@
+/*
+ *    driver for Microsemi PQI-based storage controllers
+ *    Copyright (c) 2016 Microsemi Corporation
+ *    Copyright (c) 2016 PMC-Sierra, Inc.
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; version 2 of the License.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *    NON INFRINGEMENT.  See the GNU General Public License for more details.
+ *
+ *    Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/rtc.h>
+#include <linux/bcd.h>
+#include <linux/cciss_ioctl.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_transport_sas.h>
+#include <asm/unaligned.h>
+#include "smartpqi.h"
+#include "smartpqi_sis.h"
+
+#if !defined(BUILD_TIMESTAMP)
+#define BUILD_TIMESTAMP
+#endif
+
+#define DRIVER_VERSION         "0.9.13-370"
+#define DRIVER_MAJOR           0
+#define DRIVER_MINOR           9
+#define DRIVER_RELEASE         13
+#define DRIVER_REVISION                370
+
+#define DRIVER_NAME            "Microsemi PQI Driver (v" DRIVER_VERSION ")"
+#define DRIVER_NAME_SHORT      "smartpqi"
+
+MODULE_AUTHOR("Microsemi");
+MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version "
+       DRIVER_VERSION);
+MODULE_SUPPORTED_DEVICE("Microsemi Smart Family Controllers");
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_LICENSE("GPL");
+
+#define PQI_ENABLE_MULTI_QUEUE_SUPPORT 0
+
+static char *hpe_branded_controller = "HPE Smart Array Controller";
+static char *microsemi_branded_controller = "Microsemi Smart Family Controller";
+
+static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info);
+static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
+static void pqi_scan_start(struct Scsi_Host *shost);
+static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
+       struct pqi_queue_group *queue_group, enum pqi_io_path path,
+       struct pqi_io_request *io_request);
+static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
+       struct pqi_iu_header *request, unsigned int flags,
+       struct pqi_raid_error_info *error_info, unsigned long timeout_msecs);
+static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
+       struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
+       unsigned int cdb_length, struct pqi_queue_group *queue_group,
+       struct pqi_encryption_info *encryption_info);
+
+/* for flags argument to pqi_submit_raid_request_synchronous() */
+#define PQI_SYNC_FLAGS_INTERRUPTABLE   0x1
+
+static struct scsi_transport_template *pqi_sas_transport_template;
+
+static atomic_t pqi_controller_count = ATOMIC_INIT(0);
+
+static int pqi_disable_device_id_wildcards;
+module_param_named(disable_device_id_wildcards,
+       pqi_disable_device_id_wildcards, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(disable_device_id_wildcards,
+       "Disable device ID wildcards.");
+
+static char *raid_levels[] = {
+       "RAID-0",
+       "RAID-4",
+       "RAID-1(1+0)",
+       "RAID-5",
+       "RAID-5+1",
+       "RAID-ADG",
+       "RAID-1(ADM)",
+};
+
+static char *pqi_raid_level_to_string(u8 raid_level)
+{
+       if (raid_level < ARRAY_SIZE(raid_levels))
+               return raid_levels[raid_level];
+
+       return "";
+}
+
+#define SA_RAID_0              0
+#define SA_RAID_4              1
+#define SA_RAID_1              2       /* also used for RAID 10 */
+#define SA_RAID_5              3       /* also used for RAID 50 */
+#define SA_RAID_51             4
+#define SA_RAID_6              5       /* also used for RAID 60 */
+#define SA_RAID_ADM            6       /* also used for RAID 1+0 ADM */
+#define SA_RAID_MAX            SA_RAID_ADM
+#define SA_RAID_UNKNOWN                0xff
+
+static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
+{
+       scmd->scsi_done(scmd);
+}
+
+static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
+{
+       return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
+}
+
+static inline struct pqi_ctrl_info *shost_to_hba(struct Scsi_Host *shost)
+{
+       void *hostdata = shost_priv(shost);
+
+       return *((struct pqi_ctrl_info **)hostdata);
+}
+
+static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
+{
+       return !device->is_physical_device;
+}
+
+static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
+{
+       return !ctrl_info->controller_online;
+}
+
+static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
+{
+       if (ctrl_info->controller_online)
+               if (!sis_is_firmware_running(ctrl_info))
+                       pqi_take_ctrl_offline(ctrl_info);
+}
+
+static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
+{
+       return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
+}
+
+static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(
+       struct pqi_ctrl_info *ctrl_info)
+{
+       return sis_read_driver_scratch(ctrl_info);
+}
+
+static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
+       enum pqi_ctrl_mode mode)
+{
+       sis_write_driver_scratch(ctrl_info, mode);
+}
+
+#define PQI_RESCAN_WORK_INTERVAL       (10 * HZ)
+
+static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
+{
+       schedule_delayed_work(&ctrl_info->rescan_work,
+               PQI_RESCAN_WORK_INTERVAL);
+}
+
+static int pqi_map_single(struct pci_dev *pci_dev,
+       struct pqi_sg_descriptor *sg_descriptor, void *buffer,
+       size_t buffer_length, int data_direction)
+{
+       dma_addr_t bus_address;
+
+       if (!buffer || buffer_length == 0 || data_direction == PCI_DMA_NONE)
+               return 0;
+
+       bus_address = pci_map_single(pci_dev, buffer, buffer_length,
+               data_direction);
+       if (pci_dma_mapping_error(pci_dev, bus_address))
+               return -ENOMEM;
+
+       put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
+       put_unaligned_le32(buffer_length, &sg_descriptor->length);
+       put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
+
+       return 0;
+}
+
+static void pqi_pci_unmap(struct pci_dev *pci_dev,
+       struct pqi_sg_descriptor *descriptors, int num_descriptors,
+       int data_direction)
+{
+       int i;
+
+       if (data_direction == PCI_DMA_NONE)
+               return;
+
+       for (i = 0; i < num_descriptors; i++)
+               pci_unmap_single(pci_dev,
+                       (dma_addr_t)get_unaligned_le64(&descriptors[i].address),
+                       get_unaligned_le32(&descriptors[i].length),
+                       data_direction);
+}
+
+static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
+       struct pqi_raid_path_request *request, u8 cmd,
+       u8 *scsi3addr, void *buffer, size_t buffer_length,
+       u16 vpd_page, int *pci_direction)
+{
+       u8 *cdb;
+       int pci_dir;
+
+       memset(request, 0, sizeof(*request));
+
+       request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
+       put_unaligned_le16(offsetof(struct pqi_raid_path_request,
+               sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
+               &request->header.iu_length);
+       put_unaligned_le32(buffer_length, &request->buffer_length);
+       memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
+       request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
+       request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
+
+       cdb = request->cdb;
+
+       switch (cmd) {
+       case INQUIRY:
+               request->data_direction = SOP_READ_FLAG;
+               cdb[0] = INQUIRY;
+               if (vpd_page & VPD_PAGE) {
+                       cdb[1] = 0x1;
+                       cdb[2] = (u8)vpd_page;
+               }
+               cdb[4] = (u8)buffer_length;
+               break;
+       case CISS_REPORT_LOG:
+       case CISS_REPORT_PHYS:
+               request->data_direction = SOP_READ_FLAG;
+               cdb[0] = cmd;
+               if (cmd == CISS_REPORT_PHYS)
+                       cdb[1] = CISS_REPORT_PHYS_EXTENDED;
+               else
+                       cdb[1] = CISS_REPORT_LOG_EXTENDED;
+               put_unaligned_be32(buffer_length, &cdb[6]);
+               break;
+       case CISS_GET_RAID_MAP:
+               request->data_direction = SOP_READ_FLAG;
+               cdb[0] = CISS_READ;
+               cdb[1] = CISS_GET_RAID_MAP;
+               put_unaligned_be32(buffer_length, &cdb[6]);
+               break;
+       case SA_CACHE_FLUSH:
+               request->data_direction = SOP_WRITE_FLAG;
+               cdb[0] = BMIC_WRITE;
+               cdb[6] = BMIC_CACHE_FLUSH;
+               put_unaligned_be16(buffer_length, &cdb[7]);
+               break;
+       case BMIC_IDENTIFY_CONTROLLER:
+       case BMIC_IDENTIFY_PHYSICAL_DEVICE:
+               request->data_direction = SOP_READ_FLAG;
+               cdb[0] = BMIC_READ;
+               cdb[6] = cmd;
+               put_unaligned_be16(buffer_length, &cdb[7]);
+               break;
+       case BMIC_WRITE_HOST_WELLNESS:
+               request->data_direction = SOP_WRITE_FLAG;
+               cdb[0] = BMIC_WRITE;
+               cdb[6] = cmd;
+               put_unaligned_be16(buffer_length, &cdb[7]);
+               break;
+       default:
+               dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%x\n",
+                       cmd);
+               WARN_ON(cmd);
+               break;
+       }
+
+       switch (request->data_direction) {
+       case SOP_READ_FLAG:
+               pci_dir = PCI_DMA_FROMDEVICE;
+               break;
+       case SOP_WRITE_FLAG:
+               pci_dir = PCI_DMA_TODEVICE;
+               break;
+       case SOP_NO_DIRECTION_FLAG:
+               pci_dir = PCI_DMA_NONE;
+               break;
+       default:
+               pci_dir = PCI_DMA_BIDIRECTIONAL;
+               break;
+       }
+
+       *pci_direction = pci_dir;
+
+       return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
+               buffer, buffer_length, pci_dir);
+}
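+
+/*
+ * Example (illustrative): building a 64-byte INQUIRY for VPD page
+ * (VPD_PAGE | CISS_VPD_LV_STATUS) with the function above yields
+ * cdb[0] = INQUIRY (0x12), cdb[1] = 0x1 (EVPD), cdb[2] = 0xc3,
+ * cdb[4] = 64 and data_direction = SOP_READ_FLAG.
+ */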
+
+static struct pqi_io_request *pqi_alloc_io_request(
+       struct pqi_ctrl_info *ctrl_info)
+{
+       struct pqi_io_request *io_request;
+       u16 i = ctrl_info->next_io_request_slot;        /* benignly racy */
+
+       while (1) {
+               io_request = &ctrl_info->io_request_pool[i];
+               if (atomic_inc_return(&io_request->refcount) == 1)
+                       break;
+               atomic_dec(&io_request->refcount);
+               i = (i + 1) % ctrl_info->max_io_slots;
+       }
+
+       /* benignly racy */
+       ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots;
+
+       io_request->scmd = NULL;
+       io_request->status = 0;
+       io_request->error_info = NULL;
+
+       return io_request;
+}
+
+static void pqi_free_io_request(struct pqi_io_request *io_request)
+{
+       atomic_dec(&io_request->refcount);
+}
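+
+/*
+ * The allocator above claims a slot by being the first to raise its
+ * refcount from 0 to 1 (atomic_inc_return() == 1); a loser backs out with
+ * atomic_dec() and probes the next slot.  next_io_request_slot is only a
+ * starting hint, which is why its unlocked updates are benign.
+ */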
+
+static int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
+       struct bmic_identify_controller *buffer)
+{
+       int rc;
+       int pci_direction;
+       struct pqi_raid_path_request request;
+
+       rc = pqi_build_raid_path_request(ctrl_info, &request,
+               BMIC_IDENTIFY_CONTROLLER, RAID_CTLR_LUNID, buffer,
+               sizeof(*buffer), 0, &pci_direction);
+       if (rc)
+               return rc;
+
+       rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
+               NULL, NO_TIMEOUT);
+
+       pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
+               pci_direction);
+
+       return rc;
+}
+
+static int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
+       u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
+{
+       int rc;
+       int pci_direction;
+       struct pqi_raid_path_request request;
+
+       rc = pqi_build_raid_path_request(ctrl_info, &request,
+               INQUIRY, scsi3addr, buffer, buffer_length, vpd_page,
+               &pci_direction);
+       if (rc)
+               return rc;
+
+       rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
+               NULL, NO_TIMEOUT);
+
+       pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
+               pci_direction);
+
+       return rc;
+}
+
+static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
+       struct pqi_scsi_dev *device,
+       struct bmic_identify_physical_device *buffer,
+       size_t buffer_length)
+{
+       int rc;
+       int pci_direction;
+       u16 bmic_device_index;
+       struct pqi_raid_path_request request;
+
+       rc = pqi_build_raid_path_request(ctrl_info, &request,
+               BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
+               buffer_length, 0, &pci_direction);
+       if (rc)
+               return rc;
+
+       bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr);
+       request.cdb[2] = (u8)bmic_device_index;
+       request.cdb[9] = (u8)(bmic_device_index >> 8);
+
+       rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
+               0, NULL, NO_TIMEOUT);
+
+       pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
+               pci_direction);
+
+       return rc;
+}
+
+#define SA_CACHE_FLUSH_BUFFER_LENGTH   4
+
+static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info)
+{
+       int rc;
+       struct pqi_raid_path_request request;
+       int pci_direction;
+       u8 *buffer;
+
+       /*
+        * Don't bother trying to flush the cache if the controller is
+        * locked up.
+        */
+       if (pqi_ctrl_offline(ctrl_info))
+               return -ENXIO;
+
+       buffer = kzalloc(SA_CACHE_FLUSH_BUFFER_LENGTH, GFP_KERNEL);
+       if (!buffer)
+               return -ENOMEM;
+
+       rc = pqi_build_raid_path_request(ctrl_info, &request,
+               SA_CACHE_FLUSH, RAID_CTLR_LUNID, buffer,
+               SA_CACHE_FLUSH_BUFFER_LENGTH, 0, &pci_direction);
+       if (rc)
+               goto out;
+
+       rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
+               0, NULL, NO_TIMEOUT);
+
+       pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
+               pci_direction);
+
+out:
+       kfree(buffer);
+
+       return rc;
+}
+
+static int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
+       void *buffer, size_t buffer_length)
+{
+       int rc;
+       struct pqi_raid_path_request request;
+       int pci_direction;
+
+       rc = pqi_build_raid_path_request(ctrl_info, &request,
+               BMIC_WRITE_HOST_WELLNESS, RAID_CTLR_LUNID, buffer,
+               buffer_length, 0, &pci_direction);
+       if (rc)
+               return rc;
+
+       rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
+               0, NULL, NO_TIMEOUT);
+
+       pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
+               pci_direction);
+
+       return rc;
+}
+
+#pragma pack(1)
+
+struct bmic_host_wellness_driver_version {
+       u8      start_tag[4];
+       u8      driver_version_tag[2];
+       __le16  driver_version_length;
+       char    driver_version[32];
+       u8      end_tag[2];
+};
+
+#pragma pack()
+
+static int pqi_write_driver_version_to_host_wellness(
+       struct pqi_ctrl_info *ctrl_info)
+{
+       int rc;
+       struct bmic_host_wellness_driver_version *buffer;
+       size_t buffer_length;
+
+       buffer_length = sizeof(*buffer);
+
+       buffer = kmalloc(buffer_length, GFP_KERNEL);
+       if (!buffer)
+               return -ENOMEM;
+
+       buffer->start_tag[0] = '<';
+       buffer->start_tag[1] = 'H';
+       buffer->start_tag[2] = 'W';
+       buffer->start_tag[3] = '>';
+       buffer->driver_version_tag[0] = 'D';
+       buffer->driver_version_tag[1] = 'V';
+       put_unaligned_le16(sizeof(buffer->driver_version),
+               &buffer->driver_version_length);
+       strncpy(buffer->driver_version, DRIVER_VERSION,
+               sizeof(buffer->driver_version) - 1);
+       buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
+       buffer->end_tag[0] = 'Z';
+       buffer->end_tag[1] = 'Z';
+
+       rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);
+
+       kfree(buffer);
+
+       return rc;
+}
+
+#pragma pack(1)
+
+struct bmic_host_wellness_time {
+       u8      start_tag[4];
+       u8      time_tag[2];
+       __le16  time_length;
+       u8      time[8];
+       u8      dont_write_tag[2];
+       u8      end_tag[2];
+};
+
+#pragma pack()
+
+static int pqi_write_current_time_to_host_wellness(
+       struct pqi_ctrl_info *ctrl_info)
+{
+       int rc;
+       struct bmic_host_wellness_time *buffer;
+       size_t buffer_length;
+       time64_t local_time;
+       unsigned int year;
+       struct timeval time;
+       struct rtc_time tm;
+
+       buffer_length = sizeof(*buffer);
+
+       buffer = kmalloc(buffer_length, GFP_KERNEL);
+       if (!buffer)
+               return -ENOMEM;
+
+       buffer->start_tag[0] = '<';
+       buffer->start_tag[1] = 'H';
+       buffer->start_tag[2] = 'W';
+       buffer->start_tag[3] = '>';
+       buffer->time_tag[0] = 'T';
+       buffer->time_tag[1] = 'D';
+       put_unaligned_le16(sizeof(buffer->time),
+               &buffer->time_length);
+
+       do_gettimeofday(&time);
+       local_time = time.tv_sec - (sys_tz.tz_minuteswest * 60);
+       rtc_time64_to_tm(local_time, &tm);
+       year = tm.tm_year + 1900;
+
+       buffer->time[0] = bin2bcd(tm.tm_hour);
+       buffer->time[1] = bin2bcd(tm.tm_min);
+       buffer->time[2] = bin2bcd(tm.tm_sec);
+       buffer->time[3] = 0;
+       buffer->time[4] = bin2bcd(tm.tm_mon + 1);
+       buffer->time[5] = bin2bcd(tm.tm_mday);
+       buffer->time[6] = bin2bcd(year / 100);
+       buffer->time[7] = bin2bcd(year % 100);
+
+       buffer->dont_write_tag[0] = 'D';
+       buffer->dont_write_tag[1] = 'W';
+       buffer->end_tag[0] = 'Z';
+       buffer->end_tag[1] = 'Z';
+
+       rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);
+
+       kfree(buffer);
+
+       return rc;
+}
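+
+/*
+ * Worked example (hypothetical local time): 2016-09-13 13:02:05 is encoded
+ * above as time[] = { 0x13, 0x02, 0x05, 0x00, 0x09, 0x13, 0x20, 0x16 }
+ * (BCD hour, minute, second, pad, month, day, century, year).
+ */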
+
+#define PQI_UPDATE_TIME_WORK_INTERVAL  (24UL * 60 * 60 * HZ)
+
+static void pqi_update_time_worker(struct work_struct *work)
+{
+       int rc;
+       struct pqi_ctrl_info *ctrl_info;
+
+       ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
+               update_time_work);
+
+       rc = pqi_write_current_time_to_host_wellness(ctrl_info);
+       if (rc)
+               dev_warn(&ctrl_info->pci_dev->dev,
+                       "error updating time on controller\n");
+
+       schedule_delayed_work(&ctrl_info->update_time_work,
+               PQI_UPDATE_TIME_WORK_INTERVAL);
+}
+
+static inline void pqi_schedule_update_time_worker(
+       struct pqi_ctrl_info *ctrl_info)
+{
+       schedule_delayed_work(&ctrl_info->update_time_work, 0);
+}
+
+static int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
+       void *buffer, size_t buffer_length)
+{
+       int rc;
+       int pci_direction;
+       struct pqi_raid_path_request request;
+
+       rc = pqi_build_raid_path_request(ctrl_info, &request,
+               cmd, RAID_CTLR_LUNID, buffer, buffer_length, 0, &pci_direction);
+       if (rc)
+               return rc;
+
+       rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
+               NULL, NO_TIMEOUT);
+
+       pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
+               pci_direction);
+
+       return rc;
+}
+
+static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
+       void **buffer)
+{
+       int rc;
+       size_t lun_list_length;
+       size_t lun_data_length;
+       size_t new_lun_list_length;
+       void *lun_data = NULL;
+       struct report_lun_header *report_lun_header;
+
+       report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL);
+       if (!report_lun_header) {
+               rc = -ENOMEM;
+               goto out;
+       }
+
+       rc = pqi_report_luns(ctrl_info, cmd, report_lun_header,
+               sizeof(*report_lun_header));
+       if (rc)
+               goto out;
+
+       lun_list_length = get_unaligned_be32(&report_lun_header->list_length);
+
+again:
+       lun_data_length = sizeof(struct report_lun_header) + lun_list_length;
+
+       lun_data = kmalloc(lun_data_length, GFP_KERNEL);
+       if (!lun_data) {
+               rc = -ENOMEM;
+               goto out;
+       }
+
+       if (lun_list_length == 0) {
+               memcpy(lun_data, report_lun_header, sizeof(*report_lun_header));
+               goto out;
+       }
+
+       rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
+       if (rc)
+               goto out;
+
+       new_lun_list_length = get_unaligned_be32(
+               &((struct report_lun_header *)lun_data)->list_length);
+
+       if (new_lun_list_length > lun_list_length) {
+               lun_list_length = new_lun_list_length;
+               kfree(lun_data);
+               goto again;
+       }
+
+out:
+       kfree(report_lun_header);
+
+       if (rc) {
+               kfree(lun_data);
+               lun_data = NULL;
+       }
+
+       *buffer = lun_data;
+
+       return rc;
+}
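+
+/*
+ * The helper above first fetches just the report header to learn the list
+ * length, then allocates a buffer and fetches the full list; if the list
+ * grew between the two commands, it retries with the larger length until a
+ * complete snapshot fits.
+ */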
+
+static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info,
+       void **buffer)
+{
+       return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS,
+               buffer);
+}
+
+static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info,
+       void **buffer)
+{
+       return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
+}
+
+static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
+       struct report_phys_lun_extended **physdev_list,
+       struct report_log_lun_extended **logdev_list)
+{
+       int rc;
+       size_t logdev_list_length;
+       size_t logdev_data_length;
+       struct report_log_lun_extended *internal_logdev_list;
+       struct report_log_lun_extended *logdev_data;
+       struct report_lun_header report_lun_header;
+
+       rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
+       if (rc)
+               dev_err(&ctrl_info->pci_dev->dev,
+                       "report physical LUNs failed\n");
+
+       rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list);
+       if (rc)
+               dev_err(&ctrl_info->pci_dev->dev,
+                       "report logical LUNs failed\n");
+
+       /*
+        * Tack the controller itself onto the end of the logical device list.
+        */
+
+       logdev_data = *logdev_list;
+
+       if (logdev_data) {
+               logdev_list_length =
+                       get_unaligned_be32(&logdev_data->header.list_length);
+       } else {
+               memset(&report_lun_header, 0, sizeof(report_lun_header));
+               logdev_data =
+                       (struct report_log_lun_extended *)&report_lun_header;
+               logdev_list_length = 0;
+       }
+
+       logdev_data_length = sizeof(struct report_lun_header) +
+               logdev_list_length;
+
+       internal_logdev_list = kmalloc(logdev_data_length +
+               sizeof(struct report_log_lun_extended), GFP_KERNEL);
+       if (!internal_logdev_list) {
+               kfree(*logdev_list);
+               *logdev_list = NULL;
+               return -ENOMEM;
+       }
+
+       memcpy(internal_logdev_list, logdev_data, logdev_data_length);
+       memset((u8 *)internal_logdev_list + logdev_data_length, 0,
+               sizeof(struct report_log_lun_extended_entry));
+       put_unaligned_be32(logdev_list_length +
+               sizeof(struct report_log_lun_extended_entry),
+               &internal_logdev_list->header.list_length);
+
+       kfree(*logdev_list);
+       *logdev_list = internal_logdev_list;
+
+       return 0;
+}
+
+static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device,
+       int bus, int target, int lun)
+{
+       device->bus = bus;
+       device->target = target;
+       device->lun = lun;
+}
+
+static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
+{
+       u8 *scsi3addr;
+       u32 lunid;
+
+       scsi3addr = device->scsi3addr;
+       lunid = get_unaligned_le32(scsi3addr);
+
+       if (pqi_is_hba_lunid(scsi3addr)) {
+               /* The specified device is the controller. */
+               pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff);
+               device->target_lun_valid = true;
+               return;
+       }
+
+       if (pqi_is_logical_device(device)) {
+               pqi_set_bus_target_lun(device, PQI_RAID_VOLUME_BUS, 0,
+                       lunid & 0x3fff);
+               device->target_lun_valid = true;
+               return;
+       }
+
+       /*
+        * Defer target and LUN assignment for non-controller physical devices
+        * because the SAS transport layer will make these assignments later.
+        */
+       pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0);
+}
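+
+/*
+ * Example (hypothetical lunids): an all-zero lunid is the controller itself
+ * and maps to (PQI_HBA_BUS, 0, 0); a logical volume whose lunid reads 5 in
+ * little-endian maps to (PQI_RAID_VOLUME_BUS, 0, 5).
+ */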
+
+static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info,
+       struct pqi_scsi_dev *device)
+{
+       int rc;
+       u8 raid_level;
+       u8 *buffer;
+
+       raid_level = SA_RAID_UNKNOWN;
+
+       buffer = kmalloc(64, GFP_KERNEL);
+       if (buffer) {
+               rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
+                       VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64);
+               if (rc == 0) {
+                       raid_level = buffer[8];
+                       if (raid_level > SA_RAID_MAX)
+                               raid_level = SA_RAID_UNKNOWN;
+               }
+               kfree(buffer);
+       }
+
+       device->raid_level = raid_level;
+}
+
+static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
+       struct pqi_scsi_dev *device, struct raid_map *raid_map)
+{
+       char *err_msg;
+       u32 raid_map_size;
+       u32 r5or6_blocks_per_row;
+       unsigned int num_phys_disks;
+       unsigned int num_raid_map_entries;
+
+       raid_map_size = get_unaligned_le32(&raid_map->structure_size);
+
+       if (raid_map_size < offsetof(struct raid_map, disk_data)) {
+               err_msg = "RAID map too small";
+               goto bad_raid_map;
+       }
+
+       if (raid_map_size > sizeof(*raid_map)) {
+               err_msg = "RAID map too large";
+               goto bad_raid_map;
+       }
+
+       num_phys_disks = get_unaligned_le16(&raid_map->layout_map_count) *
+               (get_unaligned_le16(&raid_map->data_disks_per_row) +
+               get_unaligned_le16(&raid_map->metadata_disks_per_row));
+       num_raid_map_entries = num_phys_disks *
+               get_unaligned_le16(&raid_map->row_cnt);
+
+       if (num_raid_map_entries > RAID_MAP_MAX_ENTRIES) {
+               err_msg = "invalid number of map entries in RAID map";
+               goto bad_raid_map;
+       }
+
+       if (device->raid_level == SA_RAID_1) {
+               if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
+                       err_msg = "invalid RAID-1 map";
+                       goto bad_raid_map;
+               }
+       } else if (device->raid_level == SA_RAID_ADM) {
+               if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
+                       err_msg = "invalid RAID-1(ADM) map";
+                       goto bad_raid_map;
+               }
+       } else if ((device->raid_level == SA_RAID_5 ||
+               device->raid_level == SA_RAID_6) &&
+               get_unaligned_le16(&raid_map->layout_map_count) > 1) {
+               /* RAID 50/60 */
+               r5or6_blocks_per_row =
+                       get_unaligned_le16(&raid_map->strip_size) *
+                       get_unaligned_le16(&raid_map->data_disks_per_row);
+               if (r5or6_blocks_per_row == 0) {
+                       err_msg = "invalid RAID-5 or RAID-6 map";
+                       goto bad_raid_map;
+               }
+       }
+
+       return 0;
+
+bad_raid_map:
+       dev_warn(&ctrl_info->pci_dev->dev, "%s\n", err_msg);
+
+       return -EINVAL;
+}
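+
+/*
+ * Worked example (hypothetical RAID-50 map): layout_map_count = 2,
+ * data_disks_per_row = 3, metadata_disks_per_row = 1 and row_cnt = 10 give
+ * num_phys_disks = 2 * (3 + 1) = 8 and num_raid_map_entries = 8 * 10 = 80,
+ * comfortably below RAID_MAP_MAX_ENTRIES (1024).
+ */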
+
+static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
+       struct pqi_scsi_dev *device)
+{
+       int rc;
+       int pci_direction;
+       struct pqi_raid_path_request request;
+       struct raid_map *raid_map;
+
+       raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
+       if (!raid_map)
+               return -ENOMEM;
+
+       rc = pqi_build_raid_path_request(ctrl_info, &request,
+               CISS_GET_RAID_MAP, device->scsi3addr, raid_map,
+               sizeof(*raid_map), 0, &pci_direction);
+       if (rc)
+               goto error;
+
+       rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
+               NULL, NO_TIMEOUT);
+
+       pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
+               pci_direction);
+
+       if (rc)
+               goto error;
+
+       rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
+       if (rc)
+               goto error;
+
+       device->raid_map = raid_map;
+
+       return 0;
+
+error:
+       kfree(raid_map);
+
+       return rc;
+}
+
+static void pqi_get_offload_status(struct pqi_ctrl_info *ctrl_info,
+       struct pqi_scsi_dev *device)
+{
+       int rc;
+       u8 *buffer;
+       u8 offload_status;
+
+       buffer = kmalloc(64, GFP_KERNEL);
+       if (!buffer)
+               return;
+
+       rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
+               VPD_PAGE | CISS_VPD_LV_OFFLOAD_STATUS, buffer, 64);
+       if (rc)
+               goto out;
+
+#define OFFLOAD_STATUS_BYTE    4
+#define OFFLOAD_CONFIGURED_BIT 0x1
+#define OFFLOAD_ENABLED_BIT    0x2
+
+       offload_status = buffer[OFFLOAD_STATUS_BYTE];
+       device->offload_configured =
+               !!(offload_status & OFFLOAD_CONFIGURED_BIT);
+       if (device->offload_configured) {
+               device->offload_enabled_pending =
+                       !!(offload_status & OFFLOAD_ENABLED_BIT);
+               if (pqi_get_raid_map(ctrl_info, device))
+                       device->offload_enabled_pending = false;
+       }
+
+out:
+       kfree(buffer);
+}
+
+/*
+ * Use vendor-specific VPD to determine online/offline status of a volume.
+ */
+
+static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
+       struct pqi_scsi_dev *device)
+{
+       int rc;
+       size_t page_length;
+       u8 volume_status = CISS_LV_STATUS_UNAVAILABLE;
+       bool volume_offline = true;
+       u32 volume_flags;
+       struct ciss_vpd_logical_volume_status *vpd;
+
+       vpd = kmalloc(sizeof(*vpd), GFP_KERNEL);
+       if (!vpd)
+               goto no_buffer;
+
+       rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
+               VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd));
+       if (rc)
+               goto out;
+
+       page_length = offsetof(struct ciss_vpd_logical_volume_status,
+               volume_status) + vpd->page_length;
+       if (page_length < sizeof(*vpd))
+               goto out;
+
+       volume_status = vpd->volume_status;
+       volume_flags = get_unaligned_be32(&vpd->flags);
+       volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0;
+
+out:
+       kfree(vpd);
+no_buffer:
+       device->volume_status = volume_status;
+       device->volume_offline = volume_offline;
+}
+
+static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
+       struct pqi_scsi_dev *device)
+{
+       int rc;
+       u8 *buffer;
+
+       buffer = kmalloc(64, GFP_KERNEL);
+       if (!buffer)
+               return -ENOMEM;
+
+       /* Send an inquiry to the device to see what it is. */
+       rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64);
+       if (rc)
+               goto out;
+
+       scsi_sanitize_inquiry_string(&buffer[8], 8);
+       scsi_sanitize_inquiry_string(&buffer[16], 16);
+
+       device->devtype = buffer[0] & 0x1f;
+       memcpy(device->vendor, &buffer[8],
+               sizeof(device->vendor));
+       memcpy(device->model, &buffer[16],
+               sizeof(device->model));
+
+       if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) {
+               pqi_get_raid_level(ctrl_info, device);
+               pqi_get_offload_status(ctrl_info, device);
+               pqi_get_volume_status(ctrl_info, device);
+       }
+
+out:
+       kfree(buffer);
+
+       return rc;
+}
+
+static void pqi_get_physical_disk_info(struct pqi_ctrl_info *ctrl_info,
+       struct pqi_scsi_dev *device,
+       struct bmic_identify_physical_device *id_phys)
+{
+       int rc;
+
+       memset(id_phys, 0, sizeof(*id_phys));
+
+       rc = pqi_identify_physical_device(ctrl_info, device,
+               id_phys, sizeof(*id_phys));
+       if (rc) {
+               device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
+               return;
+       }
+
+       device->queue_depth =
+               get_unaligned_le16(&id_phys->current_queue_depth_limit);
+       device->device_type = id_phys->device_type;
+       device->active_path_index = id_phys->active_path_number;
+       device->path_map = id_phys->redundant_path_present_map;
+       memcpy(&device->box,
+               &id_phys->alternate_paths_phys_box_on_port,
+               sizeof(device->box));
+       memcpy(&device->phys_connector,
+               &id_phys->alternate_paths_phys_connector,
+               sizeof(device->phys_connector));
+       device->bay = id_phys->phys_bay_in_box;
+}
+
+static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
+       struct pqi_scsi_dev *device)
+{
+       char *status;
+       static const char unknown_state_str[] =
+               "Volume is in an unknown state (%u)";
+       char unknown_state_buffer[sizeof(unknown_state_str) + 10];
+
+       switch (device->volume_status) {
+       case CISS_LV_OK:
+               status = "Volume online";
+               break;
+       case CISS_LV_FAILED:
+               status = "Volume failed";
+               break;
+       case CISS_LV_NOT_CONFIGURED:
+               status = "Volume not configured";
+               break;
+       case CISS_LV_DEGRADED:
+               status = "Volume degraded";
+               break;
+       case CISS_LV_READY_FOR_RECOVERY:
+               status = "Volume ready for recovery operation";
+               break;
+       case CISS_LV_UNDERGOING_RECOVERY:
+               status = "Volume undergoing recovery";
+               break;
+       case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
+               status = "Wrong physical drive was replaced";
+               break;
+       case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
+               status = "A physical drive not properly connected";
+               break;
+       case CISS_LV_HARDWARE_OVERHEATING:
+               status = "Hardware is overheating";
+               break;
+       case CISS_LV_HARDWARE_HAS_OVERHEATED:
+               status = "Hardware has overheated";
+               break;
+       case CISS_LV_UNDERGOING_EXPANSION:
+               status = "Volume undergoing expansion";
+               break;
+       case CISS_LV_NOT_AVAILABLE:
+               status = "Volume waiting for transforming volume";
+               break;
+       case CISS_LV_QUEUED_FOR_EXPANSION:
+               status = "Volume queued for expansion";
+               break;
+       case CISS_LV_DISABLED_SCSI_ID_CONFLICT:
+               status = "Volume disabled due to SCSI ID conflict";
+               break;
+       case CISS_LV_EJECTED:
+               status = "Volume has been ejected";
+               break;
+       case CISS_LV_UNDERGOING_ERASE:
+               status = "Volume undergoing background erase";
+               break;
+       case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD:
+               status = "Volume ready for predictive spare rebuild";
+               break;
+       case CISS_LV_UNDERGOING_RPI:
+               status = "Volume undergoing rapid parity initialization";
+               break;
+       case CISS_LV_PENDING_RPI:
+               status = "Volume queued for rapid parity initialization";
+               break;
+       case CISS_LV_ENCRYPTED_NO_KEY:
+               status = "Encrypted volume inaccessible - key not present";
+               break;
+       case CISS_LV_UNDERGOING_ENCRYPTION:
+               status = "Volume undergoing encryption process";
+               break;
+       case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING:
+               status = "Volume undergoing encryption re-keying process";
+               break;
+       case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
+               status =
+                       "Encrypted volume inaccessible - disabled on ctrl";
+               break;
+       case CISS_LV_PENDING_ENCRYPTION:
+               status = "Volume pending migration to encrypted state";
+               break;
+       case CISS_LV_PENDING_ENCRYPTION_REKEYING:
+               status = "Volume pending encryption rekeying";
+               break;
+       case CISS_LV_NOT_SUPPORTED:
+               status = "Volume not supported on this controller";
+               break;
+       case CISS_LV_STATUS_UNAVAILABLE:
+               status = "Volume status not available";
+               break;
+       default:
+               snprintf(unknown_state_buffer, sizeof(unknown_state_buffer),
+                       unknown_state_str, device->volume_status);
+               status = unknown_state_buffer;
+               break;
+       }
+
+       dev_info(&ctrl_info->pci_dev->dev,
+               "scsi %d:%d:%d:%d %s\n",
+               ctrl_info->scsi_host->host_no,
+               device->bus, device->target, device->lun, status);
+}
+
+static struct pqi_scsi_dev *pqi_find_disk_by_aio_handle(
+       struct pqi_ctrl_info *ctrl_info, u32 aio_handle)
+{
+       struct pqi_scsi_dev *device;
+
+       list_for_each_entry(device, &ctrl_info->scsi_device_list,
+               scsi_device_list_entry) {
+               if (device->devtype != TYPE_DISK && device->devtype != TYPE_ZBC)
+                       continue;
+               if (pqi_is_logical_device(device))
+                       continue;
+               if (device->aio_handle == aio_handle)
+                       return device;
+       }
+
+       return NULL;
+}
+
+static void pqi_update_logical_drive_queue_depth(
+       struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *logical_drive)
+{
+       unsigned int i;
+       struct raid_map *raid_map;
+       struct raid_map_disk_data *disk_data;
+       struct pqi_scsi_dev *phys_disk;
+       unsigned int num_phys_disks;
+       unsigned int num_raid_map_entries;
+       unsigned int queue_depth;
+
+       logical_drive->queue_depth = PQI_LOGICAL_DRIVE_DEFAULT_MAX_QUEUE_DEPTH;
+
+       raid_map = logical_drive->raid_map;
+       if (!raid_map)
+               return;
+
+       disk_data = raid_map->disk_data;
+       num_phys_disks = get_unaligned_le16(&raid_map->layout_map_count) *
+               (get_unaligned_le16(&raid_map->data_disks_per_row) +
+               get_unaligned_le16(&raid_map->metadata_disks_per_row));
+       num_raid_map_entries = num_phys_disks *
+               get_unaligned_le16(&raid_map->row_cnt);
+
+       queue_depth = 0;
+       for (i = 0; i < num_raid_map_entries; i++) {
+               phys_disk = pqi_find_disk_by_aio_handle(ctrl_info,
+                       disk_data[i].aio_handle);
+
+               if (!phys_disk) {
+                       dev_warn(&ctrl_info->pci_dev->dev,
+                               "failed to find physical disk for logical drive %016llx\n",
+                               get_unaligned_be64(logical_drive->scsi3addr));
+                       logical_drive->offload_enabled = false;
+                       logical_drive->offload_enabled_pending = false;
+                       kfree(raid_map);
+                       logical_drive->raid_map = NULL;
+                       return;
+               }
+
+               queue_depth += phys_disk->queue_depth;
+       }
+
+       logical_drive->queue_depth = queue_depth;
+}
+
+static void pqi_update_all_logical_drive_queue_depths(
+       struct pqi_ctrl_info *ctrl_info)
+{
+       struct pqi_scsi_dev *device;
+
+       list_for_each_entry(device, &ctrl_info->scsi_device_list,
+               scsi_device_list_entry) {
+               if (device->devtype != TYPE_DISK && device->devtype != TYPE_ZBC)
+                       continue;
+               if (!pqi_is_logical_device(device))
+                       continue;
+               pqi_update_logical_drive_queue_depth(ctrl_info, device);
+       }
+}
+
+static void pqi_rescan_worker(struct work_struct *work)
+{
+       struct pqi_ctrl_info *ctrl_info;
+
+       ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
+               rescan_work);
+
+       pqi_scan_scsi_devices(ctrl_info);
+}
+
+static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
+       struct pqi_scsi_dev *device)
+{
+       int rc;
+
+       if (pqi_is_logical_device(device))
+               rc = scsi_add_device(ctrl_info->scsi_host, device->bus,
+                       device->target, device->lun);
+       else
+               rc = pqi_add_sas_device(ctrl_info->sas_host, device);
+
+       return rc;
+}
+
+static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info,
+       struct pqi_scsi_dev *device)
+{
+       if (pqi_is_logical_device(device))
+               scsi_remove_device(device->sdev);
+       else
+               pqi_remove_sas_device(device);
+}
+
+/* Assumes the SCSI device list lock is held. */
+
+static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
+       int bus, int target, int lun)
+{
+       struct pqi_scsi_dev *device;
+
+       list_for_each_entry(device, &ctrl_info->scsi_device_list,
+               scsi_device_list_entry)
+               if (device->bus == bus && device->target == target &&
+                       device->lun == lun)
+                       return device;
+
+       return NULL;
+}
+
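+/*
+ * Two devices match only if both are physical or both are logical;
+ * physical devices are then compared by WWID, logical volumes by
+ * volume ID.
+ */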
+static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1,
+       struct pqi_scsi_dev *dev2)
+{
+       if (dev1->is_physical_device != dev2->is_physical_device)
+               return false;
+
+       if (dev1->is_physical_device)
+               return dev1->wwid == dev2->wwid;
+
+       return memcmp(dev1->volume_id, dev2->volume_id,
+               sizeof(dev1->volume_id)) == 0;
+}
+
+enum pqi_find_result {
+       DEVICE_NOT_FOUND,
+       DEVICE_CHANGED,
+       DEVICE_SAME,
+};
+
+static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
+       struct pqi_scsi_dev *device_to_find,
+       struct pqi_scsi_dev **matching_device)
+{
+       struct pqi_scsi_dev *device;
+
+       list_for_each_entry(device, &ctrl_info->scsi_device_list,
+               scsi_device_list_entry) {
+               if (pqi_scsi3addr_equal(device_to_find->scsi3addr,
+                       device->scsi3addr)) {
+                       *matching_device = device;
+                       if (pqi_device_equal(device_to_find, device)) {
+                               if (device_to_find->volume_offline)
+                                       return DEVICE_CHANGED;
+                               return DEVICE_SAME;
+                       }
+                       return DEVICE_CHANGED;
+               }
+       }
+
+       return DEVICE_NOT_FOUND;
+}
+
+static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
+       char *action, struct pqi_scsi_dev *device)
+{
+       dev_info(&ctrl_info->pci_dev->dev,
+               "%s scsi %d:%d:%d:%d: %s %.8s %.16s %-12s SSDSmartPathCap%c En%c Exp%c qd=%d\n",
+               action,
+               ctrl_info->scsi_host->host_no,
+               device->bus,
+               device->target,
+               device->lun,
+               scsi_device_type(device->devtype),
+               device->vendor,
+               device->model,
+               pqi_raid_level_to_string(device->raid_level),
+               device->offload_configured ? '+' : '-',
+               device->offload_enabled_pending ? '+' : '-',
+               device->expose_device ? '+' : '-',
+               device->queue_depth);
+}
+
+/* Assumes the SCSI device list lock is held. */
+
+static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
+       struct pqi_scsi_dev *new_device)
+{
+       existing_device->devtype = new_device->devtype;
+       existing_device->device_type = new_device->device_type;
+       existing_device->bus = new_device->bus;
+       if (new_device->target_lun_valid) {
+               existing_device->target = new_device->target;
+               existing_device->lun = new_device->lun;
+               existing_device->target_lun_valid = true;
+       }
+
+       /* By definition, the scsi3addr and wwid fields are already the same. */
+
+       existing_device->is_physical_device = new_device->is_physical_device;
+       existing_device->expose_device = new_device->expose_device;
+       existing_device->no_uld_attach = new_device->no_uld_attach;
+       existing_device->aio_enabled = new_device->aio_enabled;
+       memcpy(existing_device->vendor, new_device->vendor,
+               sizeof(existing_device->vendor));
+       memcpy(existing_device->model, new_device->model,
+               sizeof(existing_device->model));
+       existing_device->sas_address = new_device->sas_address;
+       existing_device->raid_level = new_device->raid_level;
+       existing_device->queue_depth = new_device->queue_depth;
+       existing_device->aio_handle = new_device->aio_handle;
+       existing_device->volume_status = new_device->volume_status;
+       existing_device->active_path_index = new_device->active_path_index;
+       existing_device->path_map = new_device->path_map;
+       existing_device->bay = new_device->bay;
+       memcpy(existing_device->box, new_device->box,
+               sizeof(existing_device->box));
+       memcpy(existing_device->phys_connector, new_device->phys_connector,
+               sizeof(existing_device->phys_connector));
+       existing_device->offload_configured = new_device->offload_configured;
+       existing_device->offload_enabled = false;
+       existing_device->offload_enabled_pending =
+               new_device->offload_enabled_pending;
+       existing_device->offload_to_mirror = 0;
+       kfree(existing_device->raid_map);
+       existing_device->raid_map = new_device->raid_map;
+
+       /* Prevent the raid_map from being freed when new_device is freed. */
+       new_device->raid_map = NULL;
+}
+
+static inline void pqi_free_device(struct pqi_scsi_dev *device)
+{
+       if (device) {
+               kfree(device->raid_map);
+               kfree(device);
+       }
+}
+
+/*
+ * Called when exposing a new device to the OS fails, in order to re-adjust
+ * our internal SCSI device list to match the SCSI ML's view.
+ */
+
+static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
+       struct pqi_scsi_dev *device)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+       list_del(&device->scsi_device_list_entry);
+       spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+
+       /* Allow the device structure to be freed later. */
+       device->keep_device = false;
+}
+
+static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
+       struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
+{
+       int rc;
+       unsigned int i;
+       unsigned long flags;
+       enum pqi_find_result find_result;
+       struct pqi_scsi_dev *device;
+       struct pqi_scsi_dev *next;
+       struct pqi_scsi_dev *matching_device;
+       struct list_head add_list;
+       struct list_head delete_list;
+
+       INIT_LIST_HEAD(&add_list);
+       INIT_LIST_HEAD(&delete_list);
+
+       /*
+        * The idea here is to do as little work as possible while holding the
+        * spinlock.  That's why we go to great pains to defer anything other
+        * than updating the internal device list until after we release the
+        * spinlock.
+        */
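+
+       /*
+        * Pass 1, under the lock: mark every existing device as gone,
+        * classify each newly reported device as SAME, CHANGED, or
+        * NOT_FOUND, move still-gone devices onto delete_list, and queue
+        * genuinely new devices on add_list.  Pass 2, after dropping the
+        * lock: perform the SCSI ML removals and additions and log the
+        * results.
+        */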
+
+       spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+
+       /* Assume that all devices in the existing list have gone away. */
+       list_for_each_entry(device, &ctrl_info->scsi_device_list,
+               scsi_device_list_entry)
+               device->device_gone = true;
+
+       for (i = 0; i < num_new_devices; i++) {
+               device = new_device_list[i];
+
+               find_result = pqi_scsi_find_entry(ctrl_info, device,
+                                               &matching_device);
+
+               switch (find_result) {
+               case DEVICE_SAME:
+                       /*
+                        * The newly found device is already in the existing
+                        * device list.
+                        */
+                       device->new_device = false;
+                       matching_device->device_gone = false;
+                       pqi_scsi_update_device(matching_device, device);
+                       break;
+               case DEVICE_NOT_FOUND:
+                       /*
+                        * The newly found device is NOT in the existing device
+                        * list.
+                        */
+                       device->new_device = true;
+                       break;
+               case DEVICE_CHANGED:
+                       /*
+                        * The original device has gone away and we need to add
+                        * the new device.
+                        */
+                       device->new_device = true;
+                       break;
+               default:
+                       WARN_ON(find_result);
+                       break;
+               }
+       }
+
+       /* Process all devices that have gone away. */
+       list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
+               scsi_device_list_entry) {
+               if (device->device_gone) {
+                       list_del(&device->scsi_device_list_entry);
+                       list_add_tail(&device->delete_list_entry, &delete_list);
+               }
+       }
+
+       /* Process all new devices. */
+       for (i = 0; i < num_new_devices; i++) {
+               device = new_device_list[i];
+               if (!device->new_device)
+                       continue;
+               if (device->volume_offline)
+                       continue;
+               list_add_tail(&device->scsi_device_list_entry,
+                       &ctrl_info->scsi_device_list);
+               list_add_tail(&device->add_list_entry, &add_list);
+               /* To prevent this device structure from being freed later. */
+               device->keep_device = true;
+       }
+
+       pqi_update_all_logical_drive_queue_depths(ctrl_info);
+
+       list_for_each_entry(device, &ctrl_info->scsi_device_list,
+               scsi_device_list_entry)
+               device->offload_enabled =
+                       device->offload_enabled_pending;
+
+       spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+
+       /* Remove all devices that have gone away. */
+       list_for_each_entry_safe(device, next, &delete_list,
+               delete_list_entry) {
+               if (device->sdev)
+                       pqi_remove_device(ctrl_info, device);
+               if (device->volume_offline) {
+                       pqi_dev_info(ctrl_info, "offline", device);
+                       pqi_show_volume_status(ctrl_info, device);
+               } else {
+                       pqi_dev_info(ctrl_info, "removed", device);
+               }
+               list_del(&device->delete_list_entry);
+               pqi_free_device(device);
+       }
+
+       /*
+        * Notify the SCSI ML if the queue depth of any existing device has
+        * changed.
+        */
+       list_for_each_entry(device, &ctrl_info->scsi_device_list,
+               scsi_device_list_entry) {
+               if (device->sdev && device->queue_depth !=
+                       device->advertised_queue_depth) {
+                       device->advertised_queue_depth = device->queue_depth;
+                       scsi_change_queue_depth(device->sdev,
+                               device->advertised_queue_depth);
+               }
+       }
+
+       /* Expose any new devices. */
+       list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
+               if (device->expose_device && !device->sdev) {
+                       rc = pqi_add_device(ctrl_info, device);
+                       if (rc) {
+                               dev_warn(&ctrl_info->pci_dev->dev,
+                                       "scsi %d:%d:%d:%d addition failed, device not added\n",
+                                       ctrl_info->scsi_host->host_no,
+                                       device->bus, device->target,
+                                       device->lun);
+                               pqi_fixup_botched_add(ctrl_info, device);
+                               continue;
+                       }
+               }
+               pqi_dev_info(ctrl_info, "added", device);
+       }
+}
+
+static bool pqi_is_supported_device(struct pqi_scsi_dev *device)
+{
+       bool is_supported = false;
+
+       switch (device->devtype) {
+       case TYPE_DISK:
+       case TYPE_ZBC:
+       case TYPE_TAPE:
+       case TYPE_MEDIUM_CHANGER:
+       case TYPE_ENCLOSURE:
+               is_supported = true;
+               break;
+       case TYPE_RAID:
+               /*
+                * Only support the HBA controller itself as a RAID
+                * controller.  If it's a RAID controller other than
+                * the HBA itself (an external RAID controller, MSA500
+                * or similar), we don't support it.
+                */
+               if (pqi_is_hba_lunid(device->scsi3addr))
+                       is_supported = true;
+               break;
+       }
+
+       return is_supported;
+}
+
+static inline bool pqi_skip_device(u8 *scsi3addr,
+       struct report_phys_lun_extended_entry *phys_lun_ext_entry)
+{
+       u8 device_flags;
+
+       if (!MASKED_DEVICE(scsi3addr))
+               return false;
+
+       /* The device is masked. */
+
+       device_flags = phys_lun_ext_entry->device_flags;
+
+       if (device_flags & REPORT_PHYS_LUN_DEV_FLAG_NON_DISK) {
+               /*
+                * It's a non-disk device.  We ignore all devices of this type
+                * when they're masked.
+                */
+               return true;
+       }
+
+       return false;
+}
+
+static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
+{
+       /* Expose all devices except for physical devices that are masked. */
+       if (device->is_physical_device && MASKED_DEVICE(device->scsi3addr))
+               return false;
+
+       return true;
+}
+
+static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
+{
+       int i;
+       int rc;
+       struct list_head new_device_list_head;
+       struct report_phys_lun_extended *physdev_list = NULL;
+       struct report_log_lun_extended *logdev_list = NULL;
+       struct report_phys_lun_extended_entry *phys_lun_ext_entry;
+       struct report_log_lun_extended_entry *log_lun_ext_entry;
+       struct bmic_identify_physical_device *id_phys = NULL;
+       u32 num_physicals;
+       u32 num_logicals;
+       struct pqi_scsi_dev **new_device_list = NULL;
+       struct pqi_scsi_dev *device;
+       struct pqi_scsi_dev *next;
+       unsigned int num_new_devices;
+       unsigned int num_valid_devices;
+       bool is_physical_device;
+       u8 *scsi3addr;
+       static char *out_of_memory_msg =
+               "out of memory, device discovery stopped";
+
+       INIT_LIST_HEAD(&new_device_list_head);
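+
+       /*
+        * Discovery proceeds in stages: fetch the physical and logical
+        * LUN lists from the controller, preallocate one pqi_scsi_dev
+        * per reported LUN, interrogate each device and filter out
+        * unsupported ones, then hand the surviving entries to
+        * pqi_update_device_list() for reconciliation.
+        */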
+
+       rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list);
+       if (rc)
+               goto out;
+
+       if (physdev_list)
+               num_physicals =
+                       get_unaligned_be32(&physdev_list->header.list_length)
+                               / sizeof(physdev_list->lun_entries[0]);
+       else
+               num_physicals = 0;
+
+       if (logdev_list)
+               num_logicals =
+                       get_unaligned_be32(&logdev_list->header.list_length)
+                               / sizeof(logdev_list->lun_entries[0]);
+       else
+               num_logicals = 0;
+
+       if (num_physicals) {
+               /*
+                * We need this buffer for calls to pqi_get_physical_disk_info()
+                * below.  We allocate it here instead of inside
+                * pqi_get_physical_disk_info() because it's a fairly large
+                * buffer.
+                */
+               id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL);
+               if (!id_phys) {
+                       dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
+                               out_of_memory_msg);
+                       rc = -ENOMEM;
+                       goto out;
+               }
+       }
+
+       num_new_devices = num_physicals + num_logicals;
+
+       new_device_list = kmalloc(sizeof(*new_device_list) *
+               num_new_devices, GFP_KERNEL);
+       if (!new_device_list) {
+               dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg);
+               rc = -ENOMEM;
+               goto out;
+       }
+
+       for (i = 0; i < num_new_devices; i++) {
+               device = kzalloc(sizeof(*device), GFP_KERNEL);
+               if (!device) {
+                       dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
+                               out_of_memory_msg);
+                       rc = -ENOMEM;
+                       goto out;
+               }
+               list_add_tail(&device->new_device_list_entry,
+                       &new_device_list_head);
+       }
+
+       device = NULL;
+       num_valid_devices = 0;
+
+       for (i = 0; i < num_new_devices; i++) {
+
+               if (i < num_physicals) {
+                       is_physical_device = true;
+                       phys_lun_ext_entry = &physdev_list->lun_entries[i];
+                       log_lun_ext_entry = NULL;
+                       scsi3addr = phys_lun_ext_entry->lunid;
+               } else {
+                       is_physical_device = false;
+                       phys_lun_ext_entry = NULL;
+                       log_lun_ext_entry =
+                               &logdev_list->lun_entries[i - num_physicals];
+                       scsi3addr = log_lun_ext_entry->lunid;
+               }
+
+               if (is_physical_device &&
+                       pqi_skip_device(scsi3addr, phys_lun_ext_entry))
+                       continue;
+
+               if (device)
+                       device = list_next_entry(device, new_device_list_entry);
+               else
+                       device = list_first_entry(&new_device_list_head,
+                               struct pqi_scsi_dev, new_device_list_entry);
+
+               memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
+               device->is_physical_device = is_physical_device;
+               device->raid_level = SA_RAID_UNKNOWN;
+
+               /* Gather information about the device. */
+               rc = pqi_get_device_info(ctrl_info, device);
+               if (rc == -ENOMEM) {
+                       dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
+                               out_of_memory_msg);
+                       goto out;
+               }
+               if (rc) {
+                       dev_warn(&ctrl_info->pci_dev->dev,
+                               "obtaining device info failed, skipping device %016llx\n",
+                               get_unaligned_be64(device->scsi3addr));
+                       rc = 0;
+                       continue;
+               }
+
+               if (!pqi_is_supported_device(device))
+                       continue;
+
+               pqi_assign_bus_target_lun(device);
+
+               device->expose_device = pqi_expose_device(device);
+
+               if (device->is_physical_device) {
+                       device->wwid = phys_lun_ext_entry->wwid;
+                       if ((phys_lun_ext_entry->device_flags &
+                               REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED) &&
+                               phys_lun_ext_entry->aio_handle)
+                               device->aio_enabled = true;
+               } else {
+                       memcpy(device->volume_id, log_lun_ext_entry->volume_id,
+                               sizeof(device->volume_id));
+               }
+
+               switch (device->devtype) {
+               case TYPE_DISK:
+               case TYPE_ZBC:
+               case TYPE_ENCLOSURE:
+                       if (device->is_physical_device) {
+                               device->sas_address =
+                                       get_unaligned_be64(&device->wwid);
+                               if (device->devtype == TYPE_DISK ||
+                                       device->devtype == TYPE_ZBC) {
+                                       device->aio_handle =
+                                               phys_lun_ext_entry->aio_handle;
+                                       pqi_get_physical_disk_info(ctrl_info,
+                                               device, id_phys);
+                               }
+                       }
+                       break;
+               }
+
+               new_device_list[num_valid_devices++] = device;
+       }
+
+       pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices);
+
+out:
+       list_for_each_entry_safe(device, next, &new_device_list_head,
+               new_device_list_entry) {
+               if (device->keep_device)
+                       continue;
+               list_del(&device->new_device_list_entry);
+               pqi_free_device(device);
+       }
+
+       kfree(new_device_list);
+       kfree(physdev_list);
+       kfree(logdev_list);
+       kfree(id_phys);
+
+       return rc;
+}
+
+static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info)
+{
+       unsigned long flags;
+       struct pqi_scsi_dev *device;
+       struct pqi_scsi_dev *next;
+
+       spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+
+       list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
+               scsi_device_list_entry) {
+               if (device->sdev)
+                       pqi_remove_device(ctrl_info, device);
+               list_del(&device->scsi_device_list_entry);
+               pqi_free_device(device);
+       }
+
+       spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+}
+
+static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
+{
+       int rc;
+
+       if (pqi_ctrl_offline(ctrl_info))
+               return -ENXIO;
+
+       mutex_lock(&ctrl_info->scan_mutex);
+
+       rc = pqi_update_scsi_devices(ctrl_info);
+       if (rc)
+               pqi_schedule_rescan_worker(ctrl_info);
+
+       mutex_unlock(&ctrl_info->scan_mutex);
+
+       return rc;
+}
+
+static void pqi_scan_start(struct Scsi_Host *shost)
+{
+       pqi_scan_scsi_devices(shost_to_hba(shost));
+}
+
+/* Returns TRUE if scan is finished. */
+
+static int pqi_scan_finished(struct Scsi_Host *shost,
+       unsigned long elapsed_time)
+{
+       struct pqi_ctrl_info *ctrl_info;
+
+       ctrl_info = shost_priv(shost);
+
+       return !mutex_is_locked(&ctrl_info->scan_mutex);
+}
+
+static inline void pqi_set_encryption_info(
+       struct pqi_encryption_info *encryption_info, struct raid_map *raid_map,
+       u64 first_block)
+{
+       u32 volume_blk_size;
+
+       /*
+        * Set the encryption tweak values based on logical block address.
+        * If the block size is 512, the tweak value is equal to the LBA.
+        * For other block sizes, the tweak value is (LBA * block size) / 512.
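+        *
+        * Example (values chosen for illustration): with a 4096-byte
+        * volume block size, first_block 8 becomes a tweak of
+        * (8 * 4096) / 512 = 64.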
+        */
+       volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size);
+       if (volume_blk_size != 512)
+               first_block = (first_block * volume_blk_size) / 512;
+
+       encryption_info->data_encryption_key_index =
+               get_unaligned_le16(&raid_map->data_encryption_key_index);
+       encryption_info->encrypt_tweak_lower = lower_32_bits(first_block);
+       encryption_info->encrypt_tweak_upper = upper_32_bits(first_block);
+}
+
+/*
+ * Attempt to perform offload RAID mapping for a logical volume I/O.
+ */
+
+#define PQI_RAID_BYPASS_INELIGIBLE     1
+
+static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
+       struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
+       struct pqi_queue_group *queue_group)
+{
+       struct raid_map *raid_map;
+       bool is_write = false;
+       u32 map_index;
+       u64 first_block;
+       u64 last_block;
+       u32 block_cnt;
+       u32 blocks_per_row;
+       u64 first_row;
+       u64 last_row;
+       u32 first_row_offset;
+       u32 last_row_offset;
+       u32 first_column;
+       u32 last_column;
+       u64 r0_first_row;
+       u64 r0_last_row;
+       u32 r5or6_blocks_per_row;
+       u64 r5or6_first_row;
+       u64 r5or6_last_row;
+       u32 r5or6_first_row_offset;
+       u32 r5or6_last_row_offset;
+       u32 r5or6_first_column;
+       u32 r5or6_last_column;
+       u16 data_disks_per_row;
+       u32 total_disks_per_row;
+       u16 layout_map_count;
+       u32 stripesize;
+       u16 strip_size;
+       u32 first_group;
+       u32 last_group;
+       u32 current_group;
+       u32 map_row;
+       u32 aio_handle;
+       u64 disk_block;
+       u32 disk_block_cnt;
+       u8 cdb[16];
+       u8 cdb_length;
+       int offload_to_mirror;
+       struct pqi_encryption_info *encryption_info_ptr;
+       struct pqi_encryption_info encryption_info;
+#if BITS_PER_LONG == 32
+       u64 tmpdiv;
+#endif
+
+       /* Check for valid opcode, get LBA and block count. */
+       switch (scmd->cmnd[0]) {
+       case WRITE_6:
+               is_write = true;
+               /* fall through */
+       case READ_6:
+               first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
+                       get_unaligned_be16(&scmd->cmnd[2]));
+               block_cnt = (u32)scmd->cmnd[4];
+               if (block_cnt == 0)
+                       block_cnt = 256;
+               break;
+       case WRITE_10:
+               is_write = true;
+               /* fall through */
+       case READ_10:
+               first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
+               block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
+               break;
+       case WRITE_12:
+               is_write = true;
+               /* fall through */
+       case READ_12:
+               first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
+               block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
+               break;
+       case WRITE_16:
+               is_write = true;
+               /* fall through */
+       case READ_16:
+               first_block = get_unaligned_be64(&scmd->cmnd[2]);
+               block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
+               break;
+       default:
+               /* Process via normal I/O path. */
+               return PQI_RAID_BYPASS_INELIGIBLE;
+       }
+
+       /* Check for write to non-RAID-0. */
+       if (is_write && device->raid_level != SA_RAID_0)
+               return PQI_RAID_BYPASS_INELIGIBLE;
+
+       if (unlikely(block_cnt == 0))
+               return PQI_RAID_BYPASS_INELIGIBLE;
+
+       last_block = first_block + block_cnt - 1;
+       raid_map = device->raid_map;
+
+       /* Check for invalid block or wraparound. */
+       if (last_block >= get_unaligned_le64(&raid_map->volume_blk_cnt) ||
+               last_block < first_block)
+               return PQI_RAID_BYPASS_INELIGIBLE;
+
+       data_disks_per_row = get_unaligned_le16(&raid_map->data_disks_per_row);
+       strip_size = get_unaligned_le16(&raid_map->strip_size);
+       layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);
+
+       /* Calculate stripe information for the request. */
+       blocks_per_row = data_disks_per_row * strip_size;
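+       /*
+        * Worked example (values assumed for illustration): with
+        * strip_size = 128 and 3 data disks per row, blocks_per_row is
+        * 384; first_block = 1000 then falls in row 2 (which covers
+        * blocks 768-1151), at row offset 232 and column 232 / 128 = 1.
+        */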
+#if BITS_PER_LONG == 32
+       tmpdiv = first_block;
+       do_div(tmpdiv, blocks_per_row);
+       first_row = tmpdiv;
+       tmpdiv = last_block;
+       do_div(tmpdiv, blocks_per_row);
+       last_row = tmpdiv;
+       first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
+       last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
+       tmpdiv = first_row_offset;
+       do_div(tmpdiv, strip_size);
+       first_column = tmpdiv;
+       tmpdiv = last_row_offset;
+       do_div(tmpdiv, strip_size);
+       last_column = tmpdiv;
+#else
+       first_row = first_block / blocks_per_row;
+       last_row = last_block / blocks_per_row;
+       first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
+       last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
+       first_column = first_row_offset / strip_size;
+       last_column = last_row_offset / strip_size;
+#endif
+
+       /* If this isn't a single row/column then give to the controller. */
+       if (first_row != last_row || first_column != last_column)
+               return PQI_RAID_BYPASS_INELIGIBLE;
+
+       /* Proceeding with driver mapping. */
+       total_disks_per_row = data_disks_per_row +
+               get_unaligned_le16(&raid_map->metadata_disks_per_row);
+       map_row = ((u32)(first_row >> raid_map->parity_rotation_shift)) %
+               get_unaligned_le16(&raid_map->row_cnt);
+       map_index = (map_row * total_disks_per_row) + first_column;
+
+       /* RAID 1: alternate reads between the primary and mirror drives. */
+       if (device->raid_level == SA_RAID_1) {
+               if (device->offload_to_mirror)
+                       map_index += data_disks_per_row;
+               device->offload_to_mirror = !device->offload_to_mirror;
+       } else if (device->raid_level == SA_RAID_ADM) {
+               /* RAID ADM */
+               /*
+                * Handles N-way mirrors (R1-ADM) and R10 with a number of
+                * drives divisible by 3.
+                */
+               offload_to_mirror = device->offload_to_mirror;
+               if (offload_to_mirror == 0)  {
+                       /* use physical disk in the first mirrored group. */
+                       map_index %= data_disks_per_row;
+               } else {
+                       do {
+                               /*
+                                * Determine mirror group that map_index
+                                * indicates.
+                                */
+                               current_group = map_index / data_disks_per_row;
+
+                               if (offload_to_mirror != current_group) {
+                                       if (current_group <
+                                               layout_map_count - 1) {
+                                               /*
+                                                * Select raid index from
+                                                * next group.
+                                                */
+                                               map_index += data_disks_per_row;
+                                               current_group++;
+                                       } else {
+                                               /*
+                                                * Select raid index from first
+                                                * group.
+                                                */
+                                               map_index %= data_disks_per_row;
+                                               current_group = 0;
+                                       }
+                               }
+                       } while (offload_to_mirror != current_group);
+               }
+
+               /* Set mirror group to use next time. */
+               offload_to_mirror =
+                       (offload_to_mirror >= layout_map_count - 1) ?
+                               0 : offload_to_mirror + 1;
+               WARN_ON(offload_to_mirror >= layout_map_count);
+               device->offload_to_mirror = offload_to_mirror;
+               /*
+                * Avoid direct use of device->offload_to_mirror within this
+                * function since multiple threads might simultaneously
+                * increment it beyond device->layout_map_count - 1.
+                */
+       } else if ((device->raid_level == SA_RAID_5 ||
+               device->raid_level == SA_RAID_6) && layout_map_count > 1) {
+               /* RAID 50/60 */
+               /* Verify first and last block are in same RAID group */
+               r5or6_blocks_per_row = strip_size * data_disks_per_row;
+               stripesize = r5or6_blocks_per_row * layout_map_count;
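+               /*
+                * Illustration (assumed values): RAID 50 with
+                * layout_map_count = 2, 3 data disks per row, and
+                * strip_size = 128 gives r5or6_blocks_per_row = 384 and
+                * stripesize = 768; blocks 0-383 of each stripe belong
+                * to group 0 and blocks 384-767 to group 1.
+                */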
+#if BITS_PER_LONG == 32
+               tmpdiv = first_block;
+               first_group = do_div(tmpdiv, stripesize);
+               tmpdiv = first_group;
+               do_div(tmpdiv, r5or6_blocks_per_row);
+               first_group = tmpdiv;
+               tmpdiv = last_block;
+               last_group = do_div(tmpdiv, stripesize);
+               tmpdiv = last_group;
+               do_div(tmpdiv, r5or6_blocks_per_row);
+               last_group = tmpdiv;
+#else
+               first_group = (first_block % stripesize) / r5or6_blocks_per_row;
+               last_group = (last_block % stripesize) / r5or6_blocks_per_row;
+#endif
+               if (first_group != last_group)
+                       return PQI_RAID_BYPASS_INELIGIBLE;
+
+               /* Verify request is in a single row of RAID 5/6 */
+#if BITS_PER_LONG == 32
+               tmpdiv = first_block;
+               do_div(tmpdiv, stripesize);
+               first_row = r5or6_first_row = r0_first_row = tmpdiv;
+               tmpdiv = last_block;
+               do_div(tmpdiv, stripesize);
+               r5or6_last_row = r0_last_row = tmpdiv;
+#else
+               first_row = r5or6_first_row = r0_first_row =
+                       first_block / stripesize;
+               r5or6_last_row = r0_last_row = last_block / stripesize;
+#endif
+               if (r5or6_first_row != r5or6_last_row)
+                       return PQI_RAID_BYPASS_INELIGIBLE;
+
+               /* Verify request is in a single column */
+#if BITS_PER_LONG == 32
+               tmpdiv = first_block;
+               first_row_offset = do_div(tmpdiv, stripesize);
+               tmpdiv = first_row_offset;
+               first_row_offset = (u32)do_div(tmpdiv, r5or6_blocks_per_row);
+               r5or6_first_row_offset = first_row_offset;
+               tmpdiv = last_block;
+               r5or6_last_row_offset = do_div(tmpdiv, stripesize);
+               tmpdiv = r5or6_last_row_offset;
+               r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
+               tmpdiv = r5or6_first_row_offset;
+               do_div(tmpdiv, strip_size);
+               first_column = r5or6_first_column = tmpdiv;
+               tmpdiv = r5or6_last_row_offset;
+               do_div(tmpdiv, strip_size);
+               r5or6_last_column = tmpdiv;
+#else
+               first_row_offset = r5or6_first_row_offset =
+                       (u32)((first_block % stripesize) %
+                       r5or6_blocks_per_row);
+
+               r5or6_last_row_offset =
+                       (u32)((last_block % stripesize) %
+                       r5or6_blocks_per_row);
+
+               first_column = r5or6_first_row_offset / strip_size;
+               r5or6_first_column = first_column;
+               r5or6_last_column = r5or6_last_row_offset / strip_size;
+#endif
+               if (r5or6_first_column != r5or6_last_column)
+                       return PQI_RAID_BYPASS_INELIGIBLE;
+
+               /* Request is eligible */
+               map_row =
+                       ((u32)(first_row >> raid_map->parity_rotation_shift)) %
+                       get_unaligned_le16(&raid_map->row_cnt);
+
+               map_index = (first_group *
+                       (get_unaligned_le16(&raid_map->row_cnt) *
+                       total_disks_per_row)) +
+                       (map_row * total_disks_per_row) + first_column;
+       }
+
+       if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
+               return PQI_RAID_BYPASS_INELIGIBLE;
+
+       aio_handle = raid_map->disk_data[map_index].aio_handle;
+       disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
+               first_row * strip_size +
+               (first_row_offset - first_column * strip_size);
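+       /*
+        * Continuing the earlier illustration: first_row = 2,
+        * first_row_offset = 232, first_column = 1, and strip_size = 128
+        * give disk_block = disk_starting_blk + 2 * 128 + (232 - 128) =
+        * disk_starting_blk + 360.
+        */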
+       disk_block_cnt = block_cnt;
+
+       /* Handle differing logical/physical block sizes. */
+       if (raid_map->phys_blk_shift) {
+               disk_block <<= raid_map->phys_blk_shift;
+               disk_block_cnt <<= raid_map->phys_blk_shift;
+       }
+
+       if (unlikely(disk_block_cnt > 0xffff))
+               return PQI_RAID_BYPASS_INELIGIBLE;
+
+       /* Build the new CDB for the physical disk I/O. */
+       if (disk_block > 0xffffffff) {
+               cdb[0] = is_write ? WRITE_16 : READ_16;
+               cdb[1] = 0;
+               put_unaligned_be64(disk_block, &cdb[2]);
+               put_unaligned_be32(disk_block_cnt, &cdb[10]);
+               cdb[14] = 0;
+               cdb[15] = 0;
+               cdb_length = 16;
+       } else {
+               cdb[0] = is_write ? WRITE_10 : READ_10;
+               cdb[1] = 0;
+               put_unaligned_be32((u32)disk_block, &cdb[2]);
+               cdb[6] = 0;
+               put_unaligned_be16((u16)disk_block_cnt, &cdb[7]);
+               cdb[9] = 0;
+               cdb_length = 10;
+       }
+
+       if (get_unaligned_le16(&raid_map->flags) &
+               RAID_MAP_ENCRYPTION_ENABLED) {
+               pqi_set_encryption_info(&encryption_info, raid_map,
+                       first_block);
+               encryption_info_ptr = &encryption_info;
+       } else {
+               encryption_info_ptr = NULL;
+       }
+
+       return pqi_aio_submit_io(ctrl_info, scmd, aio_handle,
+               cdb, cdb_length, queue_group, encryption_info_ptr);
+}
+
+#define PQI_STATUS_IDLE                0x0
+
+#define PQI_CREATE_ADMIN_QUEUE_PAIR    1
+#define PQI_DELETE_ADMIN_QUEUE_PAIR    2
+
+#define PQI_DEVICE_STATE_POWER_ON_AND_RESET            0x0
+#define PQI_DEVICE_STATE_STATUS_AVAILABLE              0x1
+#define PQI_DEVICE_STATE_ALL_REGISTERS_READY           0x2
+#define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY                0x3
+#define PQI_DEVICE_STATE_ERROR                         0x4
+
+#define PQI_MODE_READY_TIMEOUT_SECS            30
+#define PQI_MODE_READY_POLL_INTERVAL_MSECS     1
+
+static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info)
+{
+       struct pqi_device_registers __iomem *pqi_registers;
+       unsigned long timeout;
+       u64 signature;
+       u8 status;
+
+       pqi_registers = ctrl_info->pqi_registers;
+       timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies;
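+
+       /*
+        * PQI mode is ready once three conditions hold, polled here in
+        * order: the PQI signature is present, the function and status
+        * code reads PQI_STATUS_IDLE, and the device status reaches
+        * PQI_DEVICE_STATE_ALL_REGISTERS_READY.
+        */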
+
+       while (1) {
+               signature = readq(&pqi_registers->signature);
+               if (memcmp(&signature, PQI_DEVICE_SIGNATURE,
+                       sizeof(signature)) == 0)
+                       break;
+               if (time_after(jiffies, timeout)) {
+                       dev_err(&ctrl_info->pci_dev->dev,
+                               "timed out waiting for PQI signature\n");
+                       return -ETIMEDOUT;
+               }
+               msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
+       }
+
+       while (1) {
+               status = readb(&pqi_registers->function_and_status_code);
+               if (status == PQI_STATUS_IDLE)
+                       break;
+               if (time_after(jiffies, timeout)) {
+                       dev_err(&ctrl_info->pci_dev->dev,
+                               "timed out waiting for PQI IDLE\n");
+                       return -ETIMEDOUT;
+               }
+               msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
+       }
+
+       while (1) {
+               if (readl(&pqi_registers->device_status) ==
+                       PQI_DEVICE_STATE_ALL_REGISTERS_READY)
+                       break;
+               if (time_after(jiffies, timeout)) {
+                       dev_err(&ctrl_info->pci_dev->dev,
+                               "timed out waiting for PQI all registers ready\n");
+                       return -ETIMEDOUT;
+               }
+               msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
+       }
+
+       return 0;
+}
+
+static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request)
+{
+       struct pqi_scsi_dev *device;
+
+       device = io_request->scmd->device->hostdata;
+       device->offload_enabled = false;
+}
+
+static inline void pqi_take_device_offline(struct scsi_device *sdev)
+{
+       struct pqi_ctrl_info *ctrl_info;
+       struct pqi_scsi_dev *device;
+
+       if (scsi_device_online(sdev)) {
+               scsi_device_set_state(sdev, SDEV_OFFLINE);
+               ctrl_info = shost_to_hba(sdev->host);
+               schedule_delayed_work(&ctrl_info->rescan_work, 0);
+               device = sdev->hostdata;
+               dev_err(&ctrl_info->pci_dev->dev, "offlined scsi %d:%d:%d:%d\n",
+                       ctrl_info->scsi_host->host_no, device->bus,
+                       device->target, device->lun);
+       }
+}
+
+static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
+{
+       u8 scsi_status;
+       u8 host_byte;
+       struct scsi_cmnd *scmd;
+       struct pqi_raid_error_info *error_info;
+       size_t sense_data_length;
+       int residual_count;
+       int xfer_count;
+       struct scsi_sense_hdr sshdr;
+
+       scmd = io_request->scmd;
+       if (!scmd)
+               return;
+
+       error_info = io_request->error_info;
+       scsi_status = error_info->status;
+       host_byte = DID_OK;
+
+       if (error_info->data_out_result == PQI_DATA_IN_OUT_UNDERFLOW) {
+               xfer_count =
+                       get_unaligned_le32(&error_info->data_out_transferred);
+               residual_count = scsi_bufflen(scmd) - xfer_count;
+               scsi_set_resid(scmd, residual_count);
+               if (xfer_count < scmd->underflow)
+                       host_byte = DID_SOFT_ERROR;
+       }
+
+       sense_data_length = get_unaligned_le16(&error_info->sense_data_length);
+       if (sense_data_length == 0)
+               sense_data_length =
+                       get_unaligned_le16(&error_info->response_data_length);
+       if (sense_data_length) {
+               if (sense_data_length > sizeof(error_info->data))
+                       sense_data_length = sizeof(error_info->data);
+
+               if (scsi_status == SAM_STAT_CHECK_CONDITION &&
+                       scsi_normalize_sense(error_info->data,
+                               sense_data_length, &sshdr) &&
+                               sshdr.sense_key == HARDWARE_ERROR &&
+                               sshdr.asc == 0x3e &&
+                               sshdr.ascq == 0x1) {
+                       pqi_take_device_offline(scmd->device);
+                       host_byte = DID_NO_CONNECT;
+               }
+
+               if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
+                       sense_data_length = SCSI_SENSE_BUFFERSIZE;
+               memcpy(scmd->sense_buffer, error_info->data,
+                       sense_data_length);
+       }
+
+       scmd->result = scsi_status;
+       set_host_byte(scmd, host_byte);
+}
+
+static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
+{
+       u8 scsi_status;
+       u8 host_byte;
+       struct scsi_cmnd *scmd;
+       struct pqi_aio_error_info *error_info;
+       size_t sense_data_length;
+       int residual_count;
+       int xfer_count;
+       bool device_offline;
+
+       scmd = io_request->scmd;
+       error_info = io_request->error_info;
+       host_byte = DID_OK;
+       sense_data_length = 0;
+       device_offline = false;
+
+       switch (error_info->service_response) {
+       case PQI_AIO_SERV_RESPONSE_COMPLETE:
+               scsi_status = error_info->status;
+               break;
+       case PQI_AIO_SERV_RESPONSE_FAILURE:
+               switch (error_info->status) {
+               case PQI_AIO_STATUS_IO_ABORTED:
+                       scsi_status = SAM_STAT_TASK_ABORTED;
+                       break;
+               case PQI_AIO_STATUS_UNDERRUN:
+                       scsi_status = SAM_STAT_GOOD;
+                       residual_count = get_unaligned_le32(
+                                               &error_info->residual_count);
+                       scsi_set_resid(scmd, residual_count);
+                       xfer_count = scsi_bufflen(scmd) - residual_count;
+                       if (xfer_count < scmd->underflow)
+                               host_byte = DID_SOFT_ERROR;
+                       break;
+               case PQI_AIO_STATUS_OVERRUN:
+                       scsi_status = SAM_STAT_GOOD;
+                       break;
+               case PQI_AIO_STATUS_AIO_PATH_DISABLED:
+                       pqi_aio_path_disabled(io_request);
+                       scsi_status = SAM_STAT_GOOD;
+                       io_request->status = -EAGAIN;
+                       break;
+               case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
+               case PQI_AIO_STATUS_INVALID_DEVICE:
+                       device_offline = true;
+                       pqi_take_device_offline(scmd->device);
+                       host_byte = DID_NO_CONNECT;
+                       scsi_status = SAM_STAT_CHECK_CONDITION;
+                       break;
+               case PQI_AIO_STATUS_IO_ERROR:
+               default:
+                       scsi_status = SAM_STAT_CHECK_CONDITION;
+                       break;
+               }
+               break;
+       case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
+       case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
+               scsi_status = SAM_STAT_GOOD;
+               break;
+       case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
+       case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
+       default:
+               scsi_status = SAM_STAT_CHECK_CONDITION;
+               break;
+       }
+
+       if (error_info->data_present) {
+               sense_data_length =
+                       get_unaligned_le16(&error_info->data_length);
+               if (sense_data_length) {
+                       if (sense_data_length > sizeof(error_info->data))
+                               sense_data_length = sizeof(error_info->data);
+                       if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
+                               sense_data_length = SCSI_SENSE_BUFFERSIZE;
+                       memcpy(scmd->sense_buffer, error_info->data,
+                               sense_data_length);
+               }
+       }
+
+       if (device_offline && sense_data_length == 0)
+               scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR,
+                       0x3e, 0x1);
+
+       scmd->result = scsi_status;
+       set_host_byte(scmd, host_byte);
+}
+
+static void pqi_process_io_error(unsigned int iu_type,
+       struct pqi_io_request *io_request)
+{
+       switch (iu_type) {
+       case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
+               pqi_process_raid_io_error(io_request);
+               break;
+       case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
+               pqi_process_aio_io_error(io_request);
+               break;
+       }
+}
+
+static int pqi_interpret_task_management_response(
+       struct pqi_task_management_response *response)
+{
+       int rc;
+
+       switch (response->response_code) {
+       case SOP_TMF_COMPLETE:
+       case SOP_TMF_FUNCTION_SUCCEEDED:
+               rc = 0;
+               break;
+       default:
+               rc = -EIO;
+               break;
+       }
+
+       return rc;
+}
+
+static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
+       struct pqi_queue_group *queue_group)
+{
+       unsigned int num_responses;
+       pqi_index_t oq_pi;
+       pqi_index_t oq_ci;
+       struct pqi_io_request *io_request;
+       struct pqi_io_response *response;
+       u16 request_id;
+
+       num_responses = 0;
+       oq_ci = queue_group->oq_ci_copy;
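+
+       /*
+        * Consume responses from the local consumer index up to the
+        * controller's producer index; each response carries a
+        * request_id that selects the originating entry in
+        * io_request_pool.
+        */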
+
+       while (1) {
+               oq_pi = *queue_group->oq_pi;
+               if (oq_pi == oq_ci)
+                       break;
+
+               num_responses++;
+               response = queue_group->oq_element_array +
+                       (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
+
+               request_id = get_unaligned_le16(&response->request_id);
+               WARN_ON(request_id >= ctrl_info->max_io_slots);
+
+               io_request = &ctrl_info->io_request_pool[request_id];
+               WARN_ON(atomic_read(&io_request->refcount) == 0);
+
+               switch (response->header.iu_type) {
+               case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
+               case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
+               case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
+                       break;
+               case PQI_RESPONSE_IU_TASK_MANAGEMENT:
+                       io_request->status =
+                               pqi_interpret_task_management_response(
+                                       (void *)response);
+                       break;
+               case PQI_RESPONSE_IU_AIO_PATH_DISABLED:
+                       pqi_aio_path_disabled(io_request);
+                       io_request->status = -EAGAIN;
+                       break;
+               case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
+               case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
+                       io_request->error_info = ctrl_info->error_buffer +
+                               (get_unaligned_le16(&response->error_index) *
+                               PQI_ERROR_BUFFER_ELEMENT_LENGTH);
+                       pqi_process_io_error(response->header.iu_type,
+                               io_request);
+                       break;
+               default:
+                       dev_err(&ctrl_info->pci_dev->dev,
+                               "unexpected IU type: 0x%x\n",
+                               response->header.iu_type);
+                       WARN_ON(response->header.iu_type);
+                       break;
+               }
+
+               io_request->io_complete_callback(io_request,
+                       io_request->context);
+
+               /*
+                * Note that the I/O request structure CANNOT BE TOUCHED after
+                * returning from the I/O completion callback!
+                */
+
+               oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
+       }
+
+       if (num_responses) {
+               queue_group->oq_ci_copy = oq_ci;
+               writel(oq_ci, queue_group->oq_ci);
+       }
+
+       return num_responses;
+}
+
+static inline unsigned int pqi_num_elements_free(unsigned int pi,
+       unsigned int ci, unsigned int elements_in_queue)
+{
+       unsigned int num_elements_used;
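+       /*
+        * One element is always left unused so that a full queue can be
+        * distinguished from an empty one.  Example: pi = 3, ci = 7 in a
+        * 16-element queue gives num_elements_used = 16 - 7 + 3 = 12,
+        * leaving 16 - 12 - 1 = 3 free elements.
+        */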
+
+       if (pi >= ci)
+               num_elements_used = pi - ci;
+       else
+               num_elements_used = elements_in_queue - ci + pi;
+
+       return elements_in_queue - num_elements_used - 1;
+}
+
+#define PQI_EVENT_ACK_TIMEOUT  30
+
+static void pqi_start_event_ack(struct pqi_ctrl_info *ctrl_info,
+       struct pqi_event_acknowledge_request *iu, size_t iu_length)
+{
+       pqi_index_t iq_pi;
+       pqi_index_t iq_ci;
+       unsigned long flags;
+       void *next_element;
+       unsigned long timeout;
+       struct pqi_queue_group *queue_group;
+
+       queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP];
+       put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id);
+
+       timeout = (PQI_EVENT_ACK_TIMEOUT * HZ) + jiffies;
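+
+       /*
+        * Poll for a free element on the RAID path inbound queue,
+        * releasing the submit lock between polls so that normal request
+        * submission is not blocked while we wait for the controller to
+        * consume elements; give up after PQI_EVENT_ACK_TIMEOUT seconds.
+        */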
+
+       while (1) {
+               spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags);
+
+               iq_pi = queue_group->iq_pi_copy[RAID_PATH];
+               iq_ci = *queue_group->iq_ci[RAID_PATH];
+
+               if (pqi_num_elements_free(iq_pi, iq_ci,
+                       ctrl_info->num_elements_per_iq))
+                       break;
+
+               spin_unlock_irqrestore(
+                       &queue_group->submit_lock[RAID_PATH], flags);
+
+               if (time_after(jiffies, timeout)) {
+                       dev_err(&ctrl_info->pci_dev->dev,
+                               "sending event acknowledge timed out\n");
+                       return;
+               }
+       }
+
+       next_element = queue_group->iq_element_array[RAID_PATH] +
+               (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
+
+       memcpy(next_element, iu, iu_length);
+
+       iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq;
+
+       queue_group->iq_pi_copy[RAID_PATH] = iq_pi;
+
+       /*
+        * This write notifies the controller that an IU is available to be
+        * processed.
+        */
+       writel(iq_pi, queue_group->iq_pi[RAID_PATH]);
+
+       spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags);
+}
+
+static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
+       struct pqi_event *event)
+{
+       struct pqi_event_acknowledge_request request;
+
+       memset(&request, 0, sizeof(request));
+
+       request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT;
+       put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
+               &request.header.iu_length);
+       request.event_type = event->event_type;
+       request.event_id = event->event_id;
+       request.additional_event_id = event->additional_event_id;
+
+       pqi_start_event_ack(ctrl_info, &request, sizeof(request));
+}
+
+static void pqi_event_worker(struct work_struct *work)
+{
+       unsigned int i;
+       struct pqi_ctrl_info *ctrl_info;
+       struct pqi_event *pending_event;
+       bool got_non_heartbeat_event = false;
+
+       ctrl_info = container_of(work, struct pqi_ctrl_info, event_work);
+
+       pending_event = ctrl_info->pending_events;
+       for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
+               if (pending_event->pending) {
+                       pending_event->pending = false;
+                       pqi_acknowledge_event(ctrl_info, pending_event);
+                       if (i != PQI_EVENT_HEARTBEAT)
+                               got_non_heartbeat_event = true;
+               }
+               pending_event++;
+       }
+
+       if (got_non_heartbeat_event)
+               pqi_schedule_rescan_worker(ctrl_info);
+}
+
+static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
+{
+       unsigned int i;
+       unsigned int path;
+       struct pqi_queue_group *queue_group;
+       unsigned long flags;
+       struct pqi_io_request *io_request;
+       struct pqi_io_request *next;
+       struct scsi_cmnd *scmd;
+
+       ctrl_info->controller_online = false;
+       dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");
+
+       for (i = 0; i < ctrl_info->num_queue_groups; i++) {
+               queue_group = &ctrl_info->queue_groups[i];
+
+               for (path = 0; path < 2; path++) {
+                       spin_lock_irqsave(
+                               &queue_group->submit_lock[path], flags);
+
+                       list_for_each_entry_safe(io_request, next,
+                               &queue_group->request_list[path],
+                               request_list_entry) {
+
+                               scmd = io_request->scmd;
+                               if (scmd) {
+                                       set_host_byte(scmd, DID_NO_CONNECT);
+                                       pqi_scsi_done(scmd);
+                               }
+
+                               list_del(&io_request->request_list_entry);
+                       }
+
+                       spin_unlock_irqrestore(
+                               &queue_group->submit_lock[path], flags);
+               }
+       }
+}
+
+#define PQI_HEARTBEAT_TIMER_INTERVAL   (5 * HZ)
+#define PQI_MAX_HEARTBEAT_REQUESTS     5
+
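+/*
+ * Runs every PQI_HEARTBEAT_TIMER_INTERVAL.  If no interrupts arrived
+ * during the preceding interval, request a heartbeat event from the
+ * controller; once more than PQI_MAX_HEARTBEAT_REQUESTS consecutive
+ * intervals pass without any interrupt, take the controller offline.
+ */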
+static void pqi_heartbeat_timer_handler(unsigned long data)
+{
+       int num_interrupts;
+       struct pqi_ctrl_info *ctrl_info = (struct pqi_ctrl_info *)data;
+
+       num_interrupts = atomic_read(&ctrl_info->num_interrupts);
+
+       if (num_interrupts == ctrl_info->previous_num_interrupts) {
+               ctrl_info->num_heartbeats_requested++;
+               if (ctrl_info->num_heartbeats_requested >
+                       PQI_MAX_HEARTBEAT_REQUESTS) {
+                       pqi_take_ctrl_offline(ctrl_info);
+                       return;
+               }
+               ctrl_info->pending_events[PQI_EVENT_HEARTBEAT].pending = true;
+               schedule_work(&ctrl_info->event_work);
+       } else {
+               ctrl_info->num_heartbeats_requested = 0;
+       }
+
+       ctrl_info->previous_num_interrupts = num_interrupts;
+       mod_timer(&ctrl_info->heartbeat_timer,
+               jiffies + PQI_HEARTBEAT_TIMER_INTERVAL);
+}
+
+static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
+{
+       ctrl_info->previous_num_interrupts =
+               atomic_read(&ctrl_info->num_interrupts);
+
+       init_timer(&ctrl_info->heartbeat_timer);
+       ctrl_info->heartbeat_timer.expires =
+               jiffies + PQI_HEARTBEAT_TIMER_INTERVAL;
+       ctrl_info->heartbeat_timer.data = (unsigned long)ctrl_info;
+       ctrl_info->heartbeat_timer.function = pqi_heartbeat_timer_handler;
+       add_timer(&ctrl_info->heartbeat_timer);
+       ctrl_info->heartbeat_timer_started = true;
+}
+
+static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
+{
+       if (ctrl_info->heartbeat_timer_started)
+               del_timer_sync(&ctrl_info->heartbeat_timer);
+}
+
+static int pqi_event_type_to_event_index(unsigned int event_type)
+{
+       int index;
+
+       switch (event_type) {
+       case PQI_EVENT_TYPE_HEARTBEAT:
+               index = PQI_EVENT_HEARTBEAT;
+               break;
+       case PQI_EVENT_TYPE_HOTPLUG:
+               index = PQI_EVENT_HOTPLUG;
+               break;
+       case PQI_EVENT_TYPE_HARDWARE:
+               index = PQI_EVENT_HARDWARE;
+               break;
+       case PQI_EVENT_TYPE_PHYSICAL_DEVICE:
+               index = PQI_EVENT_PHYSICAL_DEVICE;
+               break;
+       case PQI_EVENT_TYPE_LOGICAL_DEVICE:
+               index = PQI_EVENT_LOGICAL_DEVICE;
+               break;
+       case PQI_EVENT_TYPE_AIO_STATE_CHANGE:
+               index = PQI_EVENT_AIO_STATE_CHANGE;
+               break;
+       case PQI_EVENT_TYPE_AIO_CONFIG_CHANGE:
+               index = PQI_EVENT_AIO_CONFIG_CHANGE;
+               break;
+       default:
+               index = -1;
+               break;
+       }
+
+       return index;
+}
+
+static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
+{
+       unsigned int num_events;
+       pqi_index_t oq_pi;
+       pqi_index_t oq_ci;
+       struct pqi_event_queue *event_queue;
+       struct pqi_event_response *response;
+       struct pqi_event *pending_event;
+       bool need_delayed_work;
+       int event_index;
+
+       event_queue = &ctrl_info->event_queue;
+       num_events = 0;
+       need_delayed_work = false;
+       oq_ci = event_queue->oq_ci_copy;
+
+       while (1) {
+               oq_pi = *event_queue->oq_pi;
+               if (oq_pi == oq_ci)
+                       break;
+
+               num_events++;
+               response = event_queue->oq_element_array +
+                       (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
+
+               event_index =
+                       pqi_event_type_to_event_index(response->event_type);
+
+               if (event_index >= 0) {
+                       if (response->request_acknowlege) {
+                               pending_event =
+                                       &ctrl_info->pending_events[event_index];
+                               pending_event->event_type =
+                                       response->event_type;
+                               pending_event->event_id = response->event_id;
+                               pending_event->additional_event_id =
+                                       response->additional_event_id;
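+                               /*
+                                * Heartbeat events are not queued for the
+                                * worker here; the heartbeat timer sets the
+                                * pending flag itself.
+                                */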
+                               if (event_index != PQI_EVENT_HEARTBEAT) {
+                                       pending_event->pending = true;
+                                       need_delayed_work = true;
+                               }
+                       }
+               }
+
+               oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
+       }
+
+       if (num_events) {
+               event_queue->oq_ci_copy = oq_ci;
+               writel(oq_ci, event_queue->oq_ci);
+
+               if (need_delayed_work)
+                       schedule_work(&ctrl_info->event_work);
+       }
+
+       return num_events;
+}
+
+static irqreturn_t pqi_irq_handler(int irq, void *data)
+{
+       struct pqi_ctrl_info *ctrl_info;
+       struct pqi_queue_group *queue_group;
+       unsigned int num_responses_handled;
+
+       queue_group = data;
+       ctrl_info = queue_group->ctrl_info;
+
+       if (!ctrl_info || !queue_group->oq_ci)
+               return IRQ_NONE;
+
+       num_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
+
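+       /*
+        * The event queue shares MSI-X vector 0 with the first queue
+        * group, so check for event responses on that vector as well.
+        */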
+       if (irq == ctrl_info->event_irq)
+               num_responses_handled += pqi_process_event_intr(ctrl_info);
+
+       if (num_responses_handled)
+               atomic_inc(&ctrl_info->num_interrupts);
+
+       pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
+       pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);
+
+       return IRQ_HANDLED;
+}
+
+static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
+{
+       int i;
+       int rc;
+
+       ctrl_info->event_irq = ctrl_info->msix_vectors[0];
+
+       for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
+               rc = request_irq(ctrl_info->msix_vectors[i],
+                       pqi_irq_handler, 0,
+                       DRIVER_NAME_SHORT, ctrl_info->intr_data[i]);
+               if (rc) {
+                       dev_err(&ctrl_info->pci_dev->dev,
+                               "irq %u init failed with error %d\n",
+                               ctrl_info->msix_vectors[i], rc);
+                       return rc;
+               }
+               ctrl_info->num_msix_vectors_initialized++;
+       }
+
+       return 0;
+}
+
+static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
+{
+       int i;
+
+       for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
+               free_irq(ctrl_info->msix_vectors[i],
+                       ctrl_info->intr_data[i]);
+}
+
+static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
+{
+       unsigned int i;
+       int max_vectors;
+       int num_vectors_enabled;
+       struct msix_entry msix_entries[PQI_MAX_MSIX_VECTORS];
+
+       max_vectors = ctrl_info->num_queue_groups;
+
+       for (i = 0; i < max_vectors; i++)
+               msix_entries[i].entry = i;
+
+       num_vectors_enabled = pci_enable_msix_range(ctrl_info->pci_dev,
+               msix_entries, PQI_MIN_MSIX_VECTORS, max_vectors);
+
+       if (num_vectors_enabled < 0) {
+               dev_err(&ctrl_info->pci_dev->dev,
+                       "MSI-X init failed with error %d\n",
+                       num_vectors_enabled);
+               return num_vectors_enabled;
+       }
+
+       ctrl_info->num_msix_vectors_enabled = num_vectors_enabled;
+       for (i = 0; i < num_vectors_enabled; i++) {
+               ctrl_info->msix_vectors[i] = msix_entries[i].vector;
+               ctrl_info->intr_data[i] = &ctrl_info->queue_groups[i];
+       }
+
+       return 0;
+}
+
+static void pqi_irq_set_affinity_hint(struct pqi_ctrl_info *ctrl_info)
+{
+       int i;
+       int rc;
+       int cpu;
+
+       cpu = cpumask_first(cpu_online_mask);
+       for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++) {
+               rc = irq_set_affinity_hint(ctrl_info->msix_vectors[i],
+                       get_cpu_mask(cpu));
+               if (rc)
+                       dev_err(&ctrl_info->pci_dev->dev,
+                               "error %d setting affinity hint for irq vector %u\n",
+                               rc, ctrl_info->msix_vectors[i]);
+               cpu = cpumask_next(cpu, cpu_online_mask);
+       }
+}
+
+static void pqi_irq_unset_affinity_hint(struct pqi_ctrl_info *ctrl_info)
+{
+       int i;
+
+       for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
+               irq_set_affinity_hint(ctrl_info->msix_vectors[i], NULL);
+}
+
+static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
+{
+       unsigned int i;
+       size_t alloc_length;
+       size_t element_array_length_per_iq;
+       size_t element_array_length_per_oq;
+       void *element_array;
+       void *next_queue_index;
+       void *aligned_pointer;
+       unsigned int num_inbound_queues;
+       unsigned int num_outbound_queues;
+       unsigned int num_queue_indexes;
+       struct pqi_queue_group *queue_group;
+
+       element_array_length_per_iq =
+               PQI_OPERATIONAL_IQ_ELEMENT_LENGTH *
+               ctrl_info->num_elements_per_iq;
+       element_array_length_per_oq =
+               PQI_OPERATIONAL_OQ_ELEMENT_LENGTH *
+               ctrl_info->num_elements_per_oq;
+       num_inbound_queues = ctrl_info->num_queue_groups * 2;
+       num_outbound_queues = ctrl_info->num_queue_groups;
+       num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1;
+
+       aligned_pointer = NULL;
+
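+       /*
+        * First pass: walk the layout starting from a NULL pointer so that
+        * the final pointer value equals the total length needed, including
+        * per-queue alignment padding.  Extra alignment is added at the end
+        * to cover aligning the real allocation base.
+        */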
+       for (i = 0; i < num_inbound_queues; i++) {
+               aligned_pointer = PTR_ALIGN(aligned_pointer,
+                       PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
+               aligned_pointer += element_array_length_per_iq;
+       }
+
+       for (i = 0; i < num_outbound_queues; i++) {
+               aligned_pointer = PTR_ALIGN(aligned_pointer,
+                       PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
+               aligned_pointer += element_array_length_per_oq;
+       }
+
+       aligned_pointer = PTR_ALIGN(aligned_pointer,
+               PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
+       aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS *
+               PQI_EVENT_OQ_ELEMENT_LENGTH;
+
+       for (i = 0; i < num_queue_indexes; i++) {
+               aligned_pointer = PTR_ALIGN(aligned_pointer,
+                       PQI_OPERATIONAL_INDEX_ALIGNMENT);
+               aligned_pointer += sizeof(pqi_index_t);
+       }
+
+       alloc_length = (size_t)aligned_pointer +
+               PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
+
+       ctrl_info->queue_memory_base =
+               dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
+                       alloc_length,
+                       &ctrl_info->queue_memory_base_dma_handle, GFP_KERNEL);
+
+       if (!ctrl_info->queue_memory_base) {
+               dev_err(&ctrl_info->pci_dev->dev,
+                       "failed to allocate memory for PQI operational queues\n");
+               return -ENOMEM;
+       }
+
+       ctrl_info->queue_memory_length = alloc_length;
+
+       element_array = PTR_ALIGN(ctrl_info->queue_memory_base,
+               PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
+
+       for (i = 0; i < ctrl_info->num_queue_groups; i++) {
+               queue_group = &ctrl_info->queue_groups[i];
+               queue_group->iq_element_array[RAID_PATH] = element_array;
+               queue_group->iq_element_array_bus_addr[RAID_PATH] =
+                       ctrl_info->queue_memory_base_dma_handle +
+                               (element_array - ctrl_info->queue_memory_base);
+               element_array += element_array_length_per_iq;
+               element_array = PTR_ALIGN(element_array,
+                       PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
+               queue_group->iq_element_array[AIO_PATH] = element_array;
+               queue_group->iq_element_array_bus_addr[AIO_PATH] =
+                       ctrl_info->queue_memory_base_dma_handle +
+                       (element_array - ctrl_info->queue_memory_base);
+               element_array += element_array_length_per_iq;
+               element_array = PTR_ALIGN(element_array,
+                       PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
+       }
+
+       for (i = 0; i < ctrl_info->num_queue_groups; i++) {
+               queue_group = &ctrl_info->queue_groups[i];
+               queue_group->oq_element_array = element_array;
+               queue_group->oq_element_array_bus_addr =
+                       ctrl_info->queue_memory_base_dma_handle +
+                       (element_array - ctrl_info->queue_memory_base);
+               element_array += element_array_length_per_oq;
+               element_array = PTR_ALIGN(element_array,
+                       PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
+       }
+
+       ctrl_info->event_queue.oq_element_array = element_array;
+       ctrl_info->event_queue.oq_element_array_bus_addr =
+               ctrl_info->queue_memory_base_dma_handle +
+               (element_array - ctrl_info->queue_memory_base);
+       element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS *
+               PQI_EVENT_OQ_ELEMENT_LENGTH;
+
+       next_queue_index = PTR_ALIGN(element_array,
+               PQI_OPERATIONAL_INDEX_ALIGNMENT);
+
+       for (i = 0; i < ctrl_info->num_queue_groups; i++) {
+               queue_group = &ctrl_info->queue_groups[i];
+               queue_group->iq_ci[RAID_PATH] = next_queue_index;
+               queue_group->iq_ci_bus_addr[RAID_PATH] =
+                       ctrl_info->queue_memory_base_dma_handle +
+                       (next_queue_index - ctrl_info->queue_memory_base);
+               next_queue_index += sizeof(pqi_index_t);
+               next_queue_index = PTR_ALIGN(next_queue_index,
+                       PQI_OPERATIONAL_INDEX_ALIGNMENT);
+               queue_group->iq_ci[AIO_PATH] = next_queue_index;
+               queue_group->iq_ci_bus_addr[AIO_PATH] =
+                       ctrl_info->queue_memory_base_dma_handle +
+                       (next_queue_index - ctrl_info->queue_memory_base);
+               next_queue_index += sizeof(pqi_index_t);
+               next_queue_index = PTR_ALIGN(next_queue_index,
+                       PQI_OPERATIONAL_INDEX_ALIGNMENT);
+               queue_group->oq_pi = next_queue_index;
+               queue_group->oq_pi_bus_addr =
+                       ctrl_info->queue_memory_base_dma_handle +
+                       (next_queue_index - ctrl_info->queue_memory_base);
+               next_queue_index += sizeof(pqi_index_t);
+               next_queue_index = PTR_ALIGN(next_queue_index,
+                       PQI_OPERATIONAL_INDEX_ALIGNMENT);
+       }
+
+       ctrl_info->event_queue.oq_pi = next_queue_index;
+       ctrl_info->event_queue.oq_pi_bus_addr =
+               ctrl_info->queue_memory_base_dma_handle +
+               (next_queue_index - ctrl_info->queue_memory_base);
+
+       return 0;
+}
+
+static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info)
+{
+       unsigned int i;
+       u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
+       u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
+
+       /*
+        * Initialize the backpointers to the controller structure in
+        * each operational queue group structure.
+        */
+       for (i = 0; i < ctrl_info->num_queue_groups; i++)
+               ctrl_info->queue_groups[i].ctrl_info = ctrl_info;
+
+       /*
+        * Assign IDs to all operational queues.  Note that the IDs
+        * assigned to operational IQs are independent of the IDs
+        * assigned to operational OQs.
+        */
+       ctrl_info->event_queue.oq_id = next_oq_id++;
+       for (i = 0; i < ctrl_info->num_queue_groups; i++) {
+               ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++;
+               ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++;
+               ctrl_info->queue_groups[i].oq_id = next_oq_id++;
+       }
+
+       /*
+        * Assign MSI-X table entry indexes to all queues.  Note that the
+        * interrupt for the event queue is shared with the first queue group.
+        */
+       ctrl_info->event_queue.int_msg_num = 0;
+       for (i = 0; i < ctrl_info->num_queue_groups; i++)
+               ctrl_info->queue_groups[i].int_msg_num = i;
+
+       for (i = 0; i < ctrl_info->num_queue_groups; i++) {
+               spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]);
+               spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]);
+               INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]);
+               INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]);
+       }
+}
+
+static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
+{
+       size_t alloc_length;
+       struct pqi_admin_queues_aligned *admin_queues_aligned;
+       struct pqi_admin_queues *admin_queues;
+
+       alloc_length = sizeof(struct pqi_admin_queues_aligned) +
+               PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
+
+       ctrl_info->admin_queue_memory_base =
+               dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
+                       alloc_length,
+                       &ctrl_info->admin_queue_memory_base_dma_handle,
+                       GFP_KERNEL);
+
+       if (!ctrl_info->admin_queue_memory_base)
+               return -ENOMEM;
+
+       ctrl_info->admin_queue_memory_length = alloc_length;
+
+       admin_queues = &ctrl_info->admin_queues;
+       admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base,
+               PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
+       admin_queues->iq_element_array =
+               &admin_queues_aligned->iq_element_array;
+       admin_queues->oq_element_array =
+               &admin_queues_aligned->oq_element_array;
+       admin_queues->iq_ci = &admin_queues_aligned->iq_ci;
+       admin_queues->oq_pi = &admin_queues_aligned->oq_pi;
+
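+       /*
+        * Convert the aligned kernel virtual addresses into bus addresses
+        * by applying their offsets from the allocation base to the DMA
+        * handle.
+        */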
+       admin_queues->iq_element_array_bus_addr =
+               ctrl_info->admin_queue_memory_base_dma_handle +
+               (admin_queues->iq_element_array -
+               ctrl_info->admin_queue_memory_base);
+       admin_queues->oq_element_array_bus_addr =
+               ctrl_info->admin_queue_memory_base_dma_handle +
+               (admin_queues->oq_element_array -
+               ctrl_info->admin_queue_memory_base);
+       admin_queues->iq_ci_bus_addr =
+               ctrl_info->admin_queue_memory_base_dma_handle +
+               ((void *)admin_queues->iq_ci -
+               ctrl_info->admin_queue_memory_base);
+       admin_queues->oq_pi_bus_addr =
+               ctrl_info->admin_queue_memory_base_dma_handle +
+               ((void *)admin_queues->oq_pi -
+               ctrl_info->admin_queue_memory_base);
+
+       return 0;
+}
+
+#define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES         HZ
+#define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS     1
+
+static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
+{
+       struct pqi_device_registers __iomem *pqi_registers;
+       struct pqi_admin_queues *admin_queues;
+       unsigned long timeout;
+       u8 status;
+       u32 reg;
+
+       pqi_registers = ctrl_info->pqi_registers;
+       admin_queues = &ctrl_info->admin_queues;
+
+       writeq((u64)admin_queues->iq_element_array_bus_addr,
+               &pqi_registers->admin_iq_element_array_addr);
+       writeq((u64)admin_queues->oq_element_array_bus_addr,
+               &pqi_registers->admin_oq_element_array_addr);
+       writeq((u64)admin_queues->iq_ci_bus_addr,
+               &pqi_registers->admin_iq_ci_addr);
+       writeq((u64)admin_queues->oq_pi_bus_addr,
+               &pqi_registers->admin_oq_pi_addr);
+
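+       /*
+        * Pack the admin queue parameters into a single register:
+        * IQ element count in bits 0-7, OQ element count in bits 8-15,
+        * and the MSI-X message number in bits 16 and up.
+        */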
+       reg = PQI_ADMIN_IQ_NUM_ELEMENTS |
+               (PQI_ADMIN_OQ_NUM_ELEMENTS) << 8 |
+               (admin_queues->int_msg_num << 16);
+       writel(reg, &pqi_registers->admin_iq_num_elements);
+       writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
+               &pqi_registers->function_and_status_code);
+
+       timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies;
+       while (1) {
+               status = readb(&pqi_registers->function_and_status_code);
+               if (status == PQI_STATUS_IDLE)
+                       break;
+               if (time_after(jiffies, timeout))
+                       return -ETIMEDOUT;
+               msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS);
+       }
+
+       /*
+        * The offset registers are not initialized to the correct
+        * offsets until *after* the create admin queue pair command
+        * completes successfully.
+        */
+       admin_queues->iq_pi = ctrl_info->iomem_base +
+               PQI_DEVICE_REGISTERS_OFFSET +
+               readq(&pqi_registers->admin_iq_pi_offset);
+       admin_queues->oq_ci = ctrl_info->iomem_base +
+               PQI_DEVICE_REGISTERS_OFFSET +
+               readq(&pqi_registers->admin_oq_ci_offset);
+
+       return 0;
+}
+
+static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info,
+       struct pqi_general_admin_request *request)
+{
+       struct pqi_admin_queues *admin_queues;
+       void *next_element;
+       pqi_index_t iq_pi;
+
+       admin_queues = &ctrl_info->admin_queues;
+       iq_pi = admin_queues->iq_pi_copy;
+
+       next_element = admin_queues->iq_element_array +
+               (iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH);
+
+       memcpy(next_element, request, sizeof(*request));
+
+       iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS;
+       admin_queues->iq_pi_copy = iq_pi;
+
+       /*
+        * This write notifies the controller that an IU is available to be
+        * processed.
+        */
+       writel(iq_pi, admin_queues->iq_pi);
+}
+
+static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
+       struct pqi_general_admin_response *response)
+{
+       struct pqi_admin_queues *admin_queues;
+       pqi_index_t oq_pi;
+       pqi_index_t oq_ci;
+       unsigned long timeout;
+
+       admin_queues = &ctrl_info->admin_queues;
+       oq_ci = admin_queues->oq_ci_copy;
+
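+       /* Allow the controller up to 3 seconds to post a response. */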
+       timeout = (3 * HZ) + jiffies;
+
+       while (1) {
+               oq_pi = *admin_queues->oq_pi;
+               if (oq_pi != oq_ci)
+                       break;
+               if (time_after(jiffies, timeout)) {
+                       dev_err(&ctrl_info->pci_dev->dev,
+                               "timed out waiting for admin response\n");
+                       return -ETIMEDOUT;
+               }
+               usleep_range(1000, 2000);
+       }
+
+       memcpy(response, admin_queues->oq_element_array +
+               (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response));
+
+       oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS;
+       admin_queues->oq_ci_copy = oq_ci;
+       writel(oq_ci, admin_queues->oq_ci);
+
+       return 0;
+}
+
+static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
+       struct pqi_queue_group *queue_group, enum pqi_io_path path,
+       struct pqi_io_request *io_request)
+{
+       struct pqi_io_request *next;
+       void *next_element;
+       pqi_index_t iq_pi;
+       pqi_index_t iq_ci;
+       size_t iu_length;
+       unsigned long flags;
+       unsigned int num_elements_needed;
+       unsigned int num_elements_to_end_of_queue;
+       size_t copy_count;
+       struct pqi_iu_header *request;
+
+       spin_lock_irqsave(&queue_group->submit_lock[path], flags);
+
+       if (io_request)
+               list_add_tail(&io_request->request_list_entry,
+                       &queue_group->request_list[path]);
+
+       iq_pi = queue_group->iq_pi_copy[path];
+
+       list_for_each_entry_safe(io_request, next,
+               &queue_group->request_list[path], request_list_entry) {
+
+               request = io_request->iu;
+
+               iu_length = get_unaligned_le16(&request->iu_length) +
+                       PQI_REQUEST_HEADER_LENGTH;
+               num_elements_needed =
+                       DIV_ROUND_UP(iu_length,
+                               PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
+
+               iq_ci = *queue_group->iq_ci[path];
+
+               if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci,
+                       ctrl_info->num_elements_per_iq))
+                       break;
+
+               put_unaligned_le16(queue_group->oq_id,
+                       &request->response_queue_id);
+
+               next_element = queue_group->iq_element_array[path] +
+                       (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
+
+               num_elements_to_end_of_queue =
+                       ctrl_info->num_elements_per_iq - iq_pi;
+
+               if (num_elements_needed <= num_elements_to_end_of_queue) {
+                       memcpy(next_element, request, iu_length);
+               } else {
+                       copy_count = num_elements_to_end_of_queue *
+                               PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
+                       memcpy(next_element, request, copy_count);
+                       memcpy(queue_group->iq_element_array[path],
+                               (u8 *)request + copy_count,
+                               iu_length - copy_count);
+               }
+
+               iq_pi = (iq_pi + num_elements_needed) %
+                       ctrl_info->num_elements_per_iq;
+
+               list_del(&io_request->request_list_entry);
+       }
+
+       if (iq_pi != queue_group->iq_pi_copy[path]) {
+               queue_group->iq_pi_copy[path] = iq_pi;
+               /*
+                * This write notifies the controller that one or more IUs are
+                * available to be processed.
+                */
+               writel(iq_pi, queue_group->iq_pi[path]);
+       }
+
+       spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
+}
+
+static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
+       void *context)
+{
+       struct completion *waiting = context;
+
+       complete(waiting);
+}
+
+static int pqi_submit_raid_request_synchronous_with_io_request(
+       struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request,
+       unsigned long timeout_msecs)
+{
+       int rc = 0;
+       DECLARE_COMPLETION_ONSTACK(wait);
+
+       io_request->io_complete_callback = pqi_raid_synchronous_complete;
+       io_request->context = &wait;
+
+       pqi_start_io(ctrl_info,
+               &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
+               io_request);
+
+       if (timeout_msecs == NO_TIMEOUT) {
+               wait_for_completion_io(&wait);
+       } else {
+               if (!wait_for_completion_io_timeout(&wait,
+                       msecs_to_jiffies(timeout_msecs))) {
+                       dev_warn(&ctrl_info->pci_dev->dev,
+                               "command timed out\n");
+                       rc = -ETIMEDOUT;
+               }
+       }
+
+       return rc;
+}
+
+static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
+       struct pqi_iu_header *request, unsigned int flags,
+       struct pqi_raid_error_info *error_info, unsigned long timeout_msecs)
+{
+       int rc;
+       struct pqi_io_request *io_request;
+       unsigned long start_jiffies;
+       unsigned long msecs_blocked;
+       size_t iu_length;
+
+       /*
+        * Note that specifying PQI_SYNC_FLAGS_INTERRUPTABLE and a timeout value
+        * are mutually exclusive.
+        */
+
+       if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) {
+               if (down_interruptible(&ctrl_info->sync_request_sem))
+                       return -ERESTARTSYS;
+       } else {
+               if (timeout_msecs == NO_TIMEOUT) {
+                       down(&ctrl_info->sync_request_sem);
+               } else {
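+                       /*
+                        * Time spent blocked on the semaphore counts
+                        * against the caller's remaining timeout.
+                        */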
+                       start_jiffies = jiffies;
+                       if (down_timeout(&ctrl_info->sync_request_sem,
+                               msecs_to_jiffies(timeout_msecs)))
+                               return -ETIMEDOUT;
+                       msecs_blocked =
+                               jiffies_to_msecs(jiffies - start_jiffies);
+                       if (msecs_blocked >= timeout_msecs)
+                               return -ETIMEDOUT;
+                       timeout_msecs -= msecs_blocked;
+               }
+       }
+
+       io_request = pqi_alloc_io_request(ctrl_info);
+
+       put_unaligned_le16(io_request->index,
+               &(((struct pqi_raid_path_request *)request)->request_id));
+
+       if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO)
+               ((struct pqi_raid_path_request *)request)->error_index =
+                       ((struct pqi_raid_path_request *)request)->request_id;
+
+       iu_length = get_unaligned_le16(&request->iu_length) +
+               PQI_REQUEST_HEADER_LENGTH;
+       memcpy(io_request->iu, request, iu_length);
+
+       rc = pqi_submit_raid_request_synchronous_with_io_request(ctrl_info,
+               io_request, timeout_msecs);
+
+       if (error_info) {
+               if (io_request->error_info)
+                       memcpy(error_info, io_request->error_info,
+                               sizeof(*error_info));
+               else
+                       memset(error_info, 0, sizeof(*error_info));
+       } else if (rc == 0 && io_request->error_info) {
+               u8 scsi_status;
+               struct pqi_raid_error_info *raid_error_info;
+
+               raid_error_info = io_request->error_info;
+               scsi_status = raid_error_info->status;
+
+               if (scsi_status == SAM_STAT_CHECK_CONDITION &&
+                       raid_error_info->data_out_result ==
+                       PQI_DATA_IN_OUT_UNDERFLOW)
+                       scsi_status = SAM_STAT_GOOD;
+
+               if (scsi_status != SAM_STAT_GOOD)
+                       rc = -EIO;
+       }
+
+       pqi_free_io_request(io_request);
+
+       up(&ctrl_info->sync_request_sem);
+
+       return rc;
+}
+
+static int pqi_validate_admin_response(
+       struct pqi_general_admin_response *response, u8 expected_function_code)
+{
+       if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN)
+               return -EINVAL;
+
+       if (get_unaligned_le16(&response->header.iu_length) !=
+               PQI_GENERAL_ADMIN_IU_LENGTH)
+               return -EINVAL;
+
+       if (response->function_code != expected_function_code)
+               return -EINVAL;
+
+       if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS)
+               return -EINVAL;
+
+       return 0;
+}
+
+static int pqi_submit_admin_request_synchronous(
+       struct pqi_ctrl_info *ctrl_info,
+       struct pqi_general_admin_request *request,
+       struct pqi_general_admin_response *response)
+{
+       int rc;
+
+       pqi_submit_admin_request(ctrl_info, request);
+
+       rc = pqi_poll_for_admin_response(ctrl_info, response);
+
+       if (rc == 0)
+               rc = pqi_validate_admin_response(response,
+                       request->function_code);
+
+       return rc;
+}
+
+static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
+{
+       int rc;
+       struct pqi_general_admin_request request;
+       struct pqi_general_admin_response response;
+       struct pqi_device_capability *capability;
+       struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor;
+
+       capability = kmalloc(sizeof(*capability), GFP_KERNEL);
+       if (!capability)
+               return -ENOMEM;
+
+       memset(&request, 0, sizeof(request));
+
+       request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
+       put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
+               &request.header.iu_length);
+       request.function_code =
+               PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY;
+       put_unaligned_le32(sizeof(*capability),
+               &request.data.report_device_capability.buffer_length);
+
+       rc = pqi_map_single(ctrl_info->pci_dev,
+               &request.data.report_device_capability.sg_descriptor,
+               capability, sizeof(*capability),
+               PCI_DMA_FROMDEVICE);
+       if (rc)
+               goto out;
+
+       rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
+               &response);
+
+       pqi_pci_unmap(ctrl_info->pci_dev,
+               &request.data.report_device_capability.sg_descriptor, 1,
+               PCI_DMA_FROMDEVICE);
+
+       if (rc)
+               goto out;
+
+       if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) {
+               rc = -EIO;
+               goto out;
+       }
+
+       ctrl_info->max_inbound_queues =
+               get_unaligned_le16(&capability->max_inbound_queues);
+       ctrl_info->max_elements_per_iq =
+               get_unaligned_le16(&capability->max_elements_per_iq);
+       ctrl_info->max_iq_element_length =
+               get_unaligned_le16(&capability->max_iq_element_length)
+               * 16;
+       ctrl_info->max_outbound_queues =
+               get_unaligned_le16(&capability->max_outbound_queues);
+       ctrl_info->max_elements_per_oq =
+               get_unaligned_le16(&capability->max_elements_per_oq);
+       ctrl_info->max_oq_element_length =
+               get_unaligned_le16(&capability->max_oq_element_length)
+               * 16;
+
+       sop_iu_layer_descriptor =
+               &capability->iu_layer_descriptors[PQI_PROTOCOL_SOP];
+
+       ctrl_info->max_inbound_iu_length_per_firmware =
+               get_unaligned_le16(
+                       &sop_iu_layer_descriptor->max_inbound_iu_length);
+       ctrl_info->inbound_spanning_supported =
+               sop_iu_layer_descriptor->inbound_spanning_supported;
+       ctrl_info->outbound_spanning_supported =
+               sop_iu_layer_descriptor->outbound_spanning_supported;
+
+out:
+       kfree(capability);
+
+       return rc;
+}
+
+static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info)
+{
+       if (ctrl_info->max_iq_element_length <
+               PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
+               dev_err(&ctrl_info->pci_dev->dev,
+                       "max. inbound queue element length of %d is less than the required length of %d\n",
+                       ctrl_info->max_iq_element_length,
+                       PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
+               return -EINVAL;
+       }
+
+       if (ctrl_info->max_oq_element_length <
+               PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) {
+               dev_err(&ctrl_info->pci_dev->dev,
+                       "max. outbound queue element length of %d is less than the required length of %d\n",
+                       ctrl_info->max_oq_element_length,
+                       PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
+               return -EINVAL;
+       }
+
+       if (ctrl_info->max_inbound_iu_length_per_firmware <
+               PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
+               dev_err(&ctrl_info->pci_dev->dev,
+                       "max. inbound IU length of %u is less than the min. required length of %d\n",
+                       ctrl_info->max_inbound_iu_length_per_firmware,
+                       PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
+               return -EINVAL;
+       }
+
+       if (!ctrl_info->inbound_spanning_supported) {
+               dev_err(&ctrl_info->pci_dev->dev,
+                       "the controller does not support inbound spanning\n");
+               return -EINVAL;
+       }
+
+       if (ctrl_info->outbound_spanning_supported) {
+               dev_err(&ctrl_info->pci_dev->dev,
+                       "the controller supports outbound spanning but this driver does not\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int pqi_delete_operational_queue(struct pqi_ctrl_info *ctrl_info,
+       bool inbound_queue, u16 queue_id)
+{
+       struct pqi_general_admin_request request;
+       struct pqi_general_admin_response response;
+
+       memset(&request, 0, sizeof(request));
+       request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
+       put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
+               &request.header.iu_length);
+       if (inbound_queue)
+               request.function_code =
+                       PQI_GENERAL_ADMIN_FUNCTION_DELETE_IQ;
+       else
+               request.function_code =
+                       PQI_GENERAL_ADMIN_FUNCTION_DELETE_OQ;
+       put_unaligned_le16(queue_id,
+               &request.data.delete_operational_queue.queue_id);
+
+       return pqi_submit_admin_request_synchronous(ctrl_info, &request,
+               &response);
+}
+
+static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info)
+{
+       int rc;
+       struct pqi_event_queue *event_queue;
+       struct pqi_general_admin_request request;
+       struct pqi_general_admin_response response;
+
+       event_queue = &ctrl_info->event_queue;
+
+       /*
+        * Create OQ (Outbound Queue - device to host queue) dedicated
+        * to events.
+        */
+       memset(&request, 0, sizeof(request));
+       request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
+       put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
+               &request.header.iu_length);
+       request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
+       put_unaligned_le16(event_queue->oq_id,
+               &request.data.create_operational_oq.queue_id);
+       put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr,
+               &request.data.create_operational_oq.element_array_addr);
+       put_unaligned_le64((u64)event_queue->oq_pi_bus_addr,
+               &request.data.create_operational_oq.pi_addr);
+       put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS,
+               &request.data.create_operational_oq.num_elements);
+       put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16,
+               &request.data.create_operational_oq.element_length);
+       request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
+       put_unaligned_le16(event_queue->int_msg_num,
+               &request.data.create_operational_oq.int_msg_num);
+
+       rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
+               &response);
+       if (rc)
+               return rc;
+
+       event_queue->oq_ci = ctrl_info->iomem_base +
+               PQI_DEVICE_REGISTERS_OFFSET +
+               get_unaligned_le64(
+                       &response.data.create_operational_oq.oq_ci_offset);
+
+       return 0;
+}
+
+static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info)
+{
+       unsigned int i;
+       int rc;
+       struct pqi_queue_group *queue_group;
+       struct pqi_general_admin_request request;
+       struct pqi_general_admin_response response;
+
+       i = ctrl_info->num_active_queue_groups;
+       queue_group = &ctrl_info->queue_groups[i];
+
+       /*
+        * Create IQ (Inbound Queue - host to device queue) for
+        * RAID path.
+        */
+       memset(&request, 0, sizeof(request));
+       request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
+       put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
+               &request.header.iu_length);
+       request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
+       put_unaligned_le16(queue_group->iq_id[RAID_PATH],
+               &request.data.create_operational_iq.queue_id);
+       put_unaligned_le64(
+               (u64)queue_group->iq_element_array_bus_addr[RAID_PATH],
+               &request.data.create_operational_iq.element_array_addr);
+       put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH],
+               &request.data.create_operational_iq.ci_addr);
+       put_unaligned_le16(ctrl_info->num_elements_per_iq,
+               &request.data.create_operational_iq.num_elements);
+       put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
+               &request.data.create_operational_iq.element_length);
+       request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
+
+       rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
+               &response);
+       if (rc) {
+               dev_err(&ctrl_info->pci_dev->dev,
+                       "error creating inbound RAID queue\n");
+               return rc;
+       }
+
+       queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base +
+               PQI_DEVICE_REGISTERS_OFFSET +
+               get_unaligned_le64(
+                       &response.data.create_operational_iq.iq_pi_offset);
+
+       /*
+        * Create IQ (Inbound Queue - host to device queue) for
+        * Advanced I/O (AIO) path.
+        */
+       memset(&request, 0, sizeof(request));
+       request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
+       put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
+               &request.header.iu_length);
+       request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
+       put_unaligned_le16(queue_group->iq_id[AIO_PATH],
+               &request.data.create_operational_iq.queue_id);
+       put_unaligned_le64(
+               (u64)queue_group->iq_element_array_bus_addr[AIO_PATH],
+               &request.data.create_operational_iq.element_array_addr);
+       put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH],
+               &request.data.create_operational_iq.ci_addr);
+       put_unaligned_le16(ctrl_info->num_elements_per_iq,
+               &request.data.create_operational_iq.num_elements);
+       put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
+               &request.data.create_operational_iq.element_length);
+       request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
+
+       rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
+               &response);
+       if (rc) {
+               dev_err(&ctrl_info->pci_dev->dev,
+                       "error creating inbound AIO queue\n");
+               goto delete_inbound_queue_raid;
+       }
+
+       queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base +
+               PQI_DEVICE_REGISTERS_OFFSET +
+               get_unaligned_le64(
+                       &response.data.create_operational_iq.iq_pi_offset);
+
+       /*
+        * Designate the 2nd IQ as the AIO path.  By default, all IQs are
+        * assumed to be for RAID path I/O unless we change the queue's
+        * property.
+        */
+       memset(&request, 0, sizeof(request));
+       request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
+       put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
+               &request.header.iu_length);
+       request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY;
+       put_unaligned_le16(queue_group->iq_id[AIO_PATH],
+               &request.data.change_operational_iq_properties.queue_id);
+       put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE,
+               &request.data.change_operational_iq_properties.vendor_specific);
+
+       rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
+               &response);
+       if (rc) {
+               dev_err(&ctrl_info->pci_dev->dev,
+                       "error changing queue property\n");
+               goto delete_inbound_queue_aio;
+       }
+
+       /*
+        * Create OQ (Outbound Queue - device to host queue).
+        */
+       memset(&request, 0, sizeof(request));
+       request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
+       put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
+               &request.header.iu_length);
+       request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
+       put_unaligned_le16(queue_group->oq_id,
+               &request.data.create_operational_oq.queue_id);
+       put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr,
+               &request.data.create_operational_oq.element_array_addr);
+       put_unaligned_le64((u64)queue_group->oq_pi_bus_addr,
+               &request.data.create_operational_oq.pi_addr);
+       put_unaligned_le16(ctrl_info->num_elements_per_oq,
+               &request.data.create_operational_oq.num_elements);
+       put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16,
+               &request.data.create_operational_oq.element_length);
+       request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
+       put_unaligned_le16(queue_group->int_msg_num,
+               &request.data.create_operational_oq.int_msg_num);
+
+       rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
+               &response);
+       if (rc) {
+               dev_err(&ctrl_info->pci_dev->dev,
+                       "error creating outbound queue\n");
+               goto delete_inbound_queue_aio;
+       }
+
+       queue_group->oq_ci = ctrl_info->iomem_base +
+               PQI_DEVICE_REGISTERS_OFFSET +
+               get_unaligned_le64(
+                       &response.data.create_operational_oq.oq_ci_offset);
+
+       ctrl_info->num_active_queue_groups++;
+
+       return 0;
+
+delete_inbound_queue_aio:
+       pqi_delete_operational_queue(ctrl_info, true,
+               queue_group->iq_id[AIO_PATH]);
+
+delete_inbound_queue_raid:
+       pqi_delete_operational_queue(ctrl_info, true,
+               queue_group->iq_id[RAID_PATH]);
+
+       return rc;
+}
+
+static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
+{
+       int rc;
+       unsigned int i;
+
+       rc = pqi_create_event_queue(ctrl_info);
+       if (rc) {
+               dev_err(&ctrl_info->pci_dev->dev,
+                       "error creating event queue\n");
+               return rc;
+       }
+
+       for (i = 0; i < ctrl_info->num_queue_groups; i++) {
+               rc = pqi_create_queue_group(ctrl_info);
+               if (rc) {
+                       dev_err(&ctrl_info->pci_dev->dev,
+                               "error creating queue group number %u/%u\n",
+                               i, ctrl_info->num_queue_groups);
+                       return rc;
+               }
+       }
+
+       return 0;
+}
+
+#define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH  \
+       (offsetof(struct pqi_event_config, descriptors) + \
+       (PQI_MAX_EVENT_DESCRIPTORS * sizeof(struct pqi_event_descriptor)))
+
+static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info)
+{
+       int rc;
+       unsigned int i;
+       struct pqi_event_config *event_config;
+       struct pqi_general_management_request request;
+
+       event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
+               GFP_KERNEL);
+       if (!event_config)
+               return -ENOMEM;
+
+       memset(&request, 0, sizeof(request));
+
+       request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;
+       put_unaligned_le16(offsetof(struct pqi_general_management_request,
+               data.report_event_configuration.sg_descriptors[1]) -
+               PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
+       put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
+               &request.data.report_event_configuration.buffer_length);
+
+       rc = pqi_map_single(ctrl_info->pci_dev,
+               request.data.report_event_configuration.sg_descriptors,
+               event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
+               PCI_DMA_FROMDEVICE);
+       if (rc)
+               goto out;
+
+       rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
+               0, NULL, NO_TIMEOUT);
+
+       pqi_pci_unmap(ctrl_info->pci_dev,
+               request.data.report_event_configuration.sg_descriptors, 1,
+               PCI_DMA_FROMDEVICE);
+
+       if (rc)
+               goto out;
+
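+       /*
+        * Retarget every event descriptor at our dedicated event queue,
+        * then write the modified configuration back to the controller.
+        */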
+       for (i = 0; i < event_config->num_event_descriptors; i++)
+               put_unaligned_le16(ctrl_info->event_queue.oq_id,
+                       &event_config->descriptors[i].oq_id);
+
+       memset(&request, 0, sizeof(request));
+
+       request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG;
+       put_unaligned_le16(offsetof(struct pqi_general_management_request,
+               data.report_event_configuration.sg_descriptors[1]) -
+               PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
+       put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
+               &request.data.report_event_configuration.buffer_length);
+
+       rc = pqi_map_single(ctrl_info->pci_dev,
+               request.data.report_event_configuration.sg_descriptors,
+               event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
+               PCI_DMA_TODEVICE);
+       if (rc)
+               goto out;
+
+       rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
+               NULL, NO_TIMEOUT);
+
+       pqi_pci_unmap(ctrl_info->pci_dev,
+               request.data.report_event_configuration.sg_descriptors, 1,
+               PCI_DMA_TODEVICE);
+
+out:
+       kfree(event_config);
+
+       return rc;
+}
+
+static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
+{
+       unsigned int i;
+       struct device *dev;
+       size_t sg_chain_buffer_length;
+       struct pqi_io_request *io_request;
+
+       if (!ctrl_info->io_request_pool)
+               return;
+
+       dev = &ctrl_info->pci_dev->dev;
+       sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
+       io_request = ctrl_info->io_request_pool;
+
+       for (i = 0; i < ctrl_info->max_io_slots; i++) {
+               kfree(io_request->iu);
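+               /*
+                * Allocation stops at the first request without an SG
+                * chain buffer, so freeing can stop there as well.
+                */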
+               if (!io_request->sg_chain_buffer)
+                       break;
+               dma_free_coherent(dev, sg_chain_buffer_length,
+                       io_request->sg_chain_buffer,
+                       io_request->sg_chain_buffer_dma_handle);
+               io_request++;
+       }
+
+       kfree(ctrl_info->io_request_pool);
+       ctrl_info->io_request_pool = NULL;
+}
+
+static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
+{
+       ctrl_info->error_buffer = dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
+               ctrl_info->error_buffer_length,
+               &ctrl_info->error_buffer_dma_handle, GFP_KERNEL);
+
+       if (!ctrl_info->error_buffer)
+               return -ENOMEM;
+
+       return 0;
+}
+
+static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
+{
+       unsigned int i;
+       void *sg_chain_buffer;
+       size_t sg_chain_buffer_length;
+       dma_addr_t sg_chain_buffer_dma_handle;
+       struct device *dev;
+       struct pqi_io_request *io_request;
+
+       ctrl_info->io_request_pool = kzalloc(ctrl_info->max_io_slots *
+               sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);
+
+       if (!ctrl_info->io_request_pool) {
+               dev_err(&ctrl_info->pci_dev->dev,
+                       "failed to allocate I/O request pool\n");
+               goto error;
+       }
+
+       dev = &ctrl_info->pci_dev->dev;
+       sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
+       io_request = ctrl_info->io_request_pool;
+
+       for (i = 0; i < ctrl_info->max_io_slots; i++) {
+               io_request->iu =
+                       kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);
+
+               if (!io_request->iu) {
+                       dev_err(&ctrl_info->pci_dev->dev,
+                               "failed to allocate IU buffers\n");
+                       goto error;
+               }
+
+               sg_chain_buffer = dma_alloc_coherent(dev,
+                       sg_chain_buffer_length, &sg_chain_buffer_dma_handle,
+                       GFP_KERNEL);
+
+               if (!sg_chain_buffer) {
+                       dev_err(&ctrl_info->pci_dev->dev,
+                               "failed to allocate PQI scatter-gather chain buffers\n");
+                       goto error;
+               }
+
+               io_request->index = i;
+               io_request->sg_chain_buffer = sg_chain_buffer;
+               io_request->sg_chain_buffer_dma_handle =
+                       sg_chain_buffer_dma_handle;
+               io_request++;
+       }
+
+       return 0;
+
+error:
+       pqi_free_all_io_requests(ctrl_info);
+
+       return -ENOMEM;
+}
+
+/*
+ * Calculate required resources that are sized based on max. outstanding
+ * requests and max. transfer size.
+ */
+
+static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
+{
+       u32 max_transfer_size;
+       u32 max_sg_entries;
+
+       ctrl_info->scsi_ml_can_queue =
+               ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS;
+       ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests;
+
+       ctrl_info->error_buffer_length =
+               ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;
+
+       max_transfer_size =
+               min(ctrl_info->max_transfer_size, PQI_MAX_TRANSFER_SIZE);
+
+       max_sg_entries = max_transfer_size / PAGE_SIZE;
+
+       /* +1 to cover when the buffer is not page-aligned. */
+       max_sg_entries++;
+
+       max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries);
+
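+       /*
+        * A non-page-aligned buffer can consume an extra entry, so only
+        * (max_sg_entries - 1) full pages of transfer are guaranteed.
+        */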
+       max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE;
+
+       ctrl_info->sg_chain_buffer_length =
+               max_sg_entries * sizeof(struct pqi_sg_descriptor);
+       ctrl_info->sg_tablesize = max_sg_entries;
+       ctrl_info->max_sectors = max_transfer_size / 512;
+}
+
+static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
+{
+       int num_cpus;
+       int max_queue_groups;
+       int num_queue_groups;
+       u16 num_elements_per_iq;
+       u16 num_elements_per_oq;
+
+       max_queue_groups = min(ctrl_info->max_inbound_queues / 2,
+               ctrl_info->max_outbound_queues - 1);
+       max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);
+
+       num_cpus = num_online_cpus();
+       num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors);
+       num_queue_groups = min(num_queue_groups, max_queue_groups);
+
+       ctrl_info->num_queue_groups = num_queue_groups;
+
+       /*
+        * Make sure that the max. inbound IU length is an even multiple
+        * of our inbound element length.
+        */
+       ctrl_info->max_inbound_iu_length =
+               (ctrl_info->max_inbound_iu_length_per_firmware /
+               PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) *
+               PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
+
+       num_elements_per_iq =
+               (ctrl_info->max_inbound_iu_length /
+               PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
+
+       /* Add one because one element in each queue is unusable. */
+       num_elements_per_iq++;
+
+       num_elements_per_iq = min(num_elements_per_iq,
+               ctrl_info->max_elements_per_iq);
+
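+       /*
+        * Each queue group's OQ receives responses from both of its IQs
+        * (RAID and AIO paths), so size it to absorb both.
+        */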
+       num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1;
+       num_elements_per_oq = min(num_elements_per_oq,
+               ctrl_info->max_elements_per_oq);
+
+       ctrl_info->num_elements_per_iq = num_elements_per_iq;
+       ctrl_info->num_elements_per_oq = num_elements_per_oq;
+
+       ctrl_info->max_sg_per_iu =
+               ((ctrl_info->max_inbound_iu_length -
+               PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
+               sizeof(struct pqi_sg_descriptor)) +
+               PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
+}
+
+static inline void pqi_set_sg_descriptor(
+       struct pqi_sg_descriptor *sg_descriptor, struct scatterlist *sg)
+{
+       u64 address = (u64)sg_dma_address(sg);
+       unsigned int length = sg_dma_len(sg);
+
+       put_unaligned_le64(address, &sg_descriptor->address);
+       put_unaligned_le32(length, &sg_descriptor->length);
+       put_unaligned_le32(0, &sg_descriptor->flags);
+}
+
+static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
+       struct pqi_raid_path_request *request, struct scsi_cmnd *scmd,
+       struct pqi_io_request *io_request)
+{
+       int i;
+       u16 iu_length;
+       int sg_count;
+       bool chained;
+       unsigned int num_sg_in_iu;
+       unsigned int max_sg_per_iu;
+       struct scatterlist *sg;
+       struct pqi_sg_descriptor *sg_descriptor;
+
+       sg_count = scsi_dma_map(scmd);
+       if (sg_count < 0)
+               return sg_count;
+
+       iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
+               PQI_REQUEST_HEADER_LENGTH;
+
+       if (sg_count == 0)
+               goto out;
+
+       sg = scsi_sglist(scmd);
+       sg_descriptor = request->sg_descriptors;
+       max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
+       chained = false;
+       num_sg_in_iu = 0;
+       i = 0;
+
+       while (1) {
+               pqi_set_sg_descriptor(sg_descriptor, sg);
+               if (!chained)
+                       num_sg_in_iu++;
+               i++;
+               if (i == sg_count)
+                       break;
+               sg_descriptor++;
+               if (i == max_sg_per_iu) {
+                       put_unaligned_le64(
+                               (u64)io_request->sg_chain_buffer_dma_handle,
+                               &sg_descriptor->address);
+                       put_unaligned_le32((sg_count - num_sg_in_iu)
+                               * sizeof(*sg_descriptor),
+                               &sg_descriptor->length);
+                       put_unaligned_le32(CISS_SG_CHAIN,
+                               &sg_descriptor->flags);
+                       chained = true;
+                       num_sg_in_iu++;
+                       sg_descriptor = io_request->sg_chain_buffer;
+               }
+               sg = sg_next(sg);
+       }
+
+       put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
+       request->partial = chained;
+       iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
+
+out:
+       put_unaligned_le16(iu_length, &request->header.iu_length);
+
+       return 0;
+}
+
+static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
+       struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
+       struct pqi_io_request *io_request)
+{
+       int i;
+       u16 iu_length;
+       int sg_count;
+       bool chained;
+       unsigned int num_sg_in_iu;
+       unsigned int max_sg_per_iu;
+       struct scatterlist *sg;
+       struct pqi_sg_descriptor *sg_descriptor;
+
+       sg_count = scsi_dma_map(scmd);
+       if (sg_count < 0)
+               return sg_count;
+
+       iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) -
+               PQI_REQUEST_HEADER_LENGTH;
+       num_sg_in_iu = 0;
+
+       if (sg_count == 0)
+               goto out;
+
+       sg = scsi_sglist(scmd);
+       sg_descriptor = request->sg_descriptors;
+       max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
+       chained = false;
+       i = 0;
+
+       while (1) {
+               pqi_set_sg_descriptor(sg_descriptor, sg);
+               if (!chained)
+                       num_sg_in_iu++;
+               i++;
+               if (i == sg_count)
+                       break;
+        * to it using SIS commands at first.  Once we are satisfied
+               if (i == max_sg_per_iu) {
+                       put_unaligned_le64(
+                               (u64)io_request->sg_chain_buffer_dma_handle,
+                               &sg_descriptor->address);
+                       put_unaligned_le32((sg_count - num_sg_in_iu)
+                               * sizeof(*sg_descriptor),
+                               &sg_descriptor->length);
+                       put_unaligned_le32(CISS_SG_CHAIN,
+                               &sg_descriptor->flags);
+                       chained = true;
+                       num_sg_in_iu++;
+                       sg_descriptor = io_request->sg_chain_buffer;
+               }
+               sg = sg_next(sg);
+       }
+
+       put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
+       request->partial = chained;
+       iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
+
+out:
+       put_unaligned_le16(iu_length, &request->header.iu_length);
+       request->num_sg_descriptors = num_sg_in_iu;
+
+       return 0;
+}
+
+static void pqi_raid_io_complete(struct pqi_io_request *io_request,
+       void *context)
+{
+       struct scsi_cmnd *scmd;
+
+       scmd = io_request->scmd;
+       pqi_free_io_request(io_request);
+       scsi_dma_unmap(scmd);
+       pqi_scsi_done(scmd);
+}
+
+static int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
+       struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
+       struct pqi_queue_group *queue_group)
+{
+       int rc;
+       size_t cdb_length;
+       struct pqi_io_request *io_request;
+       struct pqi_raid_path_request *request;
+
+       io_request = pqi_alloc_io_request(ctrl_info);
+       io_request->io_complete_callback = pqi_raid_io_complete;
+       io_request->scmd = scmd;
+
+       scmd->host_scribble = (unsigned char *)io_request;
+
+       request = io_request->iu;
+       memset(request, 0,
+               offsetof(struct pqi_raid_path_request, sg_descriptors));
+
+       request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
+       put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
+       request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
+       put_unaligned_le16(io_request->index, &request->request_id);
+       request->error_index = request->request_id;
+       memcpy(request->lun_number, device->scsi3addr,
+               sizeof(request->lun_number));
+
+       cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
+       memcpy(request->cdb, scmd->cmnd, cdb_length);
+
+       switch (cdb_length) {
+       case 6:
+       case 10:
+       case 12:
+       case 16:
+               /* No bytes in the Additional CDB bytes field */
+               request->additional_cdb_bytes_usage =
+                       SOP_ADDITIONAL_CDB_BYTES_0;
+               break;
+       case 20:
+               /* 4 bytes in the Additional CDB bytes field */
+               request->additional_cdb_bytes_usage =
+                       SOP_ADDITIONAL_CDB_BYTES_4;
+               break;
+       case 24:
+               /* 8 bytes in the Additional CDB bytes field */
+               request->additional_cdb_bytes_usage =
+                       SOP_ADDITIONAL_CDB_BYTES_8;
+               break;
+       case 28:
+               /* 12 bytes in the Additional CDB bytes field */
+               request->additional_cdb_bytes_usage =
+                       SOP_ADDITIONAL_CDB_BYTES_12;
+               break;
+       case 32:
+       default:
+               /* 16 bytes in the Additional CDB bytes field */
+               request->additional_cdb_bytes_usage =
+                       SOP_ADDITIONAL_CDB_BYTES_16;
+               break;
+       }
+
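+       /*
+        * SOP data-direction flags are from the controller's point of
+        * view: a host-to-device transfer (DMA_TO_DEVICE) is a read by
+        * the controller, and a device-to-host transfer is a write.
+        */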
+       switch (scmd->sc_data_direction) {
+       case DMA_TO_DEVICE:
+               request->data_direction = SOP_READ_FLAG;
+               break;
+       case DMA_FROM_DEVICE:
+               request->data_direction = SOP_WRITE_FLAG;
+               break;
+       case DMA_NONE:
+               request->data_direction = SOP_NO_DIRECTION_FLAG;
+               break;
+       case DMA_BIDIRECTIONAL:
+               request->data_direction = SOP_BIDIRECTIONAL;
+               break;
+       default:
+               dev_err(&ctrl_info->pci_dev->dev,
+                       "unknown data direction: %d\n",
+                       scmd->sc_data_direction);
+               WARN_ON(scmd->sc_data_direction);
+               break;
+       }
+
+       rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request);
+       if (rc) {
+               pqi_free_io_request(io_request);
+               return SCSI_MLQUEUE_HOST_BUSY;
+       }
+
+       pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request);
+
+       return 0;
+}
+
+static void pqi_aio_io_complete(struct pqi_io_request *io_request,
+       void *context)
+{
+       struct scsi_cmnd *scmd;
+
+       scmd = io_request->scmd;
+       scsi_dma_unmap(scmd);
+       if (io_request->status == -EAGAIN)
+               set_host_byte(scmd, DID_IMM_RETRY);
+       pqi_free_io_request(io_request);
+       pqi_scsi_done(scmd);
+}
+
+static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
+       struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
+       struct pqi_queue_group *queue_group)
+{
+       return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
+               scmd->cmnd, scmd->cmd_len, queue_group, NULL);
+}
+
+static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
+       struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
+       unsigned int cdb_length, struct pqi_queue_group *queue_group,
+       struct pqi_encryption_info *encryption_info)
+{
+       int rc;
+       struct pqi_io_request *io_request;
+       struct pqi_aio_path_request *request;
+
+       io_request = pqi_alloc_io_request(ctrl_info);
+       io_request->io_complete_callback = pqi_aio_io_complete;
+       io_request->scmd = scmd;
+
+       scmd->host_scribble = (unsigned char *)io_request;
+
+       request = io_request->iu;
+       memset(request, 0,
+               offsetof(struct pqi_aio_path_request, sg_descriptors));
+
+       request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO;
+       put_unaligned_le32(aio_handle, &request->nexus_id);
+       put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
+       request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
+       put_unaligned_le16(io_request->index, &request->request_id);
+       request->error_index = request->request_id;
+       if (cdb_length > sizeof(request->cdb))
+               cdb_length = sizeof(request->cdb);
+       request->cdb_length = cdb_length;
+       memcpy(request->cdb, cdb, cdb_length);
+
+       switch (scmd->sc_data_direction) {
+       case DMA_TO_DEVICE:
+               request->data_direction = SOP_READ_FLAG;
+               break;
+       case DMA_FROM_DEVICE:
+               request->data_direction = SOP_WRITE_FLAG;
+               break;
+       case DMA_NONE:
+               request->data_direction = SOP_NO_DIRECTION_FLAG;
+               break;
+       case DMA_BIDIRECTIONAL:
+               request->data_direction = SOP_BIDIRECTIONAL;
+               break;
+       default:
+               dev_err(&ctrl_info->pci_dev->dev,
+                       "unknown data direction: %d\n",
+                       scmd->sc_data_direction);
+               WARN_ON(scmd->sc_data_direction);
+               break;
+       }
+
+       if (encryption_info) {
+               request->encryption_enable = true;
+               put_unaligned_le16(encryption_info->data_encryption_key_index,
+                       &request->data_encryption_key_index);
+               put_unaligned_le32(encryption_info->encrypt_tweak_lower,
+                       &request->encrypt_tweak_lower);
+               put_unaligned_le32(encryption_info->encrypt_tweak_upper,
+                       &request->encrypt_tweak_upper);
+       }
+
+       rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request);
+       if (rc) {
+               pqi_free_io_request(io_request);
+               return SCSI_MLQUEUE_HOST_BUSY;
+       }
+
+       pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
+
+       return 0;
+}
+
+static int pqi_scsi_queue_command(struct Scsi_Host *shost,
+       struct scsi_cmnd *scmd)
+{
+       int rc;
+       struct pqi_ctrl_info *ctrl_info;
+       struct pqi_scsi_dev *device;
+       u16 hwq;
+       struct pqi_queue_group *queue_group;
+       bool raid_bypassed;
+
+       device = scmd->device->hostdata;
+       ctrl_info = shost_to_hba(shost);
+
+       if (pqi_ctrl_offline(ctrl_info)) {
+               set_host_byte(scmd, DID_NO_CONNECT);
+               pqi_scsi_done(scmd);
+               return 0;
+       }
+
+       /*
+        * This is necessary because the SCSI midlayer doesn't zero out
+        * this field during error recovery.
+        */
+       scmd->result = 0;
+
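+       /*
+        * Route the command to the queue group backing its blk-mq
+        * hardware queue, falling back to group 0 if the tag maps
+        * outside our range.
+        */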
+       hwq = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request));
+       if (hwq >= ctrl_info->num_queue_groups)
+               hwq = 0;
+
+       queue_group = &ctrl_info->queue_groups[hwq];
+
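+       /*
+        * For logical volumes with offload enabled, first try to bypass
+        * the RAID stack and issue filesystem I/O directly on the AIO
+        * path; the listed return codes are treated as final, so the
+        * command is not resubmitted down the RAID path.
+        */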
+       if (pqi_is_logical_device(device)) {
+               raid_bypassed = false;
+               if (device->offload_enabled &&
+                       scmd->request->cmd_type == REQ_TYPE_FS) {
+                       rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device,
+                               scmd, queue_group);
+                       if (rc == 0 ||
+                               rc == SCSI_MLQUEUE_HOST_BUSY ||
+                               rc == SAM_STAT_CHECK_CONDITION ||
+                               rc == SAM_STAT_RESERVATION_CONFLICT)
+                               raid_bypassed = true;
+               }
+               if (!raid_bypassed)
+                       rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
+                               queue_group);
+       } else {
+               if (device->aio_enabled)
+                       rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd,
+                               queue_group);
+               else
+                       rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
+                               queue_group);
+       }
+
+       return rc;
+}
+
+static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
+       void *context)
+{
+       struct completion *waiting = context;
+
+       complete(waiting);
+}
+
+#define PQI_LUN_RESET_TIMEOUT_SECS     10
+
+static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
+       struct pqi_scsi_dev *device, struct completion *wait)
+{
+       int rc;
+       unsigned int wait_secs = 0;
+
+       while (1) {
+               if (wait_for_completion_io_timeout(wait,
+                       PQI_LUN_RESET_TIMEOUT_SECS * HZ)) {
+                       rc = 0;
+                       break;
+               }
+
+               pqi_check_ctrl_health(ctrl_info);
+               if (pqi_ctrl_offline(ctrl_info)) {
+                       rc = -ETIMEDOUT;
+                       break;
+               }
+
+               wait_secs += PQI_LUN_RESET_TIMEOUT_SECS;
+
+               dev_err(&ctrl_info->pci_dev->dev,
+                       "resetting scsi %d:%d:%d:%d - still waiting after %u seconds\n",
+                       ctrl_info->scsi_host->host_no, device->bus,
+                       device->target, device->lun, wait_secs);
+       }
+
+       return rc;
+}
+
+static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info,
+       struct pqi_scsi_dev *device)
+{
+       int rc;
+       struct pqi_io_request *io_request;
+       DECLARE_COMPLETION_ONSTACK(wait);
+       struct pqi_task_management_request *request;
+
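+       /* Bound concurrent LUN resets to the I/O slots reserved for them. */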
+       down(&ctrl_info->lun_reset_sem);
+
+       io_request = pqi_alloc_io_request(ctrl_info);
+       io_request->io_complete_callback = pqi_lun_reset_complete;
+       io_request->context = &wait;
+
+       request = io_request->iu;
+       memset(request, 0, sizeof(*request));
+
+       request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT;
+       put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH,
+               &request->header.iu_length);
+       put_unaligned_le16(io_request->index, &request->request_id);
+       memcpy(request->lun_number, device->scsi3addr,
+               sizeof(request->lun_number));
+       request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
+
+       pqi_start_io(ctrl_info,
+               &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
+               io_request);
+
+       rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait);
+       if (rc == 0)
+               rc = io_request->status;
+
+       pqi_free_io_request(io_request);
+       up(&ctrl_info->lun_reset_sem);
+
+       return rc;
+}
+
+/* Performs a reset at the LUN level. */
+
+static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
+       struct pqi_scsi_dev *device)
+{
+       int rc;
+
+       pqi_check_ctrl_health(ctrl_info);
+       if (pqi_ctrl_offline(ctrl_info))
+               return FAILED;
+
+       rc = pqi_lun_reset(ctrl_info, device);
+
+       return rc == 0 ? SUCCESS : FAILED;
+}
+
+static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
+{
+       int rc;
+       struct pqi_ctrl_info *ctrl_info;
+       struct pqi_scsi_dev *device;
+
+       ctrl_info = shost_to_hba(scmd->device->host);
+       device = scmd->device->hostdata;
+
+       dev_err(&ctrl_info->pci_dev->dev,
+               "resetting scsi %d:%d:%d:%d\n",
+               ctrl_info->scsi_host->host_no,
+               device->bus, device->target, device->lun);
+
+       rc = pqi_device_reset(ctrl_info, device);
+
+       dev_err(&ctrl_info->pci_dev->dev,
+               "reset of scsi %d:%d:%d:%d: %s\n",
+               ctrl_info->scsi_host->host_no,
+               device->bus, device->target, device->lun,
+               rc == SUCCESS ? "SUCCESS" : "FAILED");
+
+       return rc;
+}
+
+static int pqi_slave_alloc(struct scsi_device *sdev)
+{
+       struct pqi_scsi_dev *device;
+       unsigned long flags;
+       struct pqi_ctrl_info *ctrl_info;
+       struct scsi_target *starget;
+       struct sas_rphy *rphy;
+
+       ctrl_info = shost_to_hba(sdev->host);
+
+       spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+
+       if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) {
+               starget = scsi_target(sdev);
+               rphy = target_to_rphy(starget);
+               device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
+               if (device) {
+                       device->target = sdev_id(sdev);
+                       device->lun = sdev->lun;
+                       device->target_lun_valid = true;
+               }
+       } else {
+               device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev),
+                       sdev_id(sdev), sdev->lun);
+       }
+
+       if (device && device->expose_device) {
+               sdev->hostdata = device;
+               device->sdev = sdev;
+               if (device->queue_depth) {
+                       device->advertised_queue_depth = device->queue_depth;
+                       scsi_change_queue_depth(sdev,
+                               device->advertised_queue_depth);
+               }
+       }
+
+       spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+
+       return 0;
+}
+
+static int pqi_slave_configure(struct scsi_device *sdev)
+{
+       struct pqi_scsi_dev *device;
+
+       device = sdev->hostdata;
+       if (!device->expose_device)
+               sdev->no_uld_attach = true;
+
+       return 0;
+}
+
+static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info,
+       void __user *arg)
+{
+       struct pci_dev *pci_dev;
+       u32 subsystem_vendor;
+       u32 subsystem_device;
+       cciss_pci_info_struct pciinfo;
+
+       if (!arg)
+               return -EINVAL;
+
+       pci_dev = ctrl_info->pci_dev;
+
+       pciinfo.domain = pci_domain_nr(pci_dev->bus);
+       pciinfo.bus = pci_dev->bus->number;
+       pciinfo.dev_fn = pci_dev->devfn;
+       subsystem_vendor = pci_dev->subsystem_vendor;
+       subsystem_device = pci_dev->subsystem_device;
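+       /*
+        * board_id packs the subsystem device ID into the upper 16 bits
+        * and the subsystem vendor ID into the lower 16 bits.
+        */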
+       pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) |
+               subsystem_vendor;
+
+       if (copy_to_user(arg, &pciinfo, sizeof(pciinfo)))
+               return -EFAULT;
+
+       return 0;
+}
+
+static int pqi_getdrivver_ioctl(void __user *arg)
+{
+       u32 version;
+
+       if (!arg)
+               return -EINVAL;
+
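+       /*
+        * Pack the driver version into one 32-bit word: major in bits
+        * 31:28, minor in bits 27:24, release in bits 23:16, and
+        * revision in bits 15:0.
+        */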
+       version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) |
+               (DRIVER_RELEASE << 16) | DRIVER_REVISION;
+
+       if (copy_to_user(arg, &version, sizeof(version)))
+               return -EFAULT;
+
+       return 0;
+}
+
+struct ciss_error_info {
+       u8      scsi_status;
+       int     command_status;
+       size_t  sense_data_length;
+};
+
+static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info,
+       struct ciss_error_info *ciss_error_info)
+{
+       int ciss_cmd_status;
+       size_t sense_data_length;
+
+       switch (pqi_error_info->data_out_result) {
+       case PQI_DATA_IN_OUT_GOOD:
+               ciss_cmd_status = CISS_CMD_STATUS_SUCCESS;
+               break;
+       case PQI_DATA_IN_OUT_UNDERFLOW:
+               ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN;
+               break;
+       case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
+               ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN;
+               break;
+       case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
+       case PQI_DATA_IN_OUT_BUFFER_ERROR:
+       case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
+       case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
+       case PQI_DATA_IN_OUT_ERROR:
+               ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR;
+               break;
+       case PQI_DATA_IN_OUT_HARDWARE_ERROR:
+       case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
+       case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
+       case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
+       case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
+       case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
+       case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
+       case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
+       case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
+       case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
+               ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR;
+               break;
+       case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
+               ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT;
+               break;
+       case PQI_DATA_IN_OUT_ABORTED:
+               ciss_cmd_status = CISS_CMD_STATUS_ABORTED;
+               break;
+       case PQI_DATA_IN_OUT_TIMEOUT:
+               ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT;
+               break;
+       default:
+               ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS;
+               break;
+       }
+
+       sense_data_length =
+               get_unaligned_le16(&pqi_error_info->sense_data_length);
+       if (sense_data_length == 0)
+               sense_data_length = get_unaligned_le16(
+                       &pqi_error_info->response_data_length);
+       if (sense_data_length > sizeof(pqi_error_info->data))
+               sense_data_length = sizeof(pqi_error_info->data);
+
+       ciss_error_info->scsi_status = pqi_error_info->status;
+       ciss_error_info->command_status = ciss_cmd_status;
+       ciss_error_info->sense_data_length = sense_data_length;
+}
+
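+/*
+ * Hypothetical userspace sketch (illustrative only; error handling
+ * omitted): issue a 6-byte INQUIRY through CCISS_PASSTHRU on a file
+ * descriptor opened on a SCSI device owned by this driver.
+ *
+ *     IOCTL_Command_struct ioc = { 0 };
+ *     unsigned char inq[96];
+ *
+ *     ioc.Request.CDBLen = 6;
+ *     ioc.Request.Type.Type = TYPE_CMD;
+ *     ioc.Request.Type.Attribute = ATTR_SIMPLE;
+ *     ioc.Request.Type.Direction = XFER_READ;
+ *     ioc.Request.CDB[0] = 0x12;         (INQUIRY opcode)
+ *     ioc.Request.CDB[4] = sizeof(inq);  (allocation length)
+ *     ioc.buf_size = sizeof(inq);
+ *     ioc.buf = inq;
+ *     if (ioctl(fd, CCISS_PASSTHRU, &ioc) == 0)
+ *             (inq now holds the INQUIRY data)
+ */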
+static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
+{
+       int rc;
+       char *kernel_buffer = NULL;
+       u16 iu_length;
+       size_t sense_data_length;
+       IOCTL_Command_struct iocommand;
+       struct pqi_raid_path_request request;
+       struct pqi_raid_error_info pqi_error_info;
+       struct ciss_error_info ciss_error_info;
+
+       if (pqi_ctrl_offline(ctrl_info))
+               return -ENXIO;
+       if (!arg)
+               return -EINVAL;
+       if (!capable(CAP_SYS_RAWIO))
+               return -EPERM;
+       if (copy_from_user(&iocommand, arg, sizeof(iocommand)))
+               return -EFAULT;
+       if (iocommand.buf_size < 1 &&
+               iocommand.Request.Type.Direction != XFER_NONE)
+               return -EINVAL;
+       if (iocommand.Request.CDBLen > sizeof(request.cdb))
+               return -EINVAL;
+       if (iocommand.Request.Type.Type != TYPE_CMD)
+               return -EINVAL;
+
+       switch (iocommand.Request.Type.Direction) {
+       case XFER_NONE:
+       case XFER_WRITE:
+       case XFER_READ:
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       if (iocommand.buf_size > 0) {
+               kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL);
+               if (!kernel_buffer)
+                       return -ENOMEM;
+               if (iocommand.Request.Type.Direction & XFER_WRITE) {
+                       if (copy_from_user(kernel_buffer, iocommand.buf,
+                               iocommand.buf_size)) {
+                               rc = -EFAULT;
+                               goto out;
+                       }
+               } else {
+                       memset(kernel_buffer, 0, iocommand.buf_size);
+               }
+       }
+
+       memset(&request, 0, sizeof(request));
+
+       request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
+       iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
+               PQI_REQUEST_HEADER_LENGTH;
+       memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes,
+               sizeof(request.lun_number));
+       memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen);
+       request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
+
+       switch (iocommand.Request.Type.Direction) {
+       case XFER_NONE:
+               request.data_direction = SOP_NO_DIRECTION_FLAG;
+               break;
+       case XFER_WRITE:
+               request.data_direction = SOP_WRITE_FLAG;
+               break;
+       case XFER_READ:
+               request.data_direction = SOP_READ_FLAG;
+               break;
+       }
+
+       request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
+
+       if (iocommand.buf_size > 0) {
+               put_unaligned_le32(iocommand.buf_size, &request.buffer_length);
+
+               rc = pqi_map_single(ctrl_info->pci_dev,
+                       &request.sg_descriptors[0], kernel_buffer,
+                       iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
+               if (rc)
+                       goto out;
+
+               iu_length += sizeof(request.sg_descriptors[0]);
+       }
+
+       put_unaligned_le16(iu_length, &request.header.iu_length);
+
+       rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
+               PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info, NO_TIMEOUT);
+
+       if (iocommand.buf_size > 0)
+               pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
+                       PCI_DMA_BIDIRECTIONAL);
+
+       memset(&iocommand.error_info, 0, sizeof(iocommand.error_info));
+
+       if (rc == 0) {
+               pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info);
+               iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status;
+               iocommand.error_info.CommandStatus =
+                       ciss_error_info.command_status;
+               sense_data_length = ciss_error_info.sense_data_length;
+               if (sense_data_length) {
+                       if (sense_data_length >
+                               sizeof(iocommand.error_info.SenseInfo))
+                               sense_data_length =
+                                       sizeof(iocommand.error_info.SenseInfo);
+                       memcpy(iocommand.error_info.SenseInfo,
+                               pqi_error_info.data, sense_data_length);
+                       iocommand.error_info.SenseLen = sense_data_length;
+               }
+       }
+
+       if (copy_to_user(arg, &iocommand, sizeof(iocommand))) {
+               rc = -EFAULT;
+               goto out;
+       }
+
+       if (rc == 0 && iocommand.buf_size > 0 &&
+               (iocommand.Request.Type.Direction & XFER_READ)) {
+               if (copy_to_user(iocommand.buf, kernel_buffer,
+                       iocommand.buf_size)) {
+                       rc = -EFAULT;
+               }
+       }
+
+out:
+       kfree(kernel_buffer);
+
+       return rc;
+}
+
+static int pqi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
+{
+       int rc;
+       struct pqi_ctrl_info *ctrl_info;
+
+       ctrl_info = shost_to_hba(sdev->host);
+
+       switch (cmd) {
+       case CCISS_DEREGDISK:
+       case CCISS_REGNEWDISK:
+       case CCISS_REGNEWD:
+               rc = pqi_scan_scsi_devices(ctrl_info);
+               break;
+       case CCISS_GETPCIINFO:
+               rc = pqi_getpciinfo_ioctl(ctrl_info, arg);
+               break;
+       case CCISS_GETDRIVVER:
+               rc = pqi_getdrivver_ioctl(arg);
+               break;
+       case CCISS_PASSTHRU:
+               rc = pqi_passthru_ioctl(ctrl_info, arg);
+               break;
+       default:
+               rc = -EINVAL;
+               break;
+       }
+
+       return rc;
+}
+
+static ssize_t pqi_version_show(struct device *dev,
+       struct device_attribute *attr, char *buffer)
+{
+       ssize_t count = 0;
+       struct Scsi_Host *shost;
+       struct pqi_ctrl_info *ctrl_info;
+
+       shost = class_to_shost(dev);
+       ctrl_info = shost_to_hba(shost);
+
+       count += snprintf(buffer + count, PAGE_SIZE - count,
+               "  driver: %s\n", DRIVER_VERSION BUILD_TIMESTAMP);
+
+       count += snprintf(buffer + count, PAGE_SIZE - count,
+               "firmware: %s\n", ctrl_info->firmware_version);
+
+       return count;
+}
+
+static ssize_t pqi_host_rescan_store(struct device *dev,
+       struct device_attribute *attr, const char *buffer, size_t count)
+{
+       struct Scsi_Host *shost = class_to_shost(dev);
+
+       pqi_scan_start(shost);
+
+       return count;
+}
+
+static DEVICE_ATTR(version, S_IRUGO, pqi_version_show, NULL);
+static DEVICE_ATTR(rescan, S_IWUSR, NULL, pqi_host_rescan_store);
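+
+/*
+ * Usage sketch (host number is hypothetical): any write to the rescan
+ * attribute kicks off a rescan of the controller's devices:
+ *
+ *     echo 1 > /sys/class/scsi_host/host0/rescan
+ */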
+
+static struct device_attribute *pqi_shost_attrs[] = {
+       &dev_attr_version,
+       &dev_attr_rescan,
+       NULL
+};
+
+static ssize_t pqi_sas_address_show(struct device *dev,
+       struct device_attribute *attr, char *buffer)
+{
+       struct pqi_ctrl_info *ctrl_info;
+       struct scsi_device *sdev;
+       struct pqi_scsi_dev *device;
+       unsigned long flags;
+       u64 sas_address;
+
+       sdev = to_scsi_device(dev);
+       ctrl_info = shost_to_hba(sdev->host);
+
+       spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+
+       device = sdev->hostdata;
+       if (pqi_is_logical_device(device)) {
+               spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
+                       flags);
+               return -ENODEV;
+       }
+       sas_address = device->sas_address;
+
+       spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+
+       return snprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
+}
+
+static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
+       struct device_attribute *attr, char *buffer)
+{
+       struct pqi_ctrl_info *ctrl_info;
+       struct scsi_device *sdev;
+       struct pqi_scsi_dev *device;
+       unsigned long flags;
+
+       sdev = to_scsi_device(dev);
+       ctrl_info = shost_to_hba(sdev->host);
+
+       spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+
+       device = sdev->hostdata;
+       buffer[0] = device->offload_enabled ? '1' : '0';
+       buffer[1] = '\n';
+       buffer[2] = '\0';
+
+       spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+
+       return 2;
+}
+
+static DEVICE_ATTR(sas_address, S_IRUGO, pqi_sas_address_show, NULL);
+static DEVICE_ATTR(ssd_smart_path_enabled, S_IRUGO,
+       pqi_ssd_smart_path_enabled_show, NULL);
+
+static struct device_attribute *pqi_sdev_attrs[] = {
+       &dev_attr_sas_address,
+       &dev_attr_ssd_smart_path_enabled,
+       NULL
+};
+
+static struct scsi_host_template pqi_driver_template = {
+       .module = THIS_MODULE,
+       .name = DRIVER_NAME_SHORT,
+       .proc_name = DRIVER_NAME_SHORT,
+       .queuecommand = pqi_scsi_queue_command,
+       .scan_start = pqi_scan_start,
+       .scan_finished = pqi_scan_finished,
+       .this_id = -1,
+       .use_clustering = ENABLE_CLUSTERING,
+       .eh_device_reset_handler = pqi_eh_device_reset_handler,
+       .ioctl = pqi_ioctl,
+       .slave_alloc = pqi_slave_alloc,
+       .slave_configure = pqi_slave_configure,
+       .sdev_attrs = pqi_sdev_attrs,
+       .shost_attrs = pqi_shost_attrs,
+};
+
+static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
+{
+       int rc;
+       struct Scsi_Host *shost;
+
+       shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info));
+       if (!shost) {
+               dev_err(&ctrl_info->pci_dev->dev,
+                       "scsi_host_alloc failed for controller %u\n",
+                       ctrl_info->ctrl_id);
+               return -ENOMEM;
+       }
+
+       shost->io_port = 0;
+       shost->n_io_port = 0;
+       shost->this_id = -1;
+       shost->max_channel = PQI_MAX_BUS;
+       shost->max_cmd_len = MAX_COMMAND_SIZE;
+       shost->max_lun = ~0;
+       shost->max_id = ~0;
+       shost->max_sectors = ctrl_info->max_sectors;
+       shost->can_queue = ctrl_info->scsi_ml_can_queue;
+       shost->cmd_per_lun = shost->can_queue;
+       shost->sg_tablesize = ctrl_info->sg_tablesize;
+       shost->transportt = pqi_sas_transport_template;
+       shost->irq = ctrl_info->msix_vectors[0];
+       shost->unique_id = shost->irq;
+       shost->nr_hw_queues = ctrl_info->num_queue_groups;
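+       /*
+        * The host private area was sized as sizeof(ctrl_info) above,
+        * i.e. exactly one pointer; stash our per-controller state there.
+        */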
+       shost->hostdata[0] = (unsigned long)ctrl_info;
+
+       rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
+       if (rc) {
+               dev_err(&ctrl_info->pci_dev->dev,
+                       "scsi_add_host failed for controller %u\n",
+                       ctrl_info->ctrl_id);
+               goto free_host;
+       }
+
+       rc = pqi_add_sas_host(shost, ctrl_info);
+       if (rc) {
+               dev_err(&ctrl_info->pci_dev->dev,
+                       "add SAS host failed for controller %u\n",
+                       ctrl_info->ctrl_id);
+               goto remove_host;
+       }
+
+       ctrl_info->scsi_host = shost;
+
+       return 0;
+
+remove_host:
+       scsi_remove_host(shost);
+free_host:
+       scsi_host_put(shost);
+
+       return rc;
+}
+
+static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info)
+{
+       struct Scsi_Host *shost;
+
+       pqi_delete_sas_host(ctrl_info);
+
+       shost = ctrl_info->scsi_host;
+       if (!shost)
+               return;
+
+       scsi_remove_host(shost);
+       scsi_host_put(shost);
+}
+
+#define PQI_RESET_ACTION_RESET         0x1
+
+#define PQI_RESET_TYPE_NO_RESET                0x0
+#define PQI_RESET_TYPE_SOFT_RESET      0x1
+#define PQI_RESET_TYPE_FIRM_RESET      0x2
+#define PQI_RESET_TYPE_HARD_RESET      0x3
+
+static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
+{
+       int rc;
+       u32 reset_params;
+
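+       /*
+        * The device-reset register takes the reset action in bits 5 and
+        * up and the reset type in the low-order bits.
+        */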
+       reset_params = (PQI_RESET_ACTION_RESET << 5) |
+               PQI_RESET_TYPE_HARD_RESET;
+
+       writel(reset_params,
+               &ctrl_info->pqi_registers->device_reset);
+
+       rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
+       if (rc)
+               dev_err(&ctrl_info->pci_dev->dev,
+                       "PQI reset failed\n");
+
+       return rc;
+}
+
+static int pqi_get_ctrl_firmware_version(struct pqi_ctrl_info *ctrl_info)
+{
+       int rc;
+       struct bmic_identify_controller *identify;
+
+       identify = kmalloc(sizeof(*identify), GFP_KERNEL);
+       if (!identify)
+               return -ENOMEM;
+
+       rc = pqi_identify_controller(ctrl_info, identify);
+       if (rc)
+               goto out;
+
+       memcpy(ctrl_info->firmware_version, identify->firmware_version,
+               sizeof(identify->firmware_version));
+       ctrl_info->firmware_version[sizeof(identify->firmware_version)] = '\0';
+       snprintf(ctrl_info->firmware_version +
+               strlen(ctrl_info->firmware_version),
+               sizeof(ctrl_info->firmware_version) -
+               strlen(ctrl_info->firmware_version),
+               "-%u", get_unaligned_le16(&identify->firmware_build_number));
+
+out:
+       kfree(identify);
+
+       return rc;
+}
+
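+/*
+ * Used when reset_devices is set (typically in a kdump/kexec kernel):
+ * the controller may have been left in PQI mode by the previous kernel,
+ * so knock it back into SIS mode before reinitializing it.
+ */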
+static int pqi_kdump_init(struct pqi_ctrl_info *ctrl_info)
+{
+       if (!sis_is_firmware_running(ctrl_info))
+               return -ENXIO;
+
+       if (pqi_get_ctrl_mode(ctrl_info) == PQI_MODE) {
+               sis_disable_msix(ctrl_info);
+               if (pqi_reset(ctrl_info) == 0)
+                       sis_reenable_sis_mode(ctrl_info);
+       }
+
+       return 0;
+}
+
+static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
+{
+       int rc;
+
+       if (reset_devices) {
+               rc = pqi_kdump_init(ctrl_info);
+               if (rc)
+                       return rc;
+       }
+
+       /*
+        * When the controller comes out of reset, it is always running
+        * in legacy SIS mode.  This is so that it can be compatible
+        * with legacy drivers shipped with OSes.  So we have to talk
+        * to it using SIS commands at first.  Once we are satisified
+        * that the controller supports PQI, we transition it into PQI
+        * mode.
+        */
+
+       /*
+        * Wait until the controller is ready to start accepting SIS
+        * commands.
+        */
+       rc = sis_wait_for_ctrl_ready(ctrl_info);
+       if (rc) {
+               dev_err(&ctrl_info->pci_dev->dev,
+                       "error initializing SIS interface\n");
+               return rc;
+       }
+
+       /*
+        * Get the controller properties.  This allows us to determine
+        * whether or not it supports PQI mode.
+        */
+       rc = sis_get_ctrl_properties(ctrl_info);
+       if (rc) {
+               dev_err(&ctrl_info->pci_dev->dev,
+                       "error obtaining controller properties\n");
+               return rc;
+       }
+
+       rc = sis_get_pqi_capabilities(ctrl_info);
+       if (rc) {
+               dev_err(&ctrl_info->pci_dev->dev,
+                       "error obtaining controller capabilities\n");
+               return rc;
+       }
+
+       if (ctrl_info->max_outstanding_requests > PQI_MAX_OUTSTANDING_REQUESTS)
+               ctrl_info->max_outstanding_requests =
+                       PQI_MAX_OUTSTANDING_REQUESTS;
+
+       pqi_calculate_io_resources(ctrl_info);
+
+       rc = pqi_alloc_error_buffer(ctrl_info);
+       if (rc) {
+               dev_err(&ctrl_info->pci_dev->dev,
+                       "failed to allocate PQI error buffer\n");
+               return rc;
+       }
+
+       /*
+        * If the function we are about to call succeeds, the
+        * controller will transition from legacy SIS mode
+        * into PQI mode.
+        */
+       rc = sis_init_base_struct_addr(ctrl_info);
+       if (rc) {
+               dev_err(&ctrl_info->pci_dev->dev,
+                       "error initializing PQI mode\n");
+               return rc;
+       }
+
+       /* Wait for the controller to complete the SIS -> PQI transition. */
+       rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
+       if (rc) {
+               dev_err(&ctrl_info->pci_dev->dev,
+                       "transition to PQI mode failed\n");
+               return rc;
+       }
+
+       /* From here on, we are running in PQI mode. */
+       ctrl_info->pqi_mode_enabled = true;
+       pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
+
+       rc = pqi_alloc_admin_queues(ctrl_info);
+       if (rc) {
+               dev_err(&ctrl_info->pci_dev->dev,
+                       "error allocating admin queues\n");
+               return rc;
+       }
+
+       rc = pqi_create_admin_queues(ctrl_info);
+       if (rc) {
+               dev_err(&ctrl_info->pci_dev->dev,
+                       "error creating admin queues\n");
+               return rc;
+       }
+
+       rc = pqi_report_device_capability(ctrl_info);
+       if (rc) {
+               dev_err(&ctrl_info->pci_dev->dev,
+                       "obtaining device capability failed\n");
+               return rc;
+       }
+
+       rc = pqi_validate_device_capability(ctrl_info);
+       if (rc)
+               return rc;
+
+       pqi_calculate_queue_resources(ctrl_info);
+
+       rc = pqi_enable_msix_interrupts(ctrl_info);
+       if (rc)
+               return rc;
+
+       if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) {
+               ctrl_info->max_msix_vectors =
+                       ctrl_info->num_msix_vectors_enabled;
+               pqi_calculate_queue_resources(ctrl_info);
+       }
+
+       rc = pqi_alloc_io_resources(ctrl_info);
+       if (rc)
+               return rc;
+
+       rc = pqi_alloc_operational_queues(ctrl_info);
+       if (rc)
+               return rc;
+
+       pqi_init_operational_queues(ctrl_info);
+
+       rc = pqi_request_irqs(ctrl_info);
+       if (rc)
+               return rc;
+
+       pqi_irq_set_affinity_hint(ctrl_info);
+
+       rc = pqi_create_queues(ctrl_info);
+       if (rc)
+               return rc;
+
+       sis_enable_msix(ctrl_info);
+
+       rc = pqi_configure_events(ctrl_info);
+       if (rc) {
+               dev_err(&ctrl_info->pci_dev->dev,
+                       "error configuring events\n");
+               return rc;
+       }
+
+       pqi_start_heartbeat_timer(ctrl_info);
+
+       ctrl_info->controller_online = true;
+
+       /* Register with the SCSI subsystem. */
+       rc = pqi_register_scsi(ctrl_info);
+       if (rc)
+               return rc;
+
+       rc = pqi_get_ctrl_firmware_version(ctrl_info);
+       if (rc) {
+               dev_err(&ctrl_info->pci_dev->dev,
+                       "error obtaining firmware version\n");
+               return rc;
+       }
+
+       rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
+       if (rc) {
+               dev_err(&ctrl_info->pci_dev->dev,
+                       "error updating host wellness\n");
+               return rc;
+       }
+
+       pqi_schedule_update_time_worker(ctrl_info);
+
+       pqi_scan_scsi_devices(ctrl_info);
+
+       return 0;
+}
+
+static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
+{
+       int rc;
+       u64 mask;
+
+       rc = pci_enable_device(ctrl_info->pci_dev);
+       if (rc) {
+               dev_err(&ctrl_info->pci_dev->dev,
+                       "failed to enable PCI device\n");
+               return rc;
+       }
+
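+       /* Use 64-bit DMA addressing whenever dma_addr_t can express it. */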
+       if (sizeof(dma_addr_t) > 4)
+               mask = DMA_BIT_MASK(64);
+       else
+               mask = DMA_BIT_MASK(32);
+
+       rc = dma_set_mask(&ctrl_info->pci_dev->dev, mask);
+       if (rc) {
+               dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
+               goto disable_device;
+       }
+
+       rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT);
+       if (rc) {
+               dev_err(&ctrl_info->pci_dev->dev,
+                       "failed to obtain PCI resources\n");
+               goto disable_device;
+       }
+
+       ctrl_info->iomem_base = ioremap_nocache(pci_resource_start(
+               ctrl_info->pci_dev, 0),
+               sizeof(struct pqi_ctrl_registers));
+       if (!ctrl_info->iomem_base) {
+               dev_err(&ctrl_info->pci_dev->dev,
+                       "failed to map memory for controller registers\n");
+               rc = -ENOMEM;
+               goto release_regions;
+       }
+
+       ctrl_info->registers = ctrl_info->iomem_base;
+       ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers;
+
+       /* Enable bus mastering. */
+       pci_set_master(ctrl_info->pci_dev);
+
+       pci_set_drvdata(ctrl_info->pci_dev, ctrl_info);
+
+       return 0;
+
+release_regions:
+       pci_release_regions(ctrl_info->pci_dev);
+disable_device:
+       pci_disable_device(ctrl_info->pci_dev);
+
+       return rc;
+}
+
+static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info)
+{
+       iounmap(ctrl_info->iomem_base);
+       pci_release_regions(ctrl_info->pci_dev);
+       pci_disable_device(ctrl_info->pci_dev);
+       pci_set_drvdata(ctrl_info->pci_dev, NULL);
+}
+
+static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
+{
+       struct pqi_ctrl_info *ctrl_info;
+
+       ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info),
+                       GFP_KERNEL, numa_node);
+       if (!ctrl_info)
+               return NULL;
+
+       mutex_init(&ctrl_info->scan_mutex);
+
+       INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
+       spin_lock_init(&ctrl_info->scsi_device_list_lock);
+
+       INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
+       atomic_set(&ctrl_info->num_interrupts, 0);
+
+       INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
+       INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);
+
+       sema_init(&ctrl_info->sync_request_sem,
+               PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS);
+       sema_init(&ctrl_info->lun_reset_sem, PQI_RESERVED_IO_SLOTS_LUN_RESET);
+
+       ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
+       ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;
+
+       return ctrl_info;
+}
+
+static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
+{
+       kfree(ctrl_info);
+}
+
+static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
+{
+       pqi_irq_unset_affinity_hint(ctrl_info);
+       pqi_free_irqs(ctrl_info);
+       if (ctrl_info->num_msix_vectors_enabled)
+               pci_disable_msix(ctrl_info->pci_dev);
+}
+
+static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
+{
+       pqi_stop_heartbeat_timer(ctrl_info);
+       pqi_free_interrupts(ctrl_info);
+       if (ctrl_info->queue_memory_base)
+               dma_free_coherent(&ctrl_info->pci_dev->dev,
+                       ctrl_info->queue_memory_length,
+                       ctrl_info->queue_memory_base,
+                       ctrl_info->queue_memory_base_dma_handle);
+       if (ctrl_info->admin_queue_memory_base)
+               dma_free_coherent(&ctrl_info->pci_dev->dev,
+                       ctrl_info->admin_queue_memory_length,
+                       ctrl_info->admin_queue_memory_base,
+                       ctrl_info->admin_queue_memory_base_dma_handle);
+       pqi_free_all_io_requests(ctrl_info);
+       if (ctrl_info->error_buffer)
+               dma_free_coherent(&ctrl_info->pci_dev->dev,
+                       ctrl_info->error_buffer_length,
+                       ctrl_info->error_buffer,
+                       ctrl_info->error_buffer_dma_handle);
+       if (ctrl_info->iomem_base)
+               pqi_cleanup_pci_init(ctrl_info);
+       pqi_free_ctrl_info(ctrl_info);
+}
+
+static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
+{
+       cancel_delayed_work_sync(&ctrl_info->rescan_work);
+       cancel_delayed_work_sync(&ctrl_info->update_time_work);
+       pqi_remove_all_scsi_devices(ctrl_info);
+       pqi_unregister_scsi(ctrl_info);
+
+       if (ctrl_info->pqi_mode_enabled) {
+               sis_disable_msix(ctrl_info);
+               if (pqi_reset(ctrl_info) == 0)
+                       sis_reenable_sis_mode(ctrl_info);
+       }
+       pqi_free_ctrl_resources(ctrl_info);
+}
+
+static void pqi_print_ctrl_info(struct pci_dev *pdev,
+       const struct pci_device_id *id)
+{
+       char *ctrl_description;
+
+       if (id->driver_data) {
+               ctrl_description = (char *)id->driver_data;
+       } else {
+               switch (id->subvendor) {
+               case PCI_VENDOR_ID_HP:
+                       ctrl_description = hpe_branded_controller;
+                       break;
+               case PCI_VENDOR_ID_ADAPTEC2:
+               default:
+                       ctrl_description = microsemi_branded_controller;
+                       break;
+               }
+       }
+
+       dev_info(&pdev->dev, "%s found\n", ctrl_description);
+}
+
+static int pqi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+       int rc;
+       int node;
+       struct pqi_ctrl_info *ctrl_info;
+
+       pqi_print_ctrl_info(pdev, id);
+
+       if (pqi_disable_device_id_wildcards &&
+               id->subvendor == PCI_ANY_ID &&
+               id->subdevice == PCI_ANY_ID) {
+               dev_warn(&pdev->dev,
+                       "controller not probed because device ID wildcards are disabled\n");
+               return -ENODEV;
+       }
+
+       if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID)
+               dev_warn(&pdev->dev,
+                       "controller device ID matched using wildcards\n");
+
+       node = dev_to_node(&pdev->dev);
+       if (node == NUMA_NO_NODE)
+               set_dev_node(&pdev->dev, 0);
+
+       ctrl_info = pqi_alloc_ctrl_info(node);
+       if (!ctrl_info) {
+               dev_err(&pdev->dev,
+                       "failed to allocate controller info block\n");
+               return -ENOMEM;
+       }
+
+       ctrl_info->pci_dev = pdev;
+
+       rc = pqi_pci_init(ctrl_info);
+       if (rc)
+               goto error;
+
+       rc = pqi_ctrl_init(ctrl_info);
+       if (rc)
+               goto error;
+
+       return 0;
+
+error:
+       pqi_remove_ctrl(ctrl_info);
+
+       return rc;
+}
+
+static void pqi_pci_remove(struct pci_dev *pdev)
+{
+       struct pqi_ctrl_info *ctrl_info;
+
+       ctrl_info = pci_get_drvdata(pdev);
+       if (!ctrl_info)
+               return;
+
+       pqi_remove_ctrl(ctrl_info);
+}
+
+static void pqi_shutdown(struct pci_dev *pdev)
+{
+       int rc;
+       struct pqi_ctrl_info *ctrl_info;
+
+       ctrl_info = pci_get_drvdata(pdev);
+       if (!ctrl_info)
+               goto error;
+
+       /*
+        * Write all data in the controller's battery-backed cache to
+        * storage.
+        */
+       rc = pqi_flush_cache(ctrl_info);
+       if (rc == 0)
+               return;
+
+error:
+       dev_warn(&pdev->dev,
+               "unable to flush controller cache\n");
+}
+
+/* Define the PCI IDs for the controllers that we support. */
+static const struct pci_device_id pqi_pci_id_table[] = {
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x0110)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_HP, 0x0600)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_HP, 0x0601)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_HP, 0x0602)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_HP, 0x0603)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_HP, 0x0650)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_HP, 0x0651)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_HP, 0x0652)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_HP, 0x0653)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_HP, 0x0654)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_HP, 0x0655)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_HP, 0x0700)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_HP, 0x0701)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x0800)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x0801)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x0802)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x0803)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x0804)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x0805)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x0900)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x0901)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x0902)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x0903)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x0904)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x0905)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x0906)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_HP, 0x1001)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_HP, 0x1100)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_HP, 0x1101)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_HP, 0x1102)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_HP, 0x1150)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_ANY_ID, PCI_ANY_ID)
+       },
+       { 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, pqi_pci_id_table);
+
+static struct pci_driver pqi_pci_driver = {
+       .name = DRIVER_NAME_SHORT,
+       .id_table = pqi_pci_id_table,
+       .probe = pqi_pci_probe,
+       .remove = pqi_pci_remove,
+       .shutdown = pqi_shutdown,
+};
+
+static int __init pqi_init(void)
+{
+       int rc;
+
+       pr_info(DRIVER_NAME "\n");
+
+       pqi_sas_transport_template =
+               sas_attach_transport(&pqi_sas_transport_functions);
+       if (!pqi_sas_transport_template)
+               return -ENODEV;
+
+       rc = pci_register_driver(&pqi_pci_driver);
+       if (rc)
+               sas_release_transport(pqi_sas_transport_template);
+
+       return rc;
+}
+
+static void __exit pqi_cleanup(void)
+{
+       pci_unregister_driver(&pqi_pci_driver);
+       sas_release_transport(pqi_sas_transport_template);
+}
+
+module_init(pqi_init);
+module_exit(pqi_cleanup);
+
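+/*
+ * Compile-time layout checks: BUILD_BUG_ON() breaks the build if any of
+ * the structures shared with the controller deviates from the expected
+ * byte offsets and sizes.  The function is never called; it exists only
+ * so these checks get compiled.
+ */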
+static void __attribute__((unused)) verify_structures(void)
+{
+       BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
+               sis_host_to_ctrl_doorbell) != 0x20);
+       BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
+               sis_interrupt_mask) != 0x34);
+       BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
+               sis_ctrl_to_host_doorbell) != 0x9c);
+       BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
+               sis_ctrl_to_host_doorbell_clear) != 0xa0);
+       BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
+               sis_driver_scratch) != 0xb0);
+       BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
+               sis_firmware_status) != 0xbc);
+       BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
+               sis_mailbox) != 0x1000);
+       BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
+               pqi_registers) != 0x4000);
+
+       BUILD_BUG_ON(offsetof(struct pqi_iu_header,
+               iu_type) != 0x0);
+       BUILD_BUG_ON(offsetof(struct pqi_iu_header,
+               iu_length) != 0x2);
+       BUILD_BUG_ON(offsetof(struct pqi_iu_header,
+               response_queue_id) != 0x4);
+       BUILD_BUG_ON(offsetof(struct pqi_iu_header,
+               work_area) != 0x6);
+       BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8);
+
+       BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
+               status) != 0x0);
+       BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
+               service_response) != 0x1);
+       BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
+               data_present) != 0x2);
+       BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
+               reserved) != 0x3);
+       BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
+               residual_count) != 0x4);
+       BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
+               data_length) != 0x8);
+       BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
+               reserved1) != 0xa);
+       BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
+               data) != 0xc);
+       BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c);
+
+       BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
+               data_in_result) != 0x0);
+       BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
+               data_out_result) != 0x1);
+       BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
+               reserved) != 0x2);
+       BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
+               status) != 0x5);
+       BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
+               status_qualifier) != 0x6);
+       BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
+               sense_data_length) != 0x8);
+       BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
+               response_data_length) != 0xa);
+       BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
+               data_in_transferred) != 0xc);
+       BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
+               data_out_transferred) != 0x10);
+       BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
+               data) != 0x14);
+       BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114);
+
+       BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+               signature) != 0x0);
+       BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+               function_and_status_code) != 0x8);
+       BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+               max_admin_iq_elements) != 0x10);
+       BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+               max_admin_oq_elements) != 0x11);
+       BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+               admin_iq_element_length) != 0x12);
+       BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+               admin_oq_element_length) != 0x13);
+       BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+               max_reset_timeout) != 0x14);
+       BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+               legacy_intx_status) != 0x18);
+       BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+               legacy_intx_mask_set) != 0x1c);
+       BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+               legacy_intx_mask_clear) != 0x20);
+       BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+               device_status) != 0x40);
+       BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+               admin_iq_pi_offset) != 0x48);
+       BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+               admin_oq_ci_offset) != 0x50);
+       BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+               admin_iq_element_array_addr) != 0x58);
+       BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+               admin_oq_element_array_addr) != 0x60);
+       BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+               admin_iq_ci_addr) != 0x68);
+       BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+               admin_oq_pi_addr) != 0x70);
+       BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+               admin_iq_num_elements) != 0x78);
+       BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+               admin_oq_num_elements) != 0x79);
+       BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+               admin_queue_int_msg_num) != 0x7a);
+       BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+               device_error) != 0x80);
+       BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+               error_details) != 0x88);
+       BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+               device_reset) != 0x90);
+       BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+               power_action) != 0x94);
+       BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100);
+
+       BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+               header.iu_type) != 0);
+       BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+               header.iu_length) != 2);
+       BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+               header.work_area) != 6);
+       BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+               request_id) != 8);
+       BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+               function_code) != 10);
+       BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+               data.report_device_capability.buffer_length) != 44);
+       BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+               data.report_device_capability.sg_descriptor) != 48);
+       BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+               data.create_operational_iq.queue_id) != 12);
+       BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+               data.create_operational_iq.element_array_addr) != 16);
+       BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+               data.create_operational_iq.ci_addr) != 24);
+       BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+               data.create_operational_iq.num_elements) != 32);
+       BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+               data.create_operational_iq.element_length) != 34);
+       BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+               data.create_operational_iq.queue_protocol) != 36);
+       BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+               data.create_operational_oq.queue_id) != 12);
+       BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+               data.create_operational_oq.element_array_addr) != 16);
+       BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+               data.create_operational_oq.pi_addr) != 24);
+       BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+               data.create_operational_oq.num_elements) != 32);
+       BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+               data.create_operational_oq.element_length) != 34);
+       BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+               data.create_operational_oq.queue_protocol) != 36);
+       BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+               data.create_operational_oq.int_msg_num) != 40);
+       BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+               data.create_operational_oq.coalescing_count) != 42);
+       BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+               data.create_operational_oq.min_coalescing_time) != 44);
+       BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+               data.create_operational_oq.max_coalescing_time) != 48);
+       BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+               data.delete_operational_queue.queue_id) != 12);
+       BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64);
+       BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
+               data.create_operational_iq) != 64 - 11);
+       BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
+               data.create_operational_oq) != 64 - 11);
+       BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
+               data.delete_operational_queue) != 64 - 11);
+
+       BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
+               header.iu_type) != 0);
+       BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
+               header.iu_length) != 2);
+       BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
+               header.work_area) != 6);
+       BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
+               request_id) != 8);
+       BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
+               function_code) != 10);
+       BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
+               status) != 11);
+       BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
+               data.create_operational_iq.status_descriptor) != 12);
+       BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
+               data.create_operational_iq.iq_pi_offset) != 16);
+       BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
+               data.create_operational_oq.status_descriptor) != 12);
+       BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
+               data.create_operational_oq.oq_ci_offset) != 16);
+       BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64);
+
+       BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
+               header.iu_type) != 0);
+       BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
+               header.iu_length) != 2);
+       BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
+               header.response_queue_id) != 4);
+       BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
+               header.work_area) != 6);
+       BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
+               request_id) != 8);
+       BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
+               nexus_id) != 10);
+       BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
+               buffer_length) != 12);
+       BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
+               lun_number) != 16);
+       BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
+               protocol_specific) != 24);
+       BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
+               error_index) != 27);
+       BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
+               cdb) != 32);
+       BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
+               sg_descriptors) != 64);
+       BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) !=
+               PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
+
+       BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
+               header.iu_type) != 0);
+       BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
+               header.iu_length) != 2);
+       BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
+               header.response_queue_id) != 4);
+       BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
+               header.work_area) != 6);
+       BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
+               request_id) != 8);
+       BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
+               nexus_id) != 12);
+       BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
+               buffer_length) != 16);
+       BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
+               data_encryption_key_index) != 22);
+       BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
+               encrypt_tweak_lower) != 24);
+       BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
+               encrypt_tweak_upper) != 28);
+       BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
+               cdb) != 32);
+       BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
+               error_index) != 48);
+       BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
+               num_sg_descriptors) != 50);
+       BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
+               cdb_length) != 51);
+       BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
+               lun_number) != 52);
+       BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
+               sg_descriptors) != 64);
+       BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) !=
+               PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
+
+       BUILD_BUG_ON(offsetof(struct pqi_io_response,
+               header.iu_type) != 0);
+       BUILD_BUG_ON(offsetof(struct pqi_io_response,
+               header.iu_length) != 2);
+       BUILD_BUG_ON(offsetof(struct pqi_io_response,
+               request_id) != 8);
+       BUILD_BUG_ON(offsetof(struct pqi_io_response,
+               error_index) != 10);
+
+       BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
+               header.iu_type) != 0);
+       BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
+               header.iu_length) != 2);
+       BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
+               header.response_queue_id) != 4);
+       BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
+               request_id) != 8);
+       BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
+               data.report_event_configuration.buffer_length) != 12);
+       BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
+               data.report_event_configuration.sg_descriptors) != 16);
+       BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
+               data.set_event_configuration.global_event_oq_id) != 10);
+       BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
+               data.set_event_configuration.buffer_length) != 12);
+       BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
+               data.set_event_configuration.sg_descriptors) != 16);
+
+       BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
+               max_inbound_iu_length) != 6);
+       BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
+               max_outbound_iu_length) != 14);
+       BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16);
+
+       BUILD_BUG_ON(offsetof(struct pqi_device_capability,
+               data_length) != 0);
+       BUILD_BUG_ON(offsetof(struct pqi_device_capability,
+               iq_arbitration_priority_support_bitmask) != 8);
+       BUILD_BUG_ON(offsetof(struct pqi_device_capability,
+               maximum_aw_a) != 9);
+       BUILD_BUG_ON(offsetof(struct pqi_device_capability,
+               maximum_aw_b) != 10);
+       BUILD_BUG_ON(offsetof(struct pqi_device_capability,
+               maximum_aw_c) != 11);
+       BUILD_BUG_ON(offsetof(struct pqi_device_capability,
+               max_inbound_queues) != 16);
+       BUILD_BUG_ON(offsetof(struct pqi_device_capability,
+               max_elements_per_iq) != 18);
+       BUILD_BUG_ON(offsetof(struct pqi_device_capability,
+               max_iq_element_length) != 24);
+       BUILD_BUG_ON(offsetof(struct pqi_device_capability,
+               min_iq_element_length) != 26);
+       BUILD_BUG_ON(offsetof(struct pqi_device_capability,
+               max_outbound_queues) != 30);
+       BUILD_BUG_ON(offsetof(struct pqi_device_capability,
+               max_elements_per_oq) != 32);
+       BUILD_BUG_ON(offsetof(struct pqi_device_capability,
+               intr_coalescing_time_granularity) != 34);
+       BUILD_BUG_ON(offsetof(struct pqi_device_capability,
+               max_oq_element_length) != 36);
+       BUILD_BUG_ON(offsetof(struct pqi_device_capability,
+               min_oq_element_length) != 38);
+       BUILD_BUG_ON(offsetof(struct pqi_device_capability,
+               iu_layer_descriptors) != 64);
+       BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576);
+
+       BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
+               event_type) != 0);
+       BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
+               oq_id) != 2);
+       BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4);
+
+       BUILD_BUG_ON(offsetof(struct pqi_event_config,
+               num_event_descriptors) != 2);
+       BUILD_BUG_ON(offsetof(struct pqi_event_config,
+               descriptors) != 4);
+
+       BUILD_BUG_ON(offsetof(struct pqi_event_response,
+               header.iu_type) != 0);
+       BUILD_BUG_ON(offsetof(struct pqi_event_response,
+               header.iu_length) != 2);
+       BUILD_BUG_ON(offsetof(struct pqi_event_response,
+               event_type) != 8);
+       BUILD_BUG_ON(offsetof(struct pqi_event_response,
+               event_id) != 10);
+       BUILD_BUG_ON(offsetof(struct pqi_event_response,
+               additional_event_id) != 12);
+       BUILD_BUG_ON(offsetof(struct pqi_event_response,
+               data) != 16);
+       BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32);
+
+       BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
+               header.iu_type) != 0);
+       BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
+               header.iu_length) != 2);
+       BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
+               event_type) != 8);
+       BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
+               event_id) != 10);
+       BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
+               additional_event_id) != 12);
+       BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16);
+
+       BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
+               header.iu_type) != 0);
+       BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
+               header.iu_length) != 2);
+       BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
+               request_id) != 8);
+       BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
+               nexus_id) != 10);
+       BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
+               lun_number) != 16);
+       BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
+               protocol_specific) != 24);
+       BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
+               outbound_queue_id_to_manage) != 26);
+       BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
+               request_id_to_manage) != 28);
+       BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
+               task_management_function) != 30);
+       BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32);
+
+       BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
+               header.iu_type) != 0);
+       BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
+               header.iu_length) != 2);
+       BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
+               request_id) != 8);
+       BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
+               nexus_id) != 10);
+       BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
+               additional_response_info) != 12);
+       BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
+               response_code) != 15);
+       BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16);
+
+       BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
+               configured_logical_drive_count) != 0);
+       BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
+               configuration_signature) != 1);
+       BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
+               firmware_version) != 5);
+       BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
+               extended_logical_unit_count) != 154);
+       BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
+               firmware_build_number) != 190);
+       BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
+               controller_mode) != 292);
+
+       BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
+       BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
+       BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
+               PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
+       BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH %
+               PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
+       BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560);
+       BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH %
+               PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
+       BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560);
+       BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH %
+               PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
+
+       BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS);
+}
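
verify_structures() above is deliberately never called: it is compiled only so the BUILD_BUG_ON() assertions run at build time and break the build if the compiler lays out any firmware-visible structure differently from what the PQI and BMIC specifications require. A minimal sketch of the same idiom, using a hypothetical register block rather than the driver's real structures:

    #include <linux/bug.h>          /* BUILD_BUG_ON() */
    #include <linux/stddef.h>       /* offsetof() */
    #include <linux/types.h>

    /* Hypothetical hardware-visible layout; offsets are fixed by the device. */
    struct example_regs {
            __le32  signature;      /* expected at offset 0x0 */
            __le32  status;         /* expected at offset 0x4 */
            __le64  doorbell;       /* expected at offset 0x8 */
    };

    /* Never called; compiled so the assertions are evaluated at build time. */
    static void __attribute__((unused)) example_verify_layout(void)
    {
            BUILD_BUG_ON(offsetof(struct example_regs, status) != 0x4);
            BUILD_BUG_ON(offsetof(struct example_regs, doorbell) != 0x8);
            BUILD_BUG_ON(sizeof(struct example_regs) != 0x10);
    }

Because the assertions live in an unused static function, they cost nothing at runtime; a layout mismatch surfaces as a compile error rather than as silent data corruption on the wire.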
diff --git a/drivers/scsi/smartpqi/smartpqi_sas_transport.c b/drivers/scsi/smartpqi/smartpqi_sas_transport.c
new file mode 100644 (file)
index 0000000..52ca4f9
--- /dev/null
@@ -0,0 +1,350 @@
+/*
+ *    driver for Microsemi PQI-based storage controllers
+ *    Copyright (c) 2016 Microsemi Corporation
+ *    Copyright (c) 2016 PMC-Sierra, Inc.
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; version 2 of the License.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *    NON INFRINGEMENT.  See the GNU General Public License for more details.
+ *
+ *    Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
+ *
+ */
+
+#include <linux/kernel.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_transport_sas.h>
+#include "smartpqi.h"
+
+static struct pqi_sas_phy *pqi_alloc_sas_phy(struct pqi_sas_port *pqi_sas_port)
+{
+       struct pqi_sas_phy *pqi_sas_phy;
+       struct sas_phy *phy;
+
+       pqi_sas_phy = kzalloc(sizeof(*pqi_sas_phy), GFP_KERNEL);
+       if (!pqi_sas_phy)
+               return NULL;
+
+       phy = sas_phy_alloc(pqi_sas_port->parent_node->parent_dev,
+               pqi_sas_port->next_phy_index);
+       if (!phy) {
+               kfree(pqi_sas_phy);
+               return NULL;
+       }
+
+       pqi_sas_port->next_phy_index++;
+       pqi_sas_phy->phy = phy;
+       pqi_sas_phy->parent_port = pqi_sas_port;
+
+       return pqi_sas_phy;
+}
+
+static void pqi_free_sas_phy(struct pqi_sas_phy *pqi_sas_phy)
+{
+       struct sas_phy *phy = pqi_sas_phy->phy;
+
+       sas_port_delete_phy(pqi_sas_phy->parent_port->port, phy);
+       sas_phy_free(phy);
+       if (pqi_sas_phy->added_to_port)
+               list_del(&pqi_sas_phy->phy_list_entry);
+       kfree(pqi_sas_phy);
+}
+
+static int pqi_sas_port_add_phy(struct pqi_sas_phy *pqi_sas_phy)
+{
+       int rc;
+       struct pqi_sas_port *pqi_sas_port;
+       struct sas_phy *phy;
+       struct sas_identify *identify;
+
+       pqi_sas_port = pqi_sas_phy->parent_port;
+       phy = pqi_sas_phy->phy;
+
+       identify = &phy->identify;
+       memset(identify, 0, sizeof(*identify));
+       identify->sas_address = pqi_sas_port->sas_address;
+       identify->device_type = SAS_END_DEVICE;
+       identify->initiator_port_protocols = SAS_PROTOCOL_STP;
+       identify->target_port_protocols = SAS_PROTOCOL_STP;
+       phy->minimum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
+       phy->maximum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
+       phy->minimum_linkrate = SAS_LINK_RATE_UNKNOWN;
+       phy->maximum_linkrate = SAS_LINK_RATE_UNKNOWN;
+       phy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
+
+       rc = sas_phy_add(pqi_sas_phy->phy);
+       if (rc)
+               return rc;
+
+       sas_port_add_phy(pqi_sas_port->port, pqi_sas_phy->phy);
+       list_add_tail(&pqi_sas_phy->phy_list_entry,
+               &pqi_sas_port->phy_list_head);
+       pqi_sas_phy->added_to_port = true;
+
+       return 0;
+}
+
+static int pqi_sas_port_add_rphy(struct pqi_sas_port *pqi_sas_port,
+       struct sas_rphy *rphy)
+{
+       struct sas_identify *identify;
+
+       identify = &rphy->identify;
+       identify->sas_address = pqi_sas_port->sas_address;
+       identify->initiator_port_protocols = SAS_PROTOCOL_STP;
+       identify->target_port_protocols = SAS_PROTOCOL_STP;
+
+       return sas_rphy_add(rphy);
+}
+
+static struct pqi_sas_port *pqi_alloc_sas_port(
+       struct pqi_sas_node *pqi_sas_node, u64 sas_address)
+{
+       int rc;
+       struct pqi_sas_port *pqi_sas_port;
+       struct sas_port *port;
+
+       pqi_sas_port = kzalloc(sizeof(*pqi_sas_port), GFP_KERNEL);
+       if (!pqi_sas_port)
+               return NULL;
+
+       INIT_LIST_HEAD(&pqi_sas_port->phy_list_head);
+       pqi_sas_port->parent_node = pqi_sas_node;
+
+       port = sas_port_alloc_num(pqi_sas_node->parent_dev);
+       if (!port)
+               goto free_pqi_port;
+
+       rc = sas_port_add(port);
+       if (rc)
+               goto free_sas_port;
+
+       pqi_sas_port->port = port;
+       pqi_sas_port->sas_address = sas_address;
+       list_add_tail(&pqi_sas_port->port_list_entry,
+               &pqi_sas_node->port_list_head);
+
+       return pqi_sas_port;
+
+free_sas_port:
+       sas_port_free(port);
+free_pqi_port:
+       kfree(pqi_sas_port);
+
+       return NULL;
+}
+
+static void pqi_free_sas_port(struct pqi_sas_port *pqi_sas_port)
+{
+       struct pqi_sas_phy *pqi_sas_phy;
+       struct pqi_sas_phy *next;
+
+       list_for_each_entry_safe(pqi_sas_phy, next,
+                       &pqi_sas_port->phy_list_head, phy_list_entry)
+               pqi_free_sas_phy(pqi_sas_phy);
+
+       sas_port_delete(pqi_sas_port->port);
+       list_del(&pqi_sas_port->port_list_entry);
+       kfree(pqi_sas_port);
+}
+
+static struct pqi_sas_node *pqi_alloc_sas_node(struct device *parent_dev)
+{
+       struct pqi_sas_node *pqi_sas_node;
+
+       pqi_sas_node = kzalloc(sizeof(*pqi_sas_node), GFP_KERNEL);
+       if (pqi_sas_node) {
+               pqi_sas_node->parent_dev = parent_dev;
+               INIT_LIST_HEAD(&pqi_sas_node->port_list_head);
+       }
+
+       return pqi_sas_node;
+}
+
+static void pqi_free_sas_node(struct pqi_sas_node *pqi_sas_node)
+{
+       struct pqi_sas_port *pqi_sas_port;
+       struct pqi_sas_port *next;
+
+       if (!pqi_sas_node)
+               return;
+
+       list_for_each_entry_safe(pqi_sas_port, next,
+                       &pqi_sas_node->port_list_head, port_list_entry)
+               pqi_free_sas_port(pqi_sas_port);
+
+       kfree(pqi_sas_node);
+}
+
+struct pqi_scsi_dev *pqi_find_device_by_sas_rphy(
+       struct pqi_ctrl_info *ctrl_info, struct sas_rphy *rphy)
+{
+       struct pqi_scsi_dev *device;
+
+       list_for_each_entry(device, &ctrl_info->scsi_device_list,
+               scsi_device_list_entry) {
+               if (!device->sas_port)
+                       continue;
+               if (device->sas_port->rphy == rphy)
+                       return device;
+       }
+
+       return NULL;
+}
+
+int pqi_add_sas_host(struct Scsi_Host *shost, struct pqi_ctrl_info *ctrl_info)
+{
+       int rc;
+       struct device *parent_dev;
+       struct pqi_sas_node *pqi_sas_node;
+       struct pqi_sas_port *pqi_sas_port;
+       struct pqi_sas_phy *pqi_sas_phy;
+
+       parent_dev = &shost->shost_gendev;
+
+       pqi_sas_node = pqi_alloc_sas_node(parent_dev);
+       if (!pqi_sas_node)
+               return -ENOMEM;
+
+       pqi_sas_port = pqi_alloc_sas_port(pqi_sas_node, ctrl_info->sas_address);
+       if (!pqi_sas_port) {
+               rc = -ENODEV;
+               goto free_sas_node;
+       }
+
+       pqi_sas_phy = pqi_alloc_sas_phy(pqi_sas_port);
+       if (!pqi_sas_phy) {
+               rc = -ENODEV;
+               goto free_sas_port;
+       }
+
+       rc = pqi_sas_port_add_phy(pqi_sas_phy);
+       if (rc)
+               goto free_sas_phy;
+
+       ctrl_info->sas_host = pqi_sas_node;
+
+       return 0;
+
+free_sas_phy:
+       pqi_free_sas_phy(pqi_sas_phy);
+free_sas_port:
+       pqi_free_sas_port(pqi_sas_port);
+free_sas_node:
+       pqi_free_sas_node(pqi_sas_node);
+
+       return rc;
+}
+
+void pqi_delete_sas_host(struct pqi_ctrl_info *ctrl_info)
+{
+       pqi_free_sas_node(ctrl_info->sas_host);
+}
+
+int pqi_add_sas_device(struct pqi_sas_node *pqi_sas_node,
+       struct pqi_scsi_dev *device)
+{
+       int rc;
+       struct pqi_sas_port *pqi_sas_port;
+       struct sas_rphy *rphy;
+
+       pqi_sas_port = pqi_alloc_sas_port(pqi_sas_node, device->sas_address);
+       if (!pqi_sas_port)
+               return -ENOMEM;
+
+       rphy = sas_end_device_alloc(pqi_sas_port->port);
+       if (!rphy) {
+               rc = -ENODEV;
+               goto free_sas_port;
+       }
+
+       pqi_sas_port->rphy = rphy;
+       device->sas_port = pqi_sas_port;
+
+       rc = pqi_sas_port_add_rphy(pqi_sas_port, rphy);
+       if (rc)
+               goto free_sas_port;
+
+       return 0;
+
+free_sas_port:
+       pqi_free_sas_port(pqi_sas_port);
+       device->sas_port = NULL;
+
+       return rc;
+}
+
+void pqi_remove_sas_device(struct pqi_scsi_dev *device)
+{
+       if (device->sas_port) {
+               pqi_free_sas_port(device->sas_port);
+               device->sas_port = NULL;
+       }
+}
+
+static int pqi_sas_get_linkerrors(struct sas_phy *phy)
+{
+       return 0;
+}
+
+static int pqi_sas_get_enclosure_identifier(struct sas_rphy *rphy,
+       u64 *identifier)
+{
+       return 0;
+}
+
+static int pqi_sas_get_bay_identifier(struct sas_rphy *rphy)
+{
+       return -ENXIO;
+}
+
+static int pqi_sas_phy_reset(struct sas_phy *phy, int hard_reset)
+{
+       return 0;
+}
+
+static int pqi_sas_phy_enable(struct sas_phy *phy, int enable)
+{
+       return 0;
+}
+
+static int pqi_sas_phy_setup(struct sas_phy *phy)
+{
+       return 0;
+}
+
+static void pqi_sas_phy_release(struct sas_phy *phy)
+{
+}
+
+static int pqi_sas_phy_speed(struct sas_phy *phy,
+       struct sas_phy_linkrates *rates)
+{
+       return -EINVAL;
+}
+
+/* SMP = Serial Management Protocol */
+
+static int pqi_sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
+       struct request *req)
+{
+       return -EINVAL;
+}
+
+struct sas_function_template pqi_sas_transport_functions = {
+       .get_linkerrors = pqi_sas_get_linkerrors,
+       .get_enclosure_identifier = pqi_sas_get_enclosure_identifier,
+       .get_bay_identifier = pqi_sas_get_bay_identifier,
+       .phy_reset = pqi_sas_phy_reset,
+       .phy_enable = pqi_sas_phy_enable,
+       .phy_setup = pqi_sas_phy_setup,
+       .phy_release = pqi_sas_phy_release,
+       .set_phy_speed = pqi_sas_phy_speed,
+       .smp_handler = pqi_sas_smp_handler,
+};
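
pqi_sas_transport_functions above is the callback table handed to sas_attach_transport() in pqi_init(); most entries are stubs because the controller firmware, not the host, manages the physical topology. The template that sas_attach_transport() returns only takes effect once it is bound to a Scsi_Host before the host is added. That binding lives in the driver's host-registration path, which is not part of this file; a hedged sketch of the usual idiom (the helper name is hypothetical):

    /* Sketch only: bind the SAS transport template before exposing the host. */
    static int example_register_host(struct pqi_ctrl_info *ctrl_info,
            struct Scsi_Host *shost)
    {
            shost->transportt = pqi_sas_transport_template;

            return scsi_add_host(shost, &ctrl_info->pci_dev->dev);
    }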
diff --git a/drivers/scsi/smartpqi/smartpqi_sis.c b/drivers/scsi/smartpqi/smartpqi_sis.c
new file mode 100644 (file)
index 0000000..71408f9
--- /dev/null
@@ -0,0 +1,404 @@
+/*
+ *    driver for Microsemi PQI-based storage controllers
+ *    Copyright (c) 2016 Microsemi Corporation
+ *    Copyright (c) 2016 PMC-Sierra, Inc.
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; version 2 of the License.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *    NON INFRINGEMENT.  See the GNU General Public License for more details.
+ *
+ *    Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <scsi/scsi_device.h>
+#include <asm/unaligned.h>
+#include "smartpqi.h"
+#include "smartpqi_sis.h"
+
+/* legacy SIS interface commands */
+#define SIS_CMD_GET_ADAPTER_PROPERTIES         0x19
+#define SIS_CMD_INIT_BASE_STRUCT_ADDRESS       0x1b
+#define SIS_CMD_GET_PQI_CAPABILITIES           0x3000
+
+/* for submission of legacy SIS commands */
+#define SIS_REENABLE_SIS_MODE                  0x1
+#define SIS_ENABLE_MSIX                                0x40
+#define SIS_SOFT_RESET                         0x100
+#define SIS_CMD_READY                          0x200
+#define SIS_CMD_COMPLETE                       0x1000
+#define SIS_CLEAR_CTRL_TO_HOST_DOORBELL                0x1000
+#define SIS_CMD_STATUS_SUCCESS                 0x1
+#define SIS_CMD_COMPLETE_TIMEOUT_SECS          30
+#define SIS_CMD_COMPLETE_POLL_INTERVAL_MSECS   10
+
+/* used with SIS_CMD_GET_ADAPTER_PROPERTIES command */
+#define SIS_EXTENDED_PROPERTIES_SUPPORTED      0x800000
+#define SIS_SMARTARRAY_FEATURES_SUPPORTED      0x2
+#define SIS_PQI_MODE_SUPPORTED                 0x4
+#define SIS_REQUIRED_EXTENDED_PROPERTIES       \
+       (SIS_SMARTARRAY_FEATURES_SUPPORTED | SIS_PQI_MODE_SUPPORTED)
+
+/* used with SIS_CMD_INIT_BASE_STRUCT_ADDRESS command */
+#define SIS_BASE_STRUCT_REVISION               9
+#define SIS_BASE_STRUCT_ALIGNMENT              16
+
+#define SIS_CTRL_KERNEL_UP                     0x80
+#define SIS_CTRL_KERNEL_PANIC                  0x100
+#define SIS_CTRL_READY_TIMEOUT_SECS            30
+#define SIS_CTRL_READY_POLL_INTERVAL_MSECS     10
+
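+/* The controller parses these structures byte-for-byte, so disable padding. */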
+#pragma pack(1)
+
+/* for use with SIS_CMD_INIT_BASE_STRUCT_ADDRESS command */
+struct sis_base_struct {
+       __le32  revision;               /* revision of this structure */
+       __le32  flags;                  /* reserved */
+       __le32  error_buffer_paddr_low; /* lower 32 bits of physical memory */
+                                       /* buffer for PQI error response */
+                                       /* data */
+       __le32  error_buffer_paddr_high;        /* upper 32 bits of physical */
+                                               /* memory buffer for PQI */
+                                               /* error response data */
+       __le32  error_buffer_element_length;    /* length of each PQI error */
+                                               /* response buffer element */
+                                               /*   in bytes */
+       __le32  error_buffer_num_elements;      /* total number of PQI error */
+                                               /* response buffers available */
+};
+
+#pragma pack()
+
+int sis_wait_for_ctrl_ready(struct pqi_ctrl_info *ctrl_info)
+{
+       unsigned long timeout;
+       u32 status;
+
+       timeout = (SIS_CTRL_READY_TIMEOUT_SECS * HZ) + jiffies;
+
+       while (1) {
+               status = readl(&ctrl_info->registers->sis_firmware_status);
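+               /*
+                * A read of all ones usually means the device is not
+                * responding (e.g. it dropped off the bus), so only
+                * interpret the status bits when the read looks valid.
+                */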
+               if (status != ~0) {
+                       if (status & SIS_CTRL_KERNEL_PANIC) {
+                               dev_err(&ctrl_info->pci_dev->dev,
+                                       "controller is offline: status code 0x%x\n",
+                                       readl(
+                                       &ctrl_info->registers->sis_mailbox[7]));
+                               return -ENODEV;
+                       }
+                       if (status & SIS_CTRL_KERNEL_UP)
+                               break;
+               }
+               if (time_after(jiffies, timeout))
+                       return -ETIMEDOUT;
+               msleep(SIS_CTRL_READY_POLL_INTERVAL_MSECS);
+       }
+
+       return 0;
+}
+
+bool sis_is_firmware_running(struct pqi_ctrl_info *ctrl_info)
+{
+       bool running;
+       u32 status;
+
+       status = readl(&ctrl_info->registers->sis_firmware_status);
+
+       if (status & SIS_CTRL_KERNEL_PANIC)
+               running = false;
+       else
+               running = true;
+
+       if (!running)
+               dev_err(&ctrl_info->pci_dev->dev,
+                       "controller is offline: status code 0x%x\n",
+                       readl(&ctrl_info->registers->sis_mailbox[7]));
+
+       return running;
+}
+
+/* used for passing command parameters/results when issuing SIS commands */
+struct sis_sync_cmd_params {
+       u32     mailbox[6];     /* mailboxes 0-5 */
+};
+
+static int sis_send_sync_cmd(struct pqi_ctrl_info *ctrl_info,
+       u32 cmd, struct sis_sync_cmd_params *params)
+{
+       struct pqi_ctrl_registers __iomem *registers;
+       unsigned int i;
+       unsigned long timeout;
+       u32 doorbell;
+       u32 cmd_status;
+
+       registers = ctrl_info->registers;
+
+       /* Write the command to mailbox 0. */
+       writel(cmd, &registers->sis_mailbox[0]);
+
+       /*
+        * Write the command parameters to mailboxes 1-4 (mailbox 5 is not used
+        * when sending a command to the controller).
+        */
+       for (i = 1; i <= 4; i++)
+               writel(params->mailbox[i], &registers->sis_mailbox[i]);
+
+       /* Clear the command doorbell. */
+       writel(SIS_CLEAR_CTRL_TO_HOST_DOORBELL,
+               &registers->sis_ctrl_to_host_doorbell_clear);
+
+       /* Disable doorbell interrupts by masking all interrupts. */
+       writel(~0, &registers->sis_interrupt_mask);
+
+       /*
+        * Force the completion of the interrupt mask register write before
+        * submitting the command.
+        */
+       readl(&registers->sis_interrupt_mask);
+
+       /* Submit the command to the controller. */
+       writel(SIS_CMD_READY, &registers->sis_host_to_ctrl_doorbell);
+
+       /*
+        * Poll for command completion.  Note that the call to msleep() is at
+        * the top of the loop in order to give the controller time to start
+        * processing the command before we start polling.
+        */
+       timeout = (SIS_CMD_COMPLETE_TIMEOUT_SECS * HZ) + jiffies;
+       while (1) {
+               msleep(SIS_CMD_COMPLETE_POLL_INTERVAL_MSECS);
+               doorbell = readl(&registers->sis_ctrl_to_host_doorbell);
+               if (doorbell & SIS_CMD_COMPLETE)
+                       break;
+               if (time_after(jiffies, timeout))
+                       return -ETIMEDOUT;
+       }
+
+       /* Read the command status from mailbox 0. */
+       cmd_status = readl(&registers->sis_mailbox[0]);
+       if (cmd_status != SIS_CMD_STATUS_SUCCESS) {
+               dev_err(&ctrl_info->pci_dev->dev,
+                       "SIS command failed for command 0x%x: status = 0x%x\n",
+                       cmd, cmd_status);
+               return -EINVAL;
+       }
+
+       /*
+        * The command completed successfully, so save the command status and
+        * read the values returned in mailboxes 1-5.
+        */
+       params->mailbox[0] = cmd_status;
+       for (i = 1; i < ARRAY_SIZE(params->mailbox); i++)
+               params->mailbox[i] = readl(&registers->sis_mailbox[i]);
+
+       return 0;
+}
+
+/*
+ * This function verifies that we are talking to a controller that speaks PQI.
+ */
+
+int sis_get_ctrl_properties(struct pqi_ctrl_info *ctrl_info)
+{
+       int rc;
+       u32 properties;
+       u32 extended_properties;
+       struct sis_sync_cmd_params params;
+
+       memset(&params, 0, sizeof(params));
+
+       rc = sis_send_sync_cmd(ctrl_info, SIS_CMD_GET_ADAPTER_PROPERTIES,
+               &params);
+       if (rc)
+               return rc;
+
+       properties = params.mailbox[1];
+
+       if (!(properties & SIS_EXTENDED_PROPERTIES_SUPPORTED))
+               return -ENODEV;
+
+       extended_properties = params.mailbox[4];
+
+       if ((extended_properties & SIS_REQUIRED_EXTENDED_PROPERTIES) !=
+               SIS_REQUIRED_EXTENDED_PROPERTIES)
+               return -ENODEV;
+
+       return 0;
+}
+
+int sis_get_pqi_capabilities(struct pqi_ctrl_info *ctrl_info)
+{
+       int rc;
+       struct sis_sync_cmd_params params;
+
+       memset(&params, 0, sizeof(params));
+
+       rc = sis_send_sync_cmd(ctrl_info, SIS_CMD_GET_PQI_CAPABILITIES,
+               &params);
+       if (rc)
+               return rc;
+
+       ctrl_info->max_sg_entries = params.mailbox[1];
+       ctrl_info->max_transfer_size = params.mailbox[2];
+       ctrl_info->max_outstanding_requests = params.mailbox[3];
+       ctrl_info->config_table_offset = params.mailbox[4];
+       ctrl_info->config_table_length = params.mailbox[5];
+
+       return 0;
+}
+
+int sis_init_base_struct_addr(struct pqi_ctrl_info *ctrl_info)
+{
+       int rc;
+       void *base_struct_unaligned;
+       struct sis_base_struct *base_struct;
+       struct sis_sync_cmd_params params;
+       unsigned long error_buffer_paddr;
+       dma_addr_t bus_address;
+
+       base_struct_unaligned = kzalloc(sizeof(*base_struct)
+               + SIS_BASE_STRUCT_ALIGNMENT - 1, GFP_KERNEL);
+       if (!base_struct_unaligned)
+               return -ENOMEM;
+
+       base_struct = PTR_ALIGN(base_struct_unaligned,
+               SIS_BASE_STRUCT_ALIGNMENT);
+       error_buffer_paddr = (unsigned long)ctrl_info->error_buffer_dma_handle;
+
+       put_unaligned_le32(SIS_BASE_STRUCT_REVISION, &base_struct->revision);
+       put_unaligned_le32(lower_32_bits(error_buffer_paddr),
+               &base_struct->error_buffer_paddr_low);
+       put_unaligned_le32(upper_32_bits(error_buffer_paddr),
+               &base_struct->error_buffer_paddr_high);
+       put_unaligned_le32(PQI_ERROR_BUFFER_ELEMENT_LENGTH,
+               &base_struct->error_buffer_element_length);
+       put_unaligned_le32(ctrl_info->max_io_slots,
+               &base_struct->error_buffer_num_elements);
+
+       bus_address = pci_map_single(ctrl_info->pci_dev, base_struct,
+               sizeof(*base_struct), PCI_DMA_TODEVICE);
+       if (pci_dma_mapping_error(ctrl_info->pci_dev, bus_address)) {
+               rc = -ENOMEM;
+               goto out;
+       }
+
+       memset(&params, 0, sizeof(params));
+       params.mailbox[1] = lower_32_bits((u64)bus_address);
+       params.mailbox[2] = upper_32_bits((u64)bus_address);
+       params.mailbox[3] = sizeof(*base_struct);
+
+       rc = sis_send_sync_cmd(ctrl_info, SIS_CMD_INIT_BASE_STRUCT_ADDRESS,
+               &params);
+
+       pci_unmap_single(ctrl_info->pci_dev, bus_address, sizeof(*base_struct),
+               PCI_DMA_TODEVICE);
+
+out:
+       kfree(base_struct_unaligned);
+
+       return rc;
+}
+
+/* Enable MSI-X interrupts on the controller. */
+
+void sis_enable_msix(struct pqi_ctrl_info *ctrl_info)
+{
+       u32 doorbell_register;
+
+       doorbell_register =
+               readl(&ctrl_info->registers->sis_host_to_ctrl_doorbell);
+       doorbell_register |= SIS_ENABLE_MSIX;
+
+       writel(doorbell_register,
+               &ctrl_info->registers->sis_host_to_ctrl_doorbell);
+}
+
+/* Disable MSI-X interrupts on the controller. */
+
+void sis_disable_msix(struct pqi_ctrl_info *ctrl_info)
+{
+       u32 doorbell_register;
+
+       doorbell_register =
+               readl(&ctrl_info->registers->sis_host_to_ctrl_doorbell);
+       doorbell_register &= ~SIS_ENABLE_MSIX;
+
+       writel(doorbell_register,
+               &ctrl_info->registers->sis_host_to_ctrl_doorbell);
+}
+
+void sis_soft_reset(struct pqi_ctrl_info *ctrl_info)
+{
+       writel(SIS_SOFT_RESET,
+               &ctrl_info->registers->sis_host_to_ctrl_doorbell);
+}
+
+#define SIS_MODE_READY_TIMEOUT_SECS    30
+
+int sis_reenable_sis_mode(struct pqi_ctrl_info *ctrl_info)
+{
+       int rc;
+       unsigned long timeout;
+       struct pqi_ctrl_registers __iomem *registers;
+       u32 doorbell;
+
+       registers = ctrl_info->registers;
+
+       writel(SIS_REENABLE_SIS_MODE,
+               &registers->sis_host_to_ctrl_doorbell);
+
+       rc = 0;
+       timeout = (SIS_MODE_READY_TIMEOUT_SECS * HZ) + jiffies;
+
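+       /*
+        * Note: unlike the other wait loops in this file, this one polls
+        * the doorbell back-to-back with no sleep between reads.
+        */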
+       while (1) {
+               doorbell = readl(&registers->sis_ctrl_to_host_doorbell);
+               if ((doorbell & SIS_REENABLE_SIS_MODE) == 0)
+                       break;
+               if (time_after(jiffies, timeout)) {
+                       rc = -ETIMEDOUT;
+                       break;
+               }
+       }
+
+       if (rc)
+               dev_err(&ctrl_info->pci_dev->dev,
+                       "re-enabling SIS mode failed\n");
+
+       return rc;
+}
+
+void sis_write_driver_scratch(struct pqi_ctrl_info *ctrl_info, u32 value)
+{
+       writel(value, &ctrl_info->registers->sis_driver_scratch);
+}
+
+u32 sis_read_driver_scratch(struct pqi_ctrl_info *ctrl_info)
+{
+       return readl(&ctrl_info->registers->sis_driver_scratch);
+}
+
+static void __attribute__((unused)) verify_structures(void)
+{
+       BUILD_BUG_ON(offsetof(struct sis_base_struct,
+               revision) != 0x0);
+       BUILD_BUG_ON(offsetof(struct sis_base_struct,
+               flags) != 0x4);
+       BUILD_BUG_ON(offsetof(struct sis_base_struct,
+               error_buffer_paddr_low) != 0x8);
+       BUILD_BUG_ON(offsetof(struct sis_base_struct,
+               error_buffer_paddr_high) != 0xc);
+       BUILD_BUG_ON(offsetof(struct sis_base_struct,
+               error_buffer_element_length) != 0x10);
+       BUILD_BUG_ON(offsetof(struct sis_base_struct,
+               error_buffer_num_elements) != 0x14);
+       BUILD_BUG_ON(sizeof(struct sis_base_struct) != 0x18);
+}
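
One detail in sis_init_base_struct_addr() above is easy to miss: kzalloc() does not guarantee SIS_BASE_STRUCT_ALIGNMENT (16-byte) alignment, so the function over-allocates by alignment - 1 bytes, derives an aligned pointer with PTR_ALIGN(), and keeps the original pointer around for kfree(). The idiom in isolation, as a hedged sketch with hypothetical names:

    #include <linux/kernel.h>       /* PTR_ALIGN() */
    #include <linux/slab.h>

    /*
     * Return an 'align'-aligned zeroed buffer of 'size' bytes; '*raw_out'
     * receives the pointer that must later be passed to kfree().
     */
    static void *example_alloc_aligned(size_t size, size_t align,
            void **raw_out)
    {
            void *raw = kzalloc(size + align - 1, GFP_KERNEL);

            if (!raw)
                    return NULL;

            *raw_out = raw;
            return PTR_ALIGN(raw, align);
    }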
diff --git a/drivers/scsi/smartpqi/smartpqi_sis.h b/drivers/scsi/smartpqi/smartpqi_sis.h
new file mode 100644 (file)
index 0000000..bd6e7b0
--- /dev/null
@@ -0,0 +1,34 @@
+/*
+ *    driver for Microsemi PQI-based storage controllers
+ *    Copyright (c) 2016 Microsemi Corporation
+ *    Copyright (c) 2016 PMC-Sierra, Inc.
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; version 2 of the License.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *    NON INFRINGEMENT.  See the GNU General Public License for more details.
+ *
+ *    Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
+ *
+ */
+
+#if !defined(_SMARTPQI_SIS_H)
+#define _SMARTPQI_SIS_H
+
+int sis_wait_for_ctrl_ready(struct pqi_ctrl_info *ctrl_info);
+bool sis_is_firmware_running(struct pqi_ctrl_info *ctrl_info);
+int sis_get_ctrl_properties(struct pqi_ctrl_info *ctrl_info);
+int sis_get_pqi_capabilities(struct pqi_ctrl_info *ctrl_info);
+int sis_init_base_struct_addr(struct pqi_ctrl_info *ctrl_info);
+void sis_enable_msix(struct pqi_ctrl_info *ctrl_info);
+void sis_disable_msix(struct pqi_ctrl_info *ctrl_info);
+void sis_soft_reset(struct pqi_ctrl_info *ctrl_info);
+int sis_reenable_sis_mode(struct pqi_ctrl_info *ctrl_info);
+void sis_write_driver_scratch(struct pqi_ctrl_info *ctrl_info, u32 value);
+u32 sis_read_driver_scratch(struct pqi_ctrl_info *ctrl_info);
+
+#endif /* _SMARTPQI_SIS_H */
index ed179348de80b6d39a5600caf2784f37a3413237..bed2bbd6b92304cec9eabb73245224782da7fc21 100644 (file)
@@ -83,7 +83,7 @@ static int sr_init_command(struct scsi_cmnd *SCpnt);
 static int sr_done(struct scsi_cmnd *);
 static int sr_runtime_suspend(struct device *dev);
 
-static struct dev_pm_ops sr_pm_ops = {
+static const struct dev_pm_ops sr_pm_ops = {
        .runtime_suspend        = sr_runtime_suspend,
 };
 
index 70db6d999ca386c011ede2dd2c459eae84a086c2..dc03e47f7c5897c0767ad4f09df43ae97d2ef58f 100644 (file)
@@ -15,6 +15,7 @@
 
 #include "ufshcd-dwc.h"
 #include "ufshci-dwc.h"
+#include "tc-dwc-g210.h"
 
 /**
  * tc_dwc_g210_setup_40bit_rmmi()
index 2302f3ce5f860fcc94ae2f86fd1fcaea41a6a876..6bf1f8a022b10f6499dde0901b051e035d650c1a 100644 (file)
@@ -39,19 +39,28 @@ struct dk_cxlflash_hdr {
  * at this time, this provides future flexibility.
  */
 #define DK_CXLFLASH_ALL_PORTS_ACTIVE   0x0000000000000001ULL
+#define DK_CXLFLASH_APP_CLOSE_ADAP_FD  0x0000000000000002ULL
 
 /*
- * Notes:
- * -----
+ * General Notes:
+ * -------------
  * The 'context_id' field of all ioctl structures contains the context
  * identifier for a context in the lower 32-bits (upper 32-bits are not
  * to be used when identifying a context to the AFU). That said, the value
  * in its entirety (all 64-bits) is to be treated as an opaque cookie and
  * should be presented as such when issuing ioctls.
+ */
+
+/*
+ * DK_CXLFLASH_ATTACH Notes:
+ * ------------------------
+ * Read/write access permissions are specified via the O_RDONLY, O_WRONLY,
+ * and O_RDWR flags defined in the fcntl.h header file.
  *
- * For DK_CXLFLASH_ATTACH ioctl, user specifies read/write access
- * permissions via the O_RDONLY, O_WRONLY, and O_RDWR flags defined in
- * the fcntl.h header file.
+ * A valid adapter file descriptor (fd >= 0) is only returned on the initial
+ * attach (successful) of a context. When a context is shared (reused), the user
+ * is expected to already 'know' the adapter file descriptor associated with the
+ * context.
  */
 #define DK_CXLFLASH_ATTACH_REUSE_CONTEXT       0x8000000000000000ULL
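
In application terms, the notes above boil down to: open the device with the access mode you want, attach once to obtain both a context token and an adapter file descriptor, and set DK_CXLFLASH_ATTACH_REUSE_CONTEXT on later attaches, which will not hand back a new adapter fd. A hedged userspace sketch — the device path, helper name, and DK_CXLFLASH_VERSION_0 macro are assumptions, and field names such as hdr.flags, adap_fd, and context_id are taken from the cxlflash uapi structures as best understood:

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <scsi/cxlflash_ioctl.h>        /* assumed install location */

    int example_attach(void)                /* hypothetical helper */
    {
            struct dk_cxlflash_attach attach = { 0 };
            int fd = open("/dev/sdc", O_RDWR);      /* hypothetical disk */

            attach.hdr.version = DK_CXLFLASH_VERSION_0;     /* assumed macro */
            attach.hdr.flags = O_RDWR;      /* access mode, per the notes above */

            if (fd < 0 || ioctl(fd, DK_CXLFLASH_ATTACH, &attach))
                    return -1;

            /*
             * First attach: attach.adap_fd is a valid (>= 0) adapter fd and
             * attach.context_id identifies the context. A later attach that
             * sets DK_CXLFLASH_ATTACH_REUSE_CONTEXT in hdr.flags reuses the
             * context and does not return a new adapter fd.
             */
            return 0;
    }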
 