Merge branch 'scsi-fixes'
authorJames Bottomley <JBottomley@Parallels.com>
Sat, 23 Jul 2011 17:09:03 +0000 (21:09 +0400)
committerJames Bottomley <JBottomley@Parallels.com>
Sat, 23 Jul 2011 17:09:03 +0000 (21:09 +0400)
91 files changed:
MAINTAINERS
drivers/firmware/iscsi_ibft.c
drivers/scsi/aha152x.c
drivers/scsi/atari_NCR5380.c
drivers/scsi/atari_scsi.c
drivers/scsi/be2iscsi/be_main.c
drivers/scsi/bfa/Makefile
drivers/scsi/bfa/bfa.h
drivers/scsi/bfa/bfa_core.c
drivers/scsi/bfa/bfa_defs.h
drivers/scsi/bfa/bfa_defs_fcs.h
drivers/scsi/bfa/bfa_defs_svc.h
drivers/scsi/bfa/bfa_fc.h
drivers/scsi/bfa/bfa_fcbuild.c
drivers/scsi/bfa/bfa_fcbuild.h
drivers/scsi/bfa/bfa_fcpim.c
drivers/scsi/bfa/bfa_fcpim.h
drivers/scsi/bfa/bfa_fcs.c
drivers/scsi/bfa/bfa_fcs.h
drivers/scsi/bfa/bfa_fcs_fcpim.c
drivers/scsi/bfa/bfa_fcs_lport.c
drivers/scsi/bfa/bfa_fcs_rport.c
drivers/scsi/bfa/bfa_hw_cb.c
drivers/scsi/bfa/bfa_hw_ct.c
drivers/scsi/bfa/bfa_ioc.c
drivers/scsi/bfa/bfa_ioc.h
drivers/scsi/bfa/bfa_ioc_cb.c
drivers/scsi/bfa/bfa_ioc_ct.c
drivers/scsi/bfa/bfa_modules.h
drivers/scsi/bfa/bfa_port.c
drivers/scsi/bfa/bfa_port.h
drivers/scsi/bfa/bfa_svc.c
drivers/scsi/bfa/bfa_svc.h
drivers/scsi/bfa/bfad.c
drivers/scsi/bfa/bfad_attr.c
drivers/scsi/bfa/bfad_bsg.c [new file with mode: 0644]
drivers/scsi/bfa/bfad_bsg.h [new file with mode: 0644]
drivers/scsi/bfa/bfad_debugfs.c
drivers/scsi/bfa/bfad_drv.h
drivers/scsi/bfa/bfad_im.c
drivers/scsi/bfa/bfad_im.h
drivers/scsi/bfa/bfi.h
drivers/scsi/bfa/bfi_cbreg.h [deleted file]
drivers/scsi/bfa/bfi_ctreg.h [deleted file]
drivers/scsi/bfa/bfi_ms.h
drivers/scsi/bfa/bfi_reg.h [new file with mode: 0644]
drivers/scsi/bnx2fc/bnx2fc.h
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
drivers/scsi/bnx2fc/bnx2fc_hwi.c
drivers/scsi/bnx2fc/bnx2fc_io.c
drivers/scsi/bnx2i/57xx_iscsi_constants.h
drivers/scsi/bnx2i/57xx_iscsi_hsi.h
drivers/scsi/bnx2i/bnx2i.h
drivers/scsi/bnx2i/bnx2i_hwi.c
drivers/scsi/bnx2i/bnx2i_init.c
drivers/scsi/bnx2i/bnx2i_iscsi.c
drivers/scsi/bnx2i/bnx2i_sysfs.c
drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
drivers/scsi/fcoe/fcoe.c
drivers/scsi/fnic/fnic.h
drivers/scsi/fnic/fnic_main.c
drivers/scsi/fnic/fnic_scsi.c
drivers/scsi/iscsi_boot_sysfs.c
drivers/scsi/iscsi_tcp.c
drivers/scsi/libfc/fc_exch.c
drivers/scsi/libfc/fc_lport.c
drivers/scsi/libfc/fc_rport.c
drivers/scsi/libiscsi.c
drivers/scsi/libiscsi_tcp.c
drivers/scsi/lpfc/lpfc_debugfs.c
drivers/scsi/mac_scsi.c
drivers/scsi/mpt2sas/mpi/mpi2.h
drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
drivers/scsi/mpt2sas/mpi/mpi2_init.h
drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
drivers/scsi/mpt2sas/mpt2sas_base.c
drivers/scsi/mpt2sas/mpt2sas_base.h
drivers/scsi/mpt2sas/mpt2sas_ctl.c
drivers/scsi/mpt2sas/mpt2sas_debug.h
drivers/scsi/mpt2sas/mpt2sas_scsih.c
drivers/scsi/mpt2sas/mpt2sas_transport.c
drivers/scsi/scsi_devinfo.c
drivers/scsi/ses.c
drivers/scsi/sun3_NCR5380.c
drivers/scsi/sun3_scsi.c
drivers/scsi/sun3_scsi_vme.c
drivers/target/tcm_fc/tfc_cmd.c
include/linux/iscsi_boot_sysfs.h
include/scsi/iscsi_proto.h
include/scsi/libfc.h
include/scsi/libiscsi.h

index 187282d..4851a79 100644 (file)
@@ -1550,6 +1550,12 @@ L:       linux-wireless@vger.kernel.org
 S:     Supported
 F:     drivers/staging/brcm80211/
 
+BROADCOM BNX2FC 10 GIGABIT FCOE DRIVER
+M:     Bhanu Prakash Gollapudi <bprakash@broadcom.com>
+L:     linux-scsi@vger.kernel.org
+S:     Supported
+F:     drivers/scsi/bnx2fc/
+
 BROCADE BFA FC SCSI DRIVER
 M:     Jing Huang <huangj@brocade.com>
 L:     linux-scsi@vger.kernel.org
@@ -1772,7 +1778,8 @@ F:        include/linux/clk.h
 
 CISCO FCOE HBA DRIVER
 M:     Abhijeet Joglekar <abjoglek@cisco.com>
-M:     Joe Eykholt <jeykholt@cisco.com>
+M:     Venkata Siva Vijayendra Bhamidipati <vbhamidi@cisco.com>
+M:     Brian Uchino <buchino@cisco.com>
 L:     linux-scsi@vger.kernel.org
 S:     Supported
 F:     drivers/scsi/fnic/
index ce33f46..c811cb1 100644 (file)
@@ -566,6 +566,11 @@ static mode_t __init ibft_check_initiator_for(void *data, int type)
        return rc;
 }
 
+static void ibft_kobj_release(void *data)
+{
+       kfree(data);
+}
+
 /*
  * Helper function for ibft_register_kobjects.
  */
@@ -595,7 +600,8 @@ static int __init ibft_create_kobject(struct acpi_table_ibft *header,
                boot_kobj = iscsi_boot_create_initiator(boot_kset, hdr->index,
                                                ibft_kobj,
                                                ibft_attr_show_initiator,
-                                               ibft_check_initiator_for);
+                                               ibft_check_initiator_for,
+                                               ibft_kobj_release);
                if (!boot_kobj) {
                        rc = -ENOMEM;
                        goto free_ibft_obj;
@@ -610,7 +616,8 @@ static int __init ibft_create_kobject(struct acpi_table_ibft *header,
                boot_kobj = iscsi_boot_create_ethernet(boot_kset, hdr->index,
                                                       ibft_kobj,
                                                       ibft_attr_show_nic,
-                                                      ibft_check_nic_for);
+                                                      ibft_check_nic_for,
+                                                      ibft_kobj_release);
                if (!boot_kobj) {
                        rc = -ENOMEM;
                        goto free_ibft_obj;
@@ -625,7 +632,8 @@ static int __init ibft_create_kobject(struct acpi_table_ibft *header,
                boot_kobj = iscsi_boot_create_target(boot_kset, hdr->index,
                                                     ibft_kobj,
                                                     ibft_attr_show_target,
-                                                    ibft_check_tgt_for);
+                                                    ibft_check_tgt_for,
+                                                    ibft_kobj_release);
                if (!boot_kobj) {
                        rc = -ENOMEM;
                        goto free_ibft_obj;
index c5169f0..f17c92c 100644 (file)
@@ -422,10 +422,19 @@ MODULE_PARM_DESC(aha152x1, "parameters for second controller");
 
 #ifdef __ISAPNP__
 static struct isapnp_device_id id_table[] __devinitdata = {
-       { ISAPNP_ANY_ID, ISAPNP_ANY_ID,
-               ISAPNP_VENDOR('A','D','P'), ISAPNP_FUNCTION(0x1505), 0 },
-       { ISAPNP_ANY_ID, ISAPNP_ANY_ID,
-               ISAPNP_VENDOR('A','D','P'), ISAPNP_FUNCTION(0x1530), 0 },
+       { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x1502), 0 },
+       { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x1505), 0 },
+       { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x1510), 0 },
+       { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x1515), 0 },
+       { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x1520), 0 },
+       { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x2015), 0 },
+       { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x1522), 0 },
+       { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x2215), 0 },
+       { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x1530), 0 },
+       { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x3015), 0 },
+       { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x1532), 0 },
+       { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x3215), 0 },
+       { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x6360), 0 },
        { ISAPNP_DEVICE_SINGLE_END, }
 };
 MODULE_DEVICE_TABLE(isapnp, id_table);
index ea439f9..2db79b4 100644 (file)
@@ -892,6 +892,11 @@ static int __init NCR5380_init(struct Scsi_Host *instance, int flags)
        return 0;
 }
 
+static void NCR5380_exit(struct Scsi_Host *instance)
+{
+       /* Empty, as we didn't schedule any delayed work */
+}
+
 /*
  * Function : int NCR5380_queue_command (Scsi_Cmnd *cmd,
  *     void (*done)(Scsi_Cmnd *))
@@ -914,7 +919,6 @@ static int NCR5380_queue_command_lck(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *))
 {
        SETUP_HOSTDATA(cmd->device->host);
        Scsi_Cmnd *tmp;
-       int oldto;
        unsigned long flags;
 
 #if (NDEBUG & NDEBUG_NO_WRITE)
index 3e8658e..04a154f 100644 (file)
@@ -730,6 +730,7 @@ int atari_scsi_release(struct Scsi_Host *sh)
                free_irq(IRQ_TT_MFP_SCSI, sh);
        if (atari_dma_buffer)
                atari_stram_free(atari_dma_buffer);
+       NCR5380_exit(sh);
        return 1;
 }
 
index 94b9a07..0a9bdfa 100644 (file)
@@ -215,73 +215,62 @@ unlock:
 static ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf)
 {
        struct beiscsi_hba *phba = data;
+       struct mgmt_session_info *boot_sess = &phba->boot_sess;
+       struct mgmt_conn_info *boot_conn = &boot_sess->conn_list[0];
        char *str = buf;
        int rc;
 
        switch (type) {
        case ISCSI_BOOT_TGT_NAME:
                rc = sprintf(buf, "%.*s\n",
-                               (int)strlen(phba->boot_sess.target_name),
-                               (char *)&phba->boot_sess.target_name);
+                           (int)strlen(boot_sess->target_name),
+                           (char *)&boot_sess->target_name);
                break;
        case ISCSI_BOOT_TGT_IP_ADDR:
-               if (phba->boot_sess.conn_list[0].dest_ipaddr.ip_type == 0x1)
+               if (boot_conn->dest_ipaddr.ip_type == 0x1)
                        rc = sprintf(buf, "%pI4\n",
-                               (char *)&phba->boot_sess.conn_list[0].
-                               dest_ipaddr.ip_address);
+                               (char *)&boot_conn->dest_ipaddr.ip_address);
                else
                        rc = sprintf(str, "%pI6\n",
-                               (char *)&phba->boot_sess.conn_list[0].
-                               dest_ipaddr.ip_address);
+                               (char *)&boot_conn->dest_ipaddr.ip_address);
                break;
        case ISCSI_BOOT_TGT_PORT:
-               rc = sprintf(str, "%d\n", phba->boot_sess.conn_list[0].
-                                 dest_port);
+               rc = sprintf(str, "%d\n", boot_conn->dest_port);
                break;
 
        case ISCSI_BOOT_TGT_CHAP_NAME:
                rc = sprintf(str,  "%.*s\n",
-                                     phba->boot_sess.conn_list[0].
-                                     negotiated_login_options.auth_data.chap.
-                                     target_chap_name_length,
-                                     (char *)&phba->boot_sess.conn_list[0].
-                                     negotiated_login_options.auth_data.chap.
-                                     target_chap_name);
+                            boot_conn->negotiated_login_options.auth_data.chap.
+                            target_chap_name_length,
+                            (char *)&boot_conn->negotiated_login_options.
+                            auth_data.chap.target_chap_name);
                break;
        case ISCSI_BOOT_TGT_CHAP_SECRET:
                rc = sprintf(str,  "%.*s\n",
-                                     phba->boot_sess.conn_list[0].
-                                     negotiated_login_options.auth_data.chap.
-                                     target_secret_length,
-                                     (char *)&phba->boot_sess.conn_list[0].
-                                     negotiated_login_options.auth_data.chap.
-                                     target_secret);
-
+                            boot_conn->negotiated_login_options.auth_data.chap.
+                            target_secret_length,
+                            (char *)&boot_conn->negotiated_login_options.
+                            auth_data.chap.target_secret);
                break;
        case ISCSI_BOOT_TGT_REV_CHAP_NAME:
                rc = sprintf(str,  "%.*s\n",
-                                     phba->boot_sess.conn_list[0].
-                                     negotiated_login_options.auth_data.chap.
-                                     intr_chap_name_length,
-                                     (char *)&phba->boot_sess.conn_list[0].
-                                     negotiated_login_options.auth_data.chap.
-                                     intr_chap_name);
-
+                            boot_conn->negotiated_login_options.auth_data.chap.
+                            intr_chap_name_length,
+                            (char *)&boot_conn->negotiated_login_options.
+                            auth_data.chap.intr_chap_name);
                break;
        case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
-                       rc = sprintf(str,  "%.*s\n",
-                                     phba->boot_sess.conn_list[0].
-                                     negotiated_login_options.auth_data.chap.
-                                     intr_secret_length,
-                                     (char *)&phba->boot_sess.conn_list[0].
-                                     negotiated_login_options.auth_data.chap.
-                                     intr_secret);
+               rc = sprintf(str,  "%.*s\n",
+                            boot_conn->negotiated_login_options.auth_data.chap.
+                            intr_secret_length,
+                            (char *)&boot_conn->negotiated_login_options.
+                            auth_data.chap.intr_secret);
                break;
        case ISCSI_BOOT_TGT_FLAGS:
-                       rc = sprintf(str, "2\n");
+               rc = sprintf(str, "2\n");
                break;
        case ISCSI_BOOT_TGT_NIC_ASSOC:
-                       rc = sprintf(str, "0\n");
+               rc = sprintf(str, "0\n");
                break;
        default:
                rc = -ENOSYS;
@@ -315,10 +304,10 @@ static ssize_t beiscsi_show_boot_eth_info(void *data, int type, char *buf)
 
        switch (type) {
        case ISCSI_BOOT_ETH_FLAGS:
-                       rc = sprintf(str, "2\n");
+               rc = sprintf(str, "2\n");
                break;
        case ISCSI_BOOT_ETH_INDEX:
-                       rc = sprintf(str, "0\n");
+               rc = sprintf(str, "0\n");
                break;
        case ISCSI_BOOT_ETH_MAC:
                rc  = beiscsi_get_macaddr(buf, phba);
@@ -391,40 +380,6 @@ static mode_t beiscsi_eth_get_attr_visibility(void *data, int type)
        return rc;
 }
 
-static int beiscsi_setup_boot_info(struct beiscsi_hba *phba)
-{
-       struct iscsi_boot_kobj *boot_kobj;
-
-       phba->boot_kset = iscsi_boot_create_host_kset(phba->shost->host_no);
-       if (!phba->boot_kset)
-               return -ENOMEM;
-
-       /* get boot info using mgmt cmd */
-       boot_kobj = iscsi_boot_create_target(phba->boot_kset, 0, phba,
-                                            beiscsi_show_boot_tgt_info,
-                                            beiscsi_tgt_get_attr_visibility);
-       if (!boot_kobj)
-               goto free_kset;
-
-       boot_kobj = iscsi_boot_create_initiator(phba->boot_kset, 0, phba,
-                                            beiscsi_show_boot_ini_info,
-                                            beiscsi_ini_get_attr_visibility);
-       if (!boot_kobj)
-               goto free_kset;
-
-       boot_kobj = iscsi_boot_create_ethernet(phba->boot_kset, 0, phba,
-                                            beiscsi_show_boot_eth_info,
-                                            beiscsi_eth_get_attr_visibility);
-       if (!boot_kobj)
-               goto free_kset;
-       return 0;
-
-free_kset:
-       if (phba->boot_kset)
-               iscsi_boot_destroy_kset(phba->boot_kset);
-       return -ENOMEM;
-}
-
 /*------------------- PCI Driver operations and data ----------------- */
 static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
@@ -483,14 +438,6 @@ static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
        if (iscsi_host_add(shost, &phba->pcidev->dev))
                goto free_devices;
 
-       if (beiscsi_setup_boot_info(phba))
-               /*
-                * log error but continue, because we may not be using
-                * iscsi boot.
-                */
-               shost_printk(KERN_ERR, phba->shost, "Could not set up "
-               "iSCSI boot info.");
-
        return phba;
 
 free_devices:
@@ -3511,6 +3458,7 @@ static int beiscsi_get_boot_info(struct beiscsi_hba *phba)
        unsigned int tag, wrb_num;
        unsigned short status, extd_status;
        struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
+       int ret = -ENOMEM;
 
        tag = beiscsi_get_boot_target(phba);
        if (!tag) {
@@ -3535,8 +3483,7 @@ static int beiscsi_get_boot_info(struct beiscsi_hba *phba)
        boot_resp = embedded_payload(wrb);
 
        if (boot_resp->boot_session_handle < 0) {
-               printk(KERN_ERR "No Boot Session for this pci_func,"
-                       "session Hndl = %d\n", boot_resp->boot_session_handle);
+               shost_printk(KERN_INFO, phba->shost, "No Boot Session.\n");
                return -ENXIO;
        }
 
@@ -3574,14 +3521,70 @@ static int beiscsi_get_boot_info(struct beiscsi_hba *phba)
        wrb = queue_get_wrb(mccq, wrb_num);
        free_mcc_tag(&phba->ctrl, tag);
        session_resp = nonemb_cmd.va ;
+
        memcpy(&phba->boot_sess, &session_resp->session_info,
               sizeof(struct mgmt_session_info));
-       pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
-                   nonemb_cmd.va, nonemb_cmd.dma);
-       return 0;
+       ret = 0;
+
 boot_freemem:
        pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
                    nonemb_cmd.va, nonemb_cmd.dma);
+       return ret;
+}
+
+static void beiscsi_boot_release(void *data)
+{
+       struct beiscsi_hba *phba = data;
+
+       scsi_host_put(phba->shost);
+}
+
+static int beiscsi_setup_boot_info(struct beiscsi_hba *phba)
+{
+       struct iscsi_boot_kobj *boot_kobj;
+
+       /* get boot info using mgmt cmd */
+       if (beiscsi_get_boot_info(phba))
+               /* Try to see if we can carry on without this */
+               return 0;
+
+       phba->boot_kset = iscsi_boot_create_host_kset(phba->shost->host_no);
+       if (!phba->boot_kset)
+               return -ENOMEM;
+
+       /* get a ref because the show function will ref the phba */
+       if (!scsi_host_get(phba->shost))
+               goto free_kset;
+       boot_kobj = iscsi_boot_create_target(phba->boot_kset, 0, phba,
+                                            beiscsi_show_boot_tgt_info,
+                                            beiscsi_tgt_get_attr_visibility,
+                                            beiscsi_boot_release);
+       if (!boot_kobj)
+               goto put_shost;
+
+       if (!scsi_host_get(phba->shost))
+               goto free_kset;
+       boot_kobj = iscsi_boot_create_initiator(phba->boot_kset, 0, phba,
+                                               beiscsi_show_boot_ini_info,
+                                               beiscsi_ini_get_attr_visibility,
+                                               beiscsi_boot_release);
+       if (!boot_kobj)
+               goto put_shost;
+
+       if (!scsi_host_get(phba->shost))
+               goto free_kset;
+       boot_kobj = iscsi_boot_create_ethernet(phba->boot_kset, 0, phba,
+                                              beiscsi_show_boot_eth_info,
+                                              beiscsi_eth_get_attr_visibility,
+                                              beiscsi_boot_release);
+       if (!boot_kobj)
+               goto put_shost;
+       return 0;
+
+put_shost:
+       scsi_host_put(phba->shost);
+free_kset:
+       iscsi_boot_destroy_kset(phba->boot_kset);
        return -ENOMEM;
 }
 
@@ -3963,11 +3966,10 @@ static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
        }
        memcpy(&io_task->cmd_bhs->iscsi_data_pdu.
               dw[offsetof(struct amap_pdu_data_out, lun) / 32],
-              io_task->cmd_bhs->iscsi_hdr.lun, sizeof(struct scsi_lun));
+              &io_task->cmd_bhs->iscsi_hdr.lun, sizeof(struct scsi_lun));
 
        AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb,
-                     cpu_to_be16((unsigned short)io_task->cmd_bhs->iscsi_hdr.
-                                 lun[0]));
+                     cpu_to_be16(*(unsigned short *)&io_task->cmd_bhs->iscsi_hdr.lun));
        AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, xferlen);
        AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
                      io_task->pwrb_handle->wrb_index);
@@ -4150,8 +4152,7 @@ static void beiscsi_remove(struct pci_dev *pcidev)
                            phba->ctrl.mbox_mem_alloced.size,
                            phba->ctrl.mbox_mem_alloced.va,
                            phba->ctrl.mbox_mem_alloced.dma);
-       if (phba->boot_kset)
-               iscsi_boot_destroy_kset(phba->boot_kset);
+       iscsi_boot_destroy_kset(phba->boot_kset);
        iscsi_host_remove(phba->shost);
        pci_dev_put(phba->pcidev);
        iscsi_host_free(phba->shost);
@@ -4310,11 +4311,15 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
                goto free_blkenbld;
        }
        hwi_enable_intr(phba);
-       ret = beiscsi_get_boot_info(phba);
-       if (ret < 0) {
-               shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
-                            "No Boot Devices !!!!!\n");
-       }
+
+       if (beiscsi_setup_boot_info(phba))
+               /*
+                * log error but continue, because we may not be using
+                * iscsi boot.
+                */
+               shost_printk(KERN_ERR, phba->shost, "Could not set up "
+                            "iSCSI boot info.");
+
        SE_DEBUG(DBG_LVL_8, "\n\n\n SUCCESS - DRIVER LOADED\n\n\n");
        return 0;
 
index 4ce6f49..475cf92 100644 (file)
@@ -1,6 +1,6 @@
 obj-$(CONFIG_SCSI_BFA_FC) := bfa.o
 
-bfa-y := bfad.o bfad_im.o bfad_attr.o bfad_debugfs.o
+bfa-y := bfad.o bfad_im.o bfad_attr.o bfad_debugfs.o bfad_bsg.o
 bfa-y += bfa_ioc.o bfa_ioc_cb.o bfa_ioc_ct.o bfa_hw_cb.o bfa_hw_ct.o
 bfa-y += bfa_fcs.o bfa_fcs_lport.o bfa_fcs_rport.o bfa_fcs_fcpim.o bfa_fcbuild.o
 bfa-y += bfa_port.o bfa_fcpim.o bfa_core.o bfa_svc.o
index 7be6b5a..3b0af11 100644 (file)
@@ -27,7 +27,6 @@
 struct bfa_s;
 
 typedef void (*bfa_isr_func_t) (struct bfa_s *bfa, struct bfi_msg_s *m);
-typedef void    (*bfa_cb_cbfn_t) (void *cbarg, bfa_boolean_t complete);
 
 /*
  * Interrupt message handlers
@@ -54,7 +53,8 @@ void bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m);
         ((void *)((struct bfi_msg_s *)((__bfa)->iocfc.req_cq_ba[__reqq].kva) \
                   + bfa_reqq_pi((__bfa), (__reqq)))))
 
-#define bfa_reqq_produce(__bfa, __reqq)        do {                            \
+#define bfa_reqq_produce(__bfa, __reqq, __mh)  do {                    \
+               (__mh).mtag.h2i.qid     = (__bfa)->iocfc.hw_qid[__reqq];\
                (__bfa)->iocfc.req_cq_pi[__reqq]++;                     \
                (__bfa)->iocfc.req_cq_pi[__reqq] &=                     \
                        ((__bfa)->iocfc.cfg.drvcfg.num_reqq_elems - 1); \
@@ -75,16 +75,6 @@ void bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m);
        (__index) &= ((__size) - 1);                    \
 } while (0)
 
-/*
- * Queue element to wait for room in request queue. FIFO order is
- * maintained when fullfilling requests.
- */
-struct bfa_reqq_wait_s {
-       struct list_head        qe;
-       void            (*qresume) (void *cbarg);
-       void            *cbarg;
-};
-
 /*
  * Circular queue usage assignments
  */
@@ -128,18 +118,6 @@ bfa_reqq_winit(struct bfa_reqq_wait_s *wqe, void (*qresume) (void *cbarg),
 
 #define bfa_reqq_wcancel(__wqe)        list_del(&(__wqe)->qe)
 
-
-/*
- * Generic BFA callback element.
- */
-struct bfa_cb_qe_s {
-       struct list_head         qe;
-       bfa_cb_cbfn_t  cbfn;
-       bfa_boolean_t   once;
-       u32             rsvd;
-       void           *cbarg;
-};
-
 #define bfa_cb_queue(__bfa, __hcb_qe, __cbfn, __cbarg) do {    \
                (__hcb_qe)->cbfn  = (__cbfn);      \
                (__hcb_qe)->cbarg = (__cbarg);      \
@@ -172,44 +150,14 @@ struct bfa_pciid_s {
 
 extern char     bfa_version[];
 
-/*
- * BFA memory resources
- */
-enum bfa_mem_type {
-       BFA_MEM_TYPE_KVA = 1,   /*  Kernel Virtual Memory *(non-dma-able) */
-       BFA_MEM_TYPE_DMA = 2,   /*  DMA-able memory */
-       BFA_MEM_TYPE_MAX = BFA_MEM_TYPE_DMA,
-};
-
-struct bfa_mem_elem_s {
-       enum bfa_mem_type mem_type;     /* see enum bfa_mem_type */
-       u32     mem_len;        /*  Total Length in Bytes       */
-       u8              *kva;           /*  kernel virtual address      */
-       u64     dma;            /*  dma address if DMA memory   */
-       u8              *kva_curp;      /*  kva allocation cursor       */
-       u64     dma_curp;       /*  dma allocation cursor       */
-};
-
-struct bfa_meminfo_s {
-       struct bfa_mem_elem_s meminfo[BFA_MEM_TYPE_MAX];
-};
-#define bfa_meminfo_kva(_m)                            \
-       ((_m)->meminfo[BFA_MEM_TYPE_KVA - 1].kva_curp)
-#define bfa_meminfo_dma_virt(_m)                       \
-       ((_m)->meminfo[BFA_MEM_TYPE_DMA - 1].kva_curp)
-#define bfa_meminfo_dma_phys(_m)                       \
-       ((_m)->meminfo[BFA_MEM_TYPE_DMA - 1].dma_curp)
-
 struct bfa_iocfc_regs_s {
        void __iomem    *intr_status;
        void __iomem    *intr_mask;
        void __iomem    *cpe_q_pi[BFI_IOC_MAX_CQS];
        void __iomem    *cpe_q_ci[BFI_IOC_MAX_CQS];
-       void __iomem    *cpe_q_depth[BFI_IOC_MAX_CQS];
        void __iomem    *cpe_q_ctrl[BFI_IOC_MAX_CQS];
        void __iomem    *rme_q_ci[BFI_IOC_MAX_CQS];
        void __iomem    *rme_q_pi[BFI_IOC_MAX_CQS];
-       void __iomem    *rme_q_depth[BFI_IOC_MAX_CQS];
        void __iomem    *rme_q_ctrl[BFI_IOC_MAX_CQS];
 };
 
@@ -231,25 +179,55 @@ struct bfa_hwif_s {
        void (*hw_reqq_ack)(struct bfa_s *bfa, int reqq);
        void (*hw_rspq_ack)(struct bfa_s *bfa, int rspq);
        void (*hw_msix_init)(struct bfa_s *bfa, int nvecs);
-       void (*hw_msix_install)(struct bfa_s *bfa);
+       void (*hw_msix_ctrl_install)(struct bfa_s *bfa);
+       void (*hw_msix_queue_install)(struct bfa_s *bfa);
        void (*hw_msix_uninstall)(struct bfa_s *bfa);
        void (*hw_isr_mode_set)(struct bfa_s *bfa, bfa_boolean_t msix);
        void (*hw_msix_getvecs)(struct bfa_s *bfa, u32 *vecmap,
                                u32 *nvecs, u32 *maxvec);
        void (*hw_msix_get_rme_range) (struct bfa_s *bfa, u32 *start,
                                       u32 *end);
+       int     cpe_vec_q0;
+       int     rme_vec_q0;
 };
 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
 
+struct bfa_faa_cbfn_s {
+       bfa_cb_iocfc_t  faa_cbfn;
+       void            *faa_cbarg;
+};
+
+#define BFA_FAA_ENABLED                1
+#define BFA_FAA_DISABLED       2
+
+/*
+ *     FAA attributes
+ */
+struct bfa_faa_attr_s {
+       wwn_t   faa;
+       u8      faa_state;
+       u8      pwwn_source;
+       u8      rsvd[6];
+};
+
+struct bfa_faa_args_s {
+       struct bfa_faa_attr_s   *faa_attr;
+       struct bfa_faa_cbfn_s   faa_cb;
+       u8                      faa_state;
+       bfa_boolean_t           busy;
+};
+
 struct bfa_iocfc_s {
        struct bfa_s            *bfa;
        struct bfa_iocfc_cfg_s  cfg;
        int                     action;
        u32             req_cq_pi[BFI_IOC_MAX_CQS];
        u32             rsp_cq_ci[BFI_IOC_MAX_CQS];
+       u8              hw_qid[BFI_IOC_MAX_CQS];
        struct bfa_cb_qe_s      init_hcb_qe;
        struct bfa_cb_qe_s      stop_hcb_qe;
        struct bfa_cb_qe_s      dis_hcb_qe;
+       struct bfa_cb_qe_s      en_hcb_qe;
        struct bfa_cb_qe_s      stats_hcb_qe;
        bfa_boolean_t           cfgdone;
 
@@ -257,7 +235,6 @@ struct bfa_iocfc_s {
        struct bfi_iocfc_cfg_s *cfginfo;
        struct bfa_dma_s        cfgrsp_dma;
        struct bfi_iocfc_cfgrsp_s *cfgrsp;
-       struct bfi_iocfc_cfg_reply_s *cfg_reply;
        struct bfa_dma_s        req_cq_ba[BFI_IOC_MAX_CQS];
        struct bfa_dma_s        req_cq_shadow_ci[BFI_IOC_MAX_CQS];
        struct bfa_dma_s        rsp_cq_ba[BFI_IOC_MAX_CQS];
@@ -267,18 +244,42 @@ struct bfa_iocfc_s {
        bfa_cb_iocfc_t          updateq_cbfn; /*  bios callback function */
        void                    *updateq_cbarg; /*  bios callback arg */
        u32     intr_mask;
+       struct bfa_faa_args_s   faa_args;
+       struct bfa_mem_dma_s    ioc_dma;
+       struct bfa_mem_dma_s    iocfc_dma;
+       struct bfa_mem_dma_s    reqq_dma[BFI_IOC_MAX_CQS];
+       struct bfa_mem_dma_s    rspq_dma[BFI_IOC_MAX_CQS];
+       struct bfa_mem_kva_s    kva_seg;
 };
 
-#define bfa_lpuid(__bfa)                                               \
-       bfa_ioc_portid(&(__bfa)->ioc)
+#define BFA_MEM_IOC_DMA(_bfa)          (&((_bfa)->iocfc.ioc_dma))
+#define BFA_MEM_IOCFC_DMA(_bfa)                (&((_bfa)->iocfc.iocfc_dma))
+#define BFA_MEM_REQQ_DMA(_bfa, _qno)   (&((_bfa)->iocfc.reqq_dma[(_qno)]))
+#define BFA_MEM_RSPQ_DMA(_bfa, _qno)   (&((_bfa)->iocfc.rspq_dma[(_qno)]))
+#define BFA_MEM_IOCFC_KVA(_bfa)                (&((_bfa)->iocfc.kva_seg))
+
+#define bfa_fn_lpu(__bfa)      \
+       bfi_fn_lpu(bfa_ioc_pcifn(&(__bfa)->ioc), bfa_ioc_portid(&(__bfa)->ioc))
 #define bfa_msix_init(__bfa, __nvecs)                                  \
        ((__bfa)->iocfc.hwif.hw_msix_init(__bfa, __nvecs))
-#define bfa_msix_install(__bfa)                                                \
-       ((__bfa)->iocfc.hwif.hw_msix_install(__bfa))
+#define bfa_msix_ctrl_install(__bfa)                                   \
+       ((__bfa)->iocfc.hwif.hw_msix_ctrl_install(__bfa))
+#define bfa_msix_queue_install(__bfa)                                  \
+       ((__bfa)->iocfc.hwif.hw_msix_queue_install(__bfa))
 #define bfa_msix_uninstall(__bfa)                                      \
        ((__bfa)->iocfc.hwif.hw_msix_uninstall(__bfa))
-#define bfa_isr_mode_set(__bfa, __msix)                                        \
-       ((__bfa)->iocfc.hwif.hw_isr_mode_set(__bfa, __msix))
+#define bfa_isr_rspq_ack(__bfa, __queue) do {                          \
+       if ((__bfa)->iocfc.hwif.hw_rspq_ack)                            \
+               (__bfa)->iocfc.hwif.hw_rspq_ack(__bfa, __queue);        \
+} while (0)
+#define bfa_isr_reqq_ack(__bfa, __queue) do {                          \
+       if ((__bfa)->iocfc.hwif.hw_reqq_ack)                            \
+               (__bfa)->iocfc.hwif.hw_reqq_ack(__bfa, __queue);        \
+} while (0)
+#define bfa_isr_mode_set(__bfa, __msix) do {                           \
+       if ((__bfa)->iocfc.hwif.hw_isr_mode_set)                        \
+               (__bfa)->iocfc.hwif.hw_isr_mode_set(__bfa, __msix);     \
+} while (0)
 #define bfa_msix_getvecs(__bfa, __vecmap, __nvecs, __maxvec)           \
        ((__bfa)->iocfc.hwif.hw_msix_getvecs(__bfa, __vecmap,           \
                                        __nvecs, __maxvec))
@@ -290,17 +291,17 @@ struct bfa_iocfc_s {
 /*
  * FC specific IOC functions.
  */
-void bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
-                      u32 *dm_len);
+void bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg,
+                       struct bfa_meminfo_s *meminfo,
+                       struct bfa_s *bfa);
 void bfa_iocfc_attach(struct bfa_s *bfa, void *bfad,
                      struct bfa_iocfc_cfg_s *cfg,
-                     struct bfa_meminfo_s *meminfo,
                      struct bfa_pcidev_s *pcidev);
 void bfa_iocfc_init(struct bfa_s *bfa);
 void bfa_iocfc_start(struct bfa_s *bfa);
 void bfa_iocfc_stop(struct bfa_s *bfa);
 void bfa_iocfc_isr(void *bfa, struct bfi_mbmsg_s *msg);
-void bfa_iocfc_set_snsbase(struct bfa_s *bfa, u64 snsbase_pa);
+void bfa_iocfc_set_snsbase(struct bfa_s *bfa, int seg_no, u64 snsbase_pa);
 bfa_boolean_t bfa_iocfc_is_operational(struct bfa_s *bfa);
 void bfa_iocfc_reset_queues(struct bfa_s *bfa);
 
@@ -310,10 +311,10 @@ void bfa_msix_rspq(struct bfa_s *bfa, int vec);
 void bfa_msix_lpu_err(struct bfa_s *bfa, int vec);
 
 void bfa_hwcb_reginit(struct bfa_s *bfa);
-void bfa_hwcb_reqq_ack(struct bfa_s *bfa, int rspq);
 void bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq);
 void bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs);
-void bfa_hwcb_msix_install(struct bfa_s *bfa);
+void bfa_hwcb_msix_ctrl_install(struct bfa_s *bfa);
+void bfa_hwcb_msix_queue_install(struct bfa_s *bfa);
 void bfa_hwcb_msix_uninstall(struct bfa_s *bfa);
 void bfa_hwcb_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix);
 void bfa_hwcb_msix_getvecs(struct bfa_s *bfa, u32 *vecmap, u32 *nvecs,
@@ -321,10 +322,12 @@ void bfa_hwcb_msix_getvecs(struct bfa_s *bfa, u32 *vecmap, u32 *nvecs,
 void bfa_hwcb_msix_get_rme_range(struct bfa_s *bfa, u32 *start,
                                 u32 *end);
 void bfa_hwct_reginit(struct bfa_s *bfa);
+void bfa_hwct2_reginit(struct bfa_s *bfa);
 void bfa_hwct_reqq_ack(struct bfa_s *bfa, int rspq);
 void bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq);
 void bfa_hwct_msix_init(struct bfa_s *bfa, int nvecs);
-void bfa_hwct_msix_install(struct bfa_s *bfa);
+void bfa_hwct_msix_ctrl_install(struct bfa_s *bfa);
+void bfa_hwct_msix_queue_install(struct bfa_s *bfa);
 void bfa_hwct_msix_uninstall(struct bfa_s *bfa);
 void bfa_hwct_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix);
 void bfa_hwct_msix_getvecs(struct bfa_s *bfa, u32 *vecmap, u32 *nvecs,
@@ -377,7 +380,8 @@ void bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids);
 void bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg);
 void bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg);
 void bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg,
-                        struct bfa_meminfo_s *meminfo);
+                       struct bfa_meminfo_s *meminfo,
+                       struct bfa_s *bfa);
 void bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
                struct bfa_meminfo_s *meminfo,
                struct bfa_pcidev_s *pcidev);
index 91838c5..c38e589 100644 (file)
@@ -17,7 +17,7 @@
 
 #include "bfad_drv.h"
 #include "bfa_modules.h"
-#include "bfi_ctreg.h"
+#include "bfi_reg.h"
 
 BFA_TRC_FILE(HAL, CORE);
 
@@ -25,13 +25,14 @@ BFA_TRC_FILE(HAL, CORE);
  * BFA module list terminated by NULL
  */
 static struct bfa_module_s *hal_mods[] = {
+       &hal_mod_fcdiag,
        &hal_mod_sgpg,
        &hal_mod_fcport,
        &hal_mod_fcxp,
        &hal_mod_lps,
        &hal_mod_uf,
        &hal_mod_rport,
-       &hal_mod_fcpim,
+       &hal_mod_fcp,
        NULL
 };
 
@@ -41,7 +42,7 @@ static struct bfa_module_s *hal_mods[] = {
 static bfa_isr_func_t  bfa_isrs[BFI_MC_MAX] = {
        bfa_isr_unhandled,      /* NONE */
        bfa_isr_unhandled,      /* BFI_MC_IOC */
-       bfa_isr_unhandled,      /* BFI_MC_DIAG */
+       bfa_fcdiag_intr,        /* BFI_MC_DIAG */
        bfa_isr_unhandled,      /* BFI_MC_FLASH */
        bfa_isr_unhandled,      /* BFI_MC_CEE */
        bfa_fcport_isr,         /* BFI_MC_FCPORT */
@@ -51,7 +52,7 @@ static bfa_isr_func_t  bfa_isrs[BFI_MC_MAX] = {
        bfa_fcxp_isr,           /* BFI_MC_FCXP */
        bfa_lps_isr,            /* BFI_MC_LPS */
        bfa_rport_isr,          /* BFI_MC_RPORT */
-       bfa_itnim_isr,          /* BFI_MC_ITNIM */
+       bfa_itn_isr,            /* BFI_MC_ITN */
        bfa_isr_unhandled,      /* BFI_MC_IOIM_READ */
        bfa_isr_unhandled,      /* BFI_MC_IOIM_WRITE */
        bfa_isr_unhandled,      /* BFI_MC_IOIM_IO */
@@ -89,23 +90,78 @@ static bfa_ioc_mbox_mcfunc_t  bfa_mbox_isrs[BFI_MC_MAX] = {
 
 
 static void
-bfa_com_port_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi)
+bfa_com_port_attach(struct bfa_s *bfa)
 {
        struct bfa_port_s       *port = &bfa->modules.port;
-       u32                     dm_len;
-       u8                      *dm_kva;
-       u64                     dm_pa;
+       struct bfa_mem_dma_s    *port_dma = BFA_MEM_PORT_DMA(bfa);
 
-       dm_len = bfa_port_meminfo();
-       dm_kva = bfa_meminfo_dma_virt(mi);
-       dm_pa  = bfa_meminfo_dma_phys(mi);
-
-       memset(port, 0, sizeof(struct bfa_port_s));
        bfa_port_attach(port, &bfa->ioc, bfa, bfa->trcmod);
-       bfa_port_mem_claim(port, dm_kva, dm_pa);
+       bfa_port_mem_claim(port, port_dma->kva_curp, port_dma->dma_curp);
+}
+
+/*
+ * ablk module attach
+ */
+static void
+bfa_com_ablk_attach(struct bfa_s *bfa)
+{
+       struct bfa_ablk_s       *ablk = &bfa->modules.ablk;
+       struct bfa_mem_dma_s    *ablk_dma = BFA_MEM_ABLK_DMA(bfa);
+
+       bfa_ablk_attach(ablk, &bfa->ioc);
+       bfa_ablk_memclaim(ablk, ablk_dma->kva_curp, ablk_dma->dma_curp);
+}
+
+static void
+bfa_com_cee_attach(struct bfa_s *bfa)
+{
+       struct bfa_cee_s        *cee = &bfa->modules.cee;
+       struct bfa_mem_dma_s    *cee_dma = BFA_MEM_CEE_DMA(bfa);
+
+       cee->trcmod = bfa->trcmod;
+       bfa_cee_attach(cee, &bfa->ioc, bfa);
+       bfa_cee_mem_claim(cee, cee_dma->kva_curp, cee_dma->dma_curp);
+}
+
+static void
+bfa_com_sfp_attach(struct bfa_s *bfa)
+{
+       struct bfa_sfp_s        *sfp = BFA_SFP_MOD(bfa);
+       struct bfa_mem_dma_s    *sfp_dma = BFA_MEM_SFP_DMA(bfa);
+
+       bfa_sfp_attach(sfp, &bfa->ioc, bfa, bfa->trcmod);
+       bfa_sfp_memclaim(sfp, sfp_dma->kva_curp, sfp_dma->dma_curp);
+}
+
+static void
+bfa_com_flash_attach(struct bfa_s *bfa, bfa_boolean_t mincfg)
+{
+       struct bfa_flash_s      *flash = BFA_FLASH(bfa);
+       struct bfa_mem_dma_s    *flash_dma = BFA_MEM_FLASH_DMA(bfa);
+
+       bfa_flash_attach(flash, &bfa->ioc, bfa, bfa->trcmod, mincfg);
+       bfa_flash_memclaim(flash, flash_dma->kva_curp,
+                          flash_dma->dma_curp, mincfg);
+}
+
+static void
+bfa_com_diag_attach(struct bfa_s *bfa)
+{
+       struct bfa_diag_s       *diag = BFA_DIAG_MOD(bfa);
+       struct bfa_mem_dma_s    *diag_dma = BFA_MEM_DIAG_DMA(bfa);
+
+       bfa_diag_attach(diag, &bfa->ioc, bfa, bfa_fcport_beacon, bfa->trcmod);
+       bfa_diag_memclaim(diag, diag_dma->kva_curp, diag_dma->dma_curp);
+}
+
+static void
+bfa_com_phy_attach(struct bfa_s *bfa, bfa_boolean_t mincfg)
+{
+       struct bfa_phy_s        *phy = BFA_PHY(bfa);
+       struct bfa_mem_dma_s    *phy_dma = BFA_MEM_PHY_DMA(bfa);
 
-       bfa_meminfo_dma_virt(mi) = dm_kva + dm_len;
-       bfa_meminfo_dma_phys(mi) = dm_pa + dm_len;
+       bfa_phy_attach(phy, &bfa->ioc, bfa, bfa->trcmod, mincfg);
+       bfa_phy_memclaim(phy, phy_dma->kva_curp, phy_dma->dma_curp, mincfg);
 }
 
 /*
@@ -122,6 +178,7 @@ enum {
        BFA_IOCFC_ACT_INIT      = 1,
        BFA_IOCFC_ACT_STOP      = 2,
        BFA_IOCFC_ACT_DISABLE   = 3,
+       BFA_IOCFC_ACT_ENABLE    = 4,
 };
 
 #define DEF_CFG_NUM_FABRICS            1
@@ -173,10 +230,92 @@ bfa_reqq_resume(struct bfa_s *bfa, int qid)
        }
 }
 
+static inline void
+bfa_isr_rspq(struct bfa_s *bfa, int qid)
+{
+       struct bfi_msg_s *m;
+       u32     pi, ci;
+       struct list_head *waitq;
+
+       bfa_isr_rspq_ack(bfa, qid);
+
+       ci = bfa_rspq_ci(bfa, qid);
+       pi = bfa_rspq_pi(bfa, qid);
+
+       while (ci != pi) {
+               m = bfa_rspq_elem(bfa, qid, ci);
+               WARN_ON(m->mhdr.msg_class >= BFI_MC_MAX);
+
+               bfa_isrs[m->mhdr.msg_class] (bfa, m);
+               CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems);
+       }
+
+       /*
+        * update CI
+        */
+       bfa_rspq_ci(bfa, qid) = pi;
+       writel(pi, bfa->iocfc.bfa_regs.rme_q_ci[qid]);
+       mmiowb();
+
+       /*
+        * Resume any pending requests in the corresponding reqq.
+        */
+       waitq = bfa_reqq(bfa, qid);
+       if (!list_empty(waitq))
+               bfa_reqq_resume(bfa, qid);
+}
+
+static inline void
+bfa_isr_reqq(struct bfa_s *bfa, int qid)
+{
+       struct list_head *waitq;
+
+       bfa_isr_reqq_ack(bfa, qid);
+
+       /*
+        * Resume any pending requests in the corresponding reqq.
+        */
+       waitq = bfa_reqq(bfa, qid);
+       if (!list_empty(waitq))
+               bfa_reqq_resume(bfa, qid);
+}
+
 void
 bfa_msix_all(struct bfa_s *bfa, int vec)
 {
-       bfa_intx(bfa);
+       u32     intr, qintr;
+       int     queue;
+
+       intr = readl(bfa->iocfc.bfa_regs.intr_status);
+       if (!intr)
+               return;
+
+       /*
+        * RME completion queue interrupt
+        */
+       qintr = intr & __HFN_INT_RME_MASK;
+       if (qintr && bfa->queue_process) {
+               for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
+                       bfa_isr_rspq(bfa, queue);
+       }
+
+       intr &= ~qintr;
+       if (!intr)
+               return;
+
+       /*
+        * CPE completion queue interrupt
+        */
+       qintr = intr & __HFN_INT_CPE_MASK;
+       if (qintr && bfa->queue_process) {
+               for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
+                       bfa_isr_reqq(bfa, queue);
+       }
+       intr &= ~qintr;
+       if (!intr)
+               return;
+
+       bfa_msix_lpu_err(bfa, intr);
 }
 
 bfa_boolean_t
@@ -189,16 +328,19 @@ bfa_intx(struct bfa_s *bfa)
        if (!intr)
                return BFA_FALSE;
 
+       qintr = intr & (__HFN_INT_RME_MASK | __HFN_INT_CPE_MASK);
+       if (qintr)
+               writel(qintr, bfa->iocfc.bfa_regs.intr_status);
+
        /*
         * RME completion queue interrupt
         */
        qintr = intr & __HFN_INT_RME_MASK;
-       writel(qintr, bfa->iocfc.bfa_regs.intr_status);
-
-       for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
-               if (intr & (__HFN_INT_RME_Q0 << queue))
-                       bfa_msix_rspq(bfa, queue & (BFI_IOC_MAX_CQS - 1));
+       if (qintr && bfa->queue_process) {
+               for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
+                       bfa_isr_rspq(bfa, queue);
        }
+
        intr &= ~qintr;
        if (!intr)
                return BFA_TRUE;
@@ -207,11 +349,9 @@ bfa_intx(struct bfa_s *bfa)
         * CPE completion queue interrupt
         */
        qintr = intr & __HFN_INT_CPE_MASK;
-       writel(qintr, bfa->iocfc.bfa_regs.intr_status);
-
-       for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
-               if (intr & (__HFN_INT_CPE_Q0 << queue))
-                       bfa_msix_reqq(bfa, queue & (BFI_IOC_MAX_CQS - 1));
+       if (qintr && bfa->queue_process) {
+               for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
+                       bfa_isr_reqq(bfa, queue);
        }
        intr &= ~qintr;
        if (!intr)
@@ -225,32 +365,25 @@ bfa_intx(struct bfa_s *bfa)
 void
 bfa_isr_enable(struct bfa_s *bfa)
 {
-       u32 intr_unmask;
+       u32 umsk;
        int pci_func = bfa_ioc_pcifn(&bfa->ioc);
 
        bfa_trc(bfa, pci_func);
 
-       bfa_msix_install(bfa);
-       intr_unmask = (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 |
-                      __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS |
-                      __HFN_INT_LL_HALT);
-
-       if (pci_func == 0)
-               intr_unmask |= (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 |
-                               __HFN_INT_CPE_Q2 | __HFN_INT_CPE_Q3 |
-                               __HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 |
-                               __HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 |
-                               __HFN_INT_MBOX_LPU0);
-       else
-               intr_unmask |= (__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 |
-                               __HFN_INT_CPE_Q6 | __HFN_INT_CPE_Q7 |
-                               __HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 |
-                               __HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 |
-                               __HFN_INT_MBOX_LPU1);
-
-       writel(intr_unmask, bfa->iocfc.bfa_regs.intr_status);
-       writel(~intr_unmask, bfa->iocfc.bfa_regs.intr_mask);
-       bfa->iocfc.intr_mask = ~intr_unmask;
+       bfa_msix_ctrl_install(bfa);
+
+       if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) {
+               umsk = __HFN_INT_ERR_MASK_CT2;
+               umsk |= pci_func == 0 ?
+                       __HFN_INT_FN0_MASK_CT2 : __HFN_INT_FN1_MASK_CT2;
+       } else {
+               umsk = __HFN_INT_ERR_MASK;
+               umsk |= pci_func == 0 ? __HFN_INT_FN0_MASK : __HFN_INT_FN1_MASK;
+       }
+
+       writel(umsk, bfa->iocfc.bfa_regs.intr_status);
+       writel(~umsk, bfa->iocfc.bfa_regs.intr_mask);
+       bfa->iocfc.intr_mask = ~umsk;
        bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0);
 }
 
@@ -263,20 +396,9 @@ bfa_isr_disable(struct bfa_s *bfa)
 }
 
 void
-bfa_msix_reqq(struct bfa_s *bfa, int qid)
+bfa_msix_reqq(struct bfa_s *bfa, int vec)
 {
-       struct list_head *waitq;
-
-       qid &= (BFI_IOC_MAX_CQS - 1);
-
-       bfa->iocfc.hwif.hw_reqq_ack(bfa, qid);
-
-       /*
-        * Resume any pending requests in the corresponding reqq.
-        */
-       waitq = bfa_reqq(bfa, qid);
-       if (!list_empty(waitq))
-               bfa_reqq_resume(bfa, qid);
+       bfa_isr_reqq(bfa, vec - bfa->iocfc.hwif.cpe_vec_q0);
 }
 
 void
@@ -290,57 +412,37 @@ bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m)
 }
 
 void
-bfa_msix_rspq(struct bfa_s *bfa, int qid)
+bfa_msix_rspq(struct bfa_s *bfa, int vec)
 {
-       struct bfi_msg_s *m;
-       u32 pi, ci;
-       struct list_head *waitq;
-
-       qid &= (BFI_IOC_MAX_CQS - 1);
-
-       bfa->iocfc.hwif.hw_rspq_ack(bfa, qid);
-
-       ci = bfa_rspq_ci(bfa, qid);
-       pi = bfa_rspq_pi(bfa, qid);
-
-       if (bfa->rme_process) {
-               while (ci != pi) {
-                       m = bfa_rspq_elem(bfa, qid, ci);
-                       bfa_isrs[m->mhdr.msg_class] (bfa, m);
-                       CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems);
-               }
-       }
-
-       /*
-        * update CI
-        */
-       bfa_rspq_ci(bfa, qid) = pi;
-       writel(pi, bfa->iocfc.bfa_regs.rme_q_ci[qid]);
-       mmiowb();
-
-       /*
-        * Resume any pending requests in the corresponding reqq.
-        */
-       waitq = bfa_reqq(bfa, qid);
-       if (!list_empty(waitq))
-               bfa_reqq_resume(bfa, qid);
+       bfa_isr_rspq(bfa, vec - bfa->iocfc.hwif.rme_vec_q0);
 }
 
 void
 bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
 {
        u32 intr, curr_value;
+       bfa_boolean_t lpu_isr, halt_isr, pss_isr;
 
        intr = readl(bfa->iocfc.bfa_regs.intr_status);
 
-       if (intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1))
-               bfa_ioc_mbox_isr(&bfa->ioc);
+       if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) {
+               halt_isr = intr & __HFN_INT_CPQ_HALT_CT2;
+               pss_isr  = intr & __HFN_INT_ERR_PSS_CT2;
+               lpu_isr  = intr & (__HFN_INT_MBOX_LPU0_CT2 |
+                                  __HFN_INT_MBOX_LPU1_CT2);
+               intr    &= __HFN_INT_ERR_MASK_CT2;
+       } else {
+               halt_isr = intr & __HFN_INT_LL_HALT;
+               pss_isr  = intr & __HFN_INT_ERR_PSS;
+               lpu_isr  = intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1);
+               intr    &= __HFN_INT_ERR_MASK;
+       }
 
-       intr &= (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 |
-               __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS | __HFN_INT_LL_HALT);
+       if (lpu_isr)
+               bfa_ioc_mbox_isr(&bfa->ioc);
 
        if (intr) {
-               if (intr & __HFN_INT_LL_HALT) {
+               if (halt_isr) {
                        /*
                         * If LL_HALT bit is set then FW Init Halt LL Port
                         * Register needs to be cleared as well so Interrupt
@@ -351,7 +453,7 @@ bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
                        writel(curr_value, bfa->ioc.ioc_regs.ll_halt);
                }
 
-               if (intr & __HFN_INT_ERR_PSS) {
+               if (pss_isr) {
                        /*
                         * ERR_PSS bit needs to be cleared as well in case
                         * interrups are shared so driver's interrupt handler is
@@ -359,7 +461,6 @@ bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
                         */
                        curr_value = readl(
                                        bfa->ioc.ioc_regs.pss_err_status_reg);
-                       curr_value &= __PSS_ERR_STATUS_SET;
                        writel(curr_value,
                                bfa->ioc.ioc_regs.pss_err_status_reg);
                }
@@ -377,41 +478,6 @@ bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
  *  BFA IOC private functions
  */
 
-static void
-bfa_iocfc_cqs_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len)
-{
-       int             i, per_reqq_sz, per_rspq_sz;
-
-       per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
-                                 BFA_DMA_ALIGN_SZ);
-       per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
-                                 BFA_DMA_ALIGN_SZ);
-
-       /*
-        * Calculate CQ size
-        */
-       for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
-               *dm_len = *dm_len + per_reqq_sz;
-               *dm_len = *dm_len + per_rspq_sz;
-       }
-
-       /*
-        * Calculate Shadow CI/PI size
-        */
-       for (i = 0; i < cfg->fwcfg.num_cqs; i++)
-               *dm_len += (2 * BFA_CACHELINE_SZ);
-}
-
-static void
-bfa_iocfc_fw_cfg_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len)
-{
-       *dm_len +=
-               BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
-       *dm_len +=
-               BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
-                           BFA_CACHELINE_SZ);
-}
-
 /*
  * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ
  */
@@ -433,8 +499,13 @@ bfa_iocfc_send_cfg(void *bfa_arg)
        /*
         * initialize IOC configuration info
         */
+       cfg_info->single_msix_vec = 0;
+       if (bfa->msix.nvecs == 1)
+               cfg_info->single_msix_vec = 1;
        cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG;
        cfg_info->num_cqs = cfg->fwcfg.num_cqs;
+       cfg_info->num_ioim_reqs = cpu_to_be16(cfg->fwcfg.num_ioim_reqs);
+       cfg_info->num_fwtio_reqs = cpu_to_be16(cfg->fwcfg.num_fwtio_reqs);
 
        bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa);
        /*
@@ -469,7 +540,7 @@ bfa_iocfc_send_cfg(void *bfa_arg)
         * dma map IOC configuration itself
         */
        bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ,
-                   bfa_lpuid(bfa));
+                   bfa_fn_lpu(bfa));
        bfa_dma_be_addr_set(cfg_req.ioc_cfg_dma_addr, iocfc->cfg_info.pa);
 
        bfa_ioc_mbox_send(&bfa->ioc, &cfg_req,
@@ -491,26 +562,40 @@ bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
        /*
         * Initialize chip specific handlers.
         */
-       if (bfa_asic_id_ct(bfa_ioc_devid(&bfa->ioc))) {
+       if (bfa_asic_id_ctc(bfa_ioc_devid(&bfa->ioc))) {
                iocfc->hwif.hw_reginit = bfa_hwct_reginit;
                iocfc->hwif.hw_reqq_ack = bfa_hwct_reqq_ack;
                iocfc->hwif.hw_rspq_ack = bfa_hwct_rspq_ack;
                iocfc->hwif.hw_msix_init = bfa_hwct_msix_init;
-               iocfc->hwif.hw_msix_install = bfa_hwct_msix_install;
+               iocfc->hwif.hw_msix_ctrl_install = bfa_hwct_msix_ctrl_install;
+               iocfc->hwif.hw_msix_queue_install = bfa_hwct_msix_queue_install;
                iocfc->hwif.hw_msix_uninstall = bfa_hwct_msix_uninstall;
                iocfc->hwif.hw_isr_mode_set = bfa_hwct_isr_mode_set;
                iocfc->hwif.hw_msix_getvecs = bfa_hwct_msix_getvecs;
                iocfc->hwif.hw_msix_get_rme_range = bfa_hwct_msix_get_rme_range;
+               iocfc->hwif.rme_vec_q0 = BFI_MSIX_RME_QMIN_CT;
+               iocfc->hwif.cpe_vec_q0 = BFI_MSIX_CPE_QMIN_CT;
        } else {
                iocfc->hwif.hw_reginit = bfa_hwcb_reginit;
-               iocfc->hwif.hw_reqq_ack = bfa_hwcb_reqq_ack;
-               iocfc->hwif.hw_rspq_ack = bfa_hwcb_rspq_ack;
+               iocfc->hwif.hw_reqq_ack = NULL;
+               iocfc->hwif.hw_rspq_ack = NULL;
                iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init;
-               iocfc->hwif.hw_msix_install = bfa_hwcb_msix_install;
+               iocfc->hwif.hw_msix_ctrl_install = bfa_hwcb_msix_ctrl_install;
+               iocfc->hwif.hw_msix_queue_install = bfa_hwcb_msix_queue_install;
                iocfc->hwif.hw_msix_uninstall = bfa_hwcb_msix_uninstall;
                iocfc->hwif.hw_isr_mode_set = bfa_hwcb_isr_mode_set;
                iocfc->hwif.hw_msix_getvecs = bfa_hwcb_msix_getvecs;
                iocfc->hwif.hw_msix_get_rme_range = bfa_hwcb_msix_get_rme_range;
+               iocfc->hwif.rme_vec_q0 = BFI_MSIX_RME_QMIN_CB +
+                       bfa_ioc_pcifn(&bfa->ioc) * BFI_IOC_MAX_CQS;
+               iocfc->hwif.cpe_vec_q0 = BFI_MSIX_CPE_QMIN_CB +
+                       bfa_ioc_pcifn(&bfa->ioc) * BFI_IOC_MAX_CQS;
+       }
+
+       if (bfa_asic_id_ct2(bfa_ioc_devid(&bfa->ioc))) {
+               iocfc->hwif.hw_reginit = bfa_hwct2_reginit;
+               iocfc->hwif.hw_isr_mode_set = NULL;
+               iocfc->hwif.hw_rspq_ack = NULL;
        }
 
        iocfc->hwif.hw_reginit(bfa);
@@ -518,48 +603,42 @@ bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
 }
 
 static void
-bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg,
-                   struct bfa_meminfo_s *meminfo)
+bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg)
 {
-       u8             *dm_kva;
-       u64     dm_pa;
-       int             i, per_reqq_sz, per_rspq_sz;
+       u8      *dm_kva = NULL;
+       u64     dm_pa = 0;
+       int     i, per_reqq_sz, per_rspq_sz, dbgsz;
        struct bfa_iocfc_s  *iocfc = &bfa->iocfc;
-       int             dbgsz;
-
-       dm_kva = bfa_meminfo_dma_virt(meminfo);
-       dm_pa = bfa_meminfo_dma_phys(meminfo);
+       struct bfa_mem_dma_s *ioc_dma = BFA_MEM_IOC_DMA(bfa);
+       struct bfa_mem_dma_s *iocfc_dma = BFA_MEM_IOCFC_DMA(bfa);
+       struct bfa_mem_dma_s *reqq_dma, *rspq_dma;
 
-       /*
-        * First allocate dma memory for IOC.
-        */
-       bfa_ioc_mem_claim(&bfa->ioc, dm_kva, dm_pa);
-       dm_kva += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);
-       dm_pa  += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);
+       /* First allocate dma memory for IOC */
+       bfa_ioc_mem_claim(&bfa->ioc, bfa_mem_dma_virt(ioc_dma),
+                       bfa_mem_dma_phys(ioc_dma));
 
-       /*
-        * Claim DMA-able memory for the request/response queues and for shadow
-        * ci/pi registers
-        */
+       /* Claim DMA-able memory for the request/response queues */
        per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
-                                 BFA_DMA_ALIGN_SZ);
+                               BFA_DMA_ALIGN_SZ);
        per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
-                                 BFA_DMA_ALIGN_SZ);
+                               BFA_DMA_ALIGN_SZ);
 
        for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
-               iocfc->req_cq_ba[i].kva = dm_kva;
-               iocfc->req_cq_ba[i].pa = dm_pa;
-               memset(dm_kva, 0, per_reqq_sz);
-               dm_kva += per_reqq_sz;
-               dm_pa += per_reqq_sz;
-
-               iocfc->rsp_cq_ba[i].kva = dm_kva;
-               iocfc->rsp_cq_ba[i].pa = dm_pa;
-               memset(dm_kva, 0, per_rspq_sz);
-               dm_kva += per_rspq_sz;
-               dm_pa += per_rspq_sz;
+               reqq_dma = BFA_MEM_REQQ_DMA(bfa, i);
+               iocfc->req_cq_ba[i].kva = bfa_mem_dma_virt(reqq_dma);
+               iocfc->req_cq_ba[i].pa = bfa_mem_dma_phys(reqq_dma);
+               memset(iocfc->req_cq_ba[i].kva, 0, per_reqq_sz);
+
+               rspq_dma = BFA_MEM_RSPQ_DMA(bfa, i);
+               iocfc->rsp_cq_ba[i].kva = bfa_mem_dma_virt(rspq_dma);
+               iocfc->rsp_cq_ba[i].pa = bfa_mem_dma_phys(rspq_dma);
+               memset(iocfc->rsp_cq_ba[i].kva, 0, per_rspq_sz);
        }
 
+       /* Claim IOCFC dma memory - for shadow CI/PI */
+       dm_kva = bfa_mem_dma_virt(iocfc_dma);
+       dm_pa  = bfa_mem_dma_phys(iocfc_dma);
+
        for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
                iocfc->req_cq_shadow_ci[i].kva = dm_kva;
                iocfc->req_cq_shadow_ci[i].pa = dm_pa;
@@ -572,36 +651,27 @@ bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg,
                dm_pa += BFA_CACHELINE_SZ;
        }
 
-       /*
-        * Claim DMA-able memory for the config info page
-        */
+       /* Claim IOCFC dma memory - for the config info page */
        bfa->iocfc.cfg_info.kva = dm_kva;
        bfa->iocfc.cfg_info.pa = dm_pa;
        bfa->iocfc.cfginfo = (struct bfi_iocfc_cfg_s *) dm_kva;
        dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
        dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
 
-       /*
-        * Claim DMA-able memory for the config response
-        */
+       /* Claim IOCFC dma memory - for the config response */
        bfa->iocfc.cfgrsp_dma.kva = dm_kva;
        bfa->iocfc.cfgrsp_dma.pa = dm_pa;
        bfa->iocfc.cfgrsp = (struct bfi_iocfc_cfgrsp_s *) dm_kva;
-
-       dm_kva +=
-               BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
-                           BFA_CACHELINE_SZ);
+       dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
+                       BFA_CACHELINE_SZ);
        dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
-                            BFA_CACHELINE_SZ);
-
-
-       bfa_meminfo_dma_virt(meminfo) = dm_kva;
-       bfa_meminfo_dma_phys(meminfo) = dm_pa;
+                       BFA_CACHELINE_SZ);
 
+       /* Claim IOCFC kva memory */
        dbgsz = (bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
        if (dbgsz > 0) {
-               bfa_ioc_debug_memclaim(&bfa->ioc, bfa_meminfo_kva(meminfo));
-               bfa_meminfo_kva(meminfo) += dbgsz;
+               bfa_ioc_debug_memclaim(&bfa->ioc, bfa_mem_kva_curp(iocfc));
+               bfa_mem_kva_curp(iocfc) += dbgsz;
        }
 }
 
@@ -613,7 +683,9 @@ bfa_iocfc_start_submod(struct bfa_s *bfa)
 {
        int             i;
 
-       bfa->rme_process = BFA_TRUE;
+       bfa->queue_process = BFA_TRUE;
+       for (i = 0; i < BFI_IOC_MAX_CQS; i++)
+               bfa_isr_rspq_ack(bfa, i);
 
        for (i = 0; hal_mods[i]; i++)
                hal_mods[i]->start(bfa);
@@ -659,6 +731,16 @@ bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl)
                bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
 }
 
+static void
+bfa_iocfc_enable_cb(void *bfa_arg, bfa_boolean_t compl)
+{
+       struct bfa_s    *bfa = bfa_arg;
+       struct bfad_s *bfad = bfa->bfad;
+
+       if (compl)
+               complete(&bfad->enable_comp);
+}
+
 static void
 bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl)
 {
@@ -669,6 +751,37 @@ bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl)
                complete(&bfad->disable_comp);
 }
 
+/**
+ * configure queue registers from firmware response
+ */
+static void
+bfa_iocfc_qreg(struct bfa_s *bfa, struct bfi_iocfc_qreg_s *qreg)
+{
+       int     i;
+       struct bfa_iocfc_regs_s *r = &bfa->iocfc.bfa_regs;
+       void __iomem *kva = bfa_ioc_bar0(&bfa->ioc);
+
+       for (i = 0; i < BFI_IOC_MAX_CQS; i++) {
+               bfa->iocfc.hw_qid[i] = qreg->hw_qid[i];
+               r->cpe_q_ci[i] = kva + be32_to_cpu(qreg->cpe_q_ci_off[i]);
+               r->cpe_q_pi[i] = kva + be32_to_cpu(qreg->cpe_q_pi_off[i]);
+               r->cpe_q_ctrl[i] = kva + be32_to_cpu(qreg->cpe_qctl_off[i]);
+               r->rme_q_ci[i] = kva + be32_to_cpu(qreg->rme_q_ci_off[i]);
+               r->rme_q_pi[i] = kva + be32_to_cpu(qreg->rme_q_pi_off[i]);
+               r->rme_q_ctrl[i] = kva + be32_to_cpu(qreg->rme_qctl_off[i]);
+       }
+}
+
+static void
+bfa_iocfc_res_recfg(struct bfa_s *bfa, struct bfa_iocfc_fwcfg_s *fwcfg)
+{
+       bfa_fcxp_res_recfg(bfa, fwcfg->num_fcxp_reqs);
+       bfa_uf_res_recfg(bfa, fwcfg->num_uf_bufs);
+       bfa_rport_res_recfg(bfa, fwcfg->num_rports);
+       bfa_fcp_res_recfg(bfa, fwcfg->num_ioim_reqs);
+       bfa_tskim_res_recfg(bfa, fwcfg->num_tskim_reqs);
+}
+
 /*
  * Update BFA configuration from firmware configuration.
  */
@@ -681,6 +794,7 @@ bfa_iocfc_cfgrsp(struct bfa_s *bfa)
 
        fwcfg->num_cqs        = fwcfg->num_cqs;
        fwcfg->num_ioim_reqs  = be16_to_cpu(fwcfg->num_ioim_reqs);
+       fwcfg->num_fwtio_reqs = be16_to_cpu(fwcfg->num_fwtio_reqs);
        fwcfg->num_tskim_reqs = be16_to_cpu(fwcfg->num_tskim_reqs);
        fwcfg->num_fcxp_reqs  = be16_to_cpu(fwcfg->num_fcxp_reqs);
        fwcfg->num_uf_bufs    = be16_to_cpu(fwcfg->num_uf_bufs);
@@ -688,6 +802,21 @@ bfa_iocfc_cfgrsp(struct bfa_s *bfa)
 
        iocfc->cfgdone = BFA_TRUE;
 
+       /*
+        * configure queue register offsets as learnt from firmware
+        */
+       bfa_iocfc_qreg(bfa, &cfgrsp->qreg);
+
+       /*
+        * Re-configure resources as learnt from Firmware
+        */
+       bfa_iocfc_res_recfg(bfa, fwcfg);
+
+       /*
+        * Install MSIX queue handlers
+        */
+       bfa_msix_queue_install(bfa);
+
        /*
         * Configuration is complete - initialize/start submodules
         */
@@ -695,8 +824,12 @@ bfa_iocfc_cfgrsp(struct bfa_s *bfa)
 
        if (iocfc->action == BFA_IOCFC_ACT_INIT)
                bfa_cb_queue(bfa, &iocfc->init_hcb_qe, bfa_iocfc_init_cb, bfa);
-       else
+       else {
+               if (bfa->iocfc.action == BFA_IOCFC_ACT_ENABLE)
+                       bfa_cb_queue(bfa, &bfa->iocfc.en_hcb_qe,
+                                       bfa_iocfc_enable_cb, bfa);
                bfa_iocfc_start_submod(bfa);
+       }
 }
 void
 bfa_iocfc_reset_queues(struct bfa_s *bfa)
@@ -711,6 +844,181 @@ bfa_iocfc_reset_queues(struct bfa_s *bfa)
        }
 }
 
+/* Fabric Assigned Address specific functions */
+
+/*
+ *     Check whether IOC is ready before sending command down
+ */
+static bfa_status_t
+bfa_faa_validate_request(struct bfa_s *bfa)
+{
+       enum bfa_ioc_type_e     ioc_type = bfa_get_type(bfa);
+       u32     card_type = bfa->ioc.attr->card_type;
+
+       if (bfa_ioc_is_operational(&bfa->ioc)) {
+               if ((ioc_type != BFA_IOC_TYPE_FC) || bfa_mfg_is_mezz(card_type))
+                       return BFA_STATUS_FEATURE_NOT_SUPPORTED;
+       } else {
+               if (!bfa_ioc_is_acq_addr(&bfa->ioc))
+                       return BFA_STATUS_IOC_NON_OP;
+       }
+
+       return BFA_STATUS_OK;
+}
+
+bfa_status_t
+bfa_faa_enable(struct bfa_s *bfa, bfa_cb_iocfc_t cbfn, void *cbarg)
+{
+       struct bfi_faa_en_dis_s faa_enable_req;
+       struct bfa_iocfc_s      *iocfc = &bfa->iocfc;
+       bfa_status_t            status;
+
+       iocfc->faa_args.faa_cb.faa_cbfn = cbfn;
+       iocfc->faa_args.faa_cb.faa_cbarg = cbarg;
+
+       status = bfa_faa_validate_request(bfa);
+       if (status != BFA_STATUS_OK)
+               return status;
+
+       if (iocfc->faa_args.busy == BFA_TRUE)
+               return BFA_STATUS_DEVBUSY;
+
+       if (iocfc->faa_args.faa_state == BFA_FAA_ENABLED)
+               return BFA_STATUS_FAA_ENABLED;
+
+       if (bfa_fcport_is_trunk_enabled(bfa))
+               return BFA_STATUS_ERROR_TRUNK_ENABLED;
+
+       bfa_fcport_cfg_faa(bfa, BFA_FAA_ENABLED);
+       iocfc->faa_args.busy = BFA_TRUE;
+
+       memset(&faa_enable_req, 0, sizeof(struct bfi_faa_en_dis_s));
+       bfi_h2i_set(faa_enable_req.mh, BFI_MC_IOCFC,
+               BFI_IOCFC_H2I_FAA_ENABLE_REQ, bfa_fn_lpu(bfa));
+
+       bfa_ioc_mbox_send(&bfa->ioc, &faa_enable_req,
+                       sizeof(struct bfi_faa_en_dis_s));
+
+       return BFA_STATUS_OK;
+}
+
+bfa_status_t
+bfa_faa_disable(struct bfa_s *bfa, bfa_cb_iocfc_t cbfn,
+               void *cbarg)
+{
+       struct bfi_faa_en_dis_s faa_disable_req;
+       struct bfa_iocfc_s      *iocfc = &bfa->iocfc;
+       bfa_status_t            status;
+
+       iocfc->faa_args.faa_cb.faa_cbfn = cbfn;
+       iocfc->faa_args.faa_cb.faa_cbarg = cbarg;
+
+       status = bfa_faa_validate_request(bfa);
+       if (status != BFA_STATUS_OK)
+               return status;
+
+       if (iocfc->faa_args.busy == BFA_TRUE)
+               return BFA_STATUS_DEVBUSY;
+
+       if (iocfc->faa_args.faa_state == BFA_FAA_DISABLED)
+               return BFA_STATUS_FAA_DISABLED;
+
+       bfa_fcport_cfg_faa(bfa, BFA_FAA_DISABLED);
+       iocfc->faa_args.busy = BFA_TRUE;
+
+       memset(&faa_disable_req, 0, sizeof(struct bfi_faa_en_dis_s));
+       bfi_h2i_set(faa_disable_req.mh, BFI_MC_IOCFC,
+               BFI_IOCFC_H2I_FAA_DISABLE_REQ, bfa_fn_lpu(bfa));
+
+       bfa_ioc_mbox_send(&bfa->ioc, &faa_disable_req,
+               sizeof(struct bfi_faa_en_dis_s));
+
+       return BFA_STATUS_OK;
+}
+
+bfa_status_t
+bfa_faa_query(struct bfa_s *bfa, struct bfa_faa_attr_s *attr,
+               bfa_cb_iocfc_t cbfn, void *cbarg)
+{
+       struct bfi_faa_query_s  faa_attr_req;
+       struct bfa_iocfc_s      *iocfc = &bfa->iocfc;
+       bfa_status_t            status;
+
+       iocfc->faa_args.faa_attr = attr;
+       iocfc->faa_args.faa_cb.faa_cbfn = cbfn;
+       iocfc->faa_args.faa_cb.faa_cbarg = cbarg;
+
+       status = bfa_faa_validate_request(bfa);
+       if (status != BFA_STATUS_OK)
+               return status;
+
+       if (iocfc->faa_args.busy == BFA_TRUE)
+               return BFA_STATUS_DEVBUSY;
+
+       iocfc->faa_args.busy = BFA_TRUE;
+       memset(&faa_attr_req, 0, sizeof(struct bfi_faa_query_s));
+       bfi_h2i_set(faa_attr_req.mh, BFI_MC_IOCFC,
+               BFI_IOCFC_H2I_FAA_QUERY_REQ, bfa_fn_lpu(bfa));
+
+       bfa_ioc_mbox_send(&bfa->ioc, &faa_attr_req,
+               sizeof(struct bfi_faa_query_s));
+
+       return BFA_STATUS_OK;
+}
+
+/*
+ *     FAA enable response
+ */
+static void
+bfa_faa_enable_reply(struct bfa_iocfc_s *iocfc,
+               struct bfi_faa_en_dis_rsp_s *rsp)
+{
+       void    *cbarg = iocfc->faa_args.faa_cb.faa_cbarg;
+       bfa_status_t    status = rsp->status;
+
+       WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn);
+
+       iocfc->faa_args.faa_cb.faa_cbfn(cbarg, status);
+       iocfc->faa_args.busy = BFA_FALSE;
+}
+
+/*
+ *     FAA disable response
+ */
+static void
+bfa_faa_disable_reply(struct bfa_iocfc_s *iocfc,
+               struct bfi_faa_en_dis_rsp_s *rsp)
+{
+       void    *cbarg = iocfc->faa_args.faa_cb.faa_cbarg;
+       bfa_status_t    status = rsp->status;
+
+       WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn);
+
+       iocfc->faa_args.faa_cb.faa_cbfn(cbarg, status);
+       iocfc->faa_args.busy = BFA_FALSE;
+}
+
+/*
+ *     FAA query response
+ */
+static void
+bfa_faa_query_reply(struct bfa_iocfc_s *iocfc,
+               bfi_faa_query_rsp_t *rsp)
+{
+       void    *cbarg = iocfc->faa_args.faa_cb.faa_cbarg;
+
+       if (iocfc->faa_args.faa_attr) {
+               iocfc->faa_args.faa_attr->faa = rsp->faa;
+               iocfc->faa_args.faa_attr->faa_state = rsp->faa_status;
+               iocfc->faa_args.faa_attr->pwwn_source = rsp->addr_source;
+       }
+
+       WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn);
+
+       iocfc->faa_args.faa_cb.faa_cbfn(cbarg, BFA_STATUS_OK);
+       iocfc->faa_args.busy = BFA_FALSE;
+}
+
 /*
  * IOC enable request is complete
  */
@@ -719,11 +1027,20 @@ bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status)
 {
        struct bfa_s    *bfa = bfa_arg;
 
+       if (status == BFA_STATUS_FAA_ACQ_ADDR) {
+               bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
+                               bfa_iocfc_init_cb, bfa);
+               return;
+       }
+
        if (status != BFA_STATUS_OK) {
                bfa_isr_disable(bfa);
                if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
                        bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
                                     bfa_iocfc_init_cb, bfa);
+               else if (bfa->iocfc.action == BFA_IOCFC_ACT_ENABLE)
+                       bfa_cb_queue(bfa, &bfa->iocfc.en_hcb_qe,
+                                       bfa_iocfc_enable_cb, bfa);
                return;
        }
 
@@ -759,7 +1076,7 @@ bfa_iocfc_hbfail_cbfn(void *bfa_arg)
 {
        struct bfa_s    *bfa = bfa_arg;
 
-       bfa->rme_process = BFA_FALSE;
+       bfa->queue_process = BFA_FALSE;
 
        bfa_isr_disable(bfa);
        bfa_iocfc_disable_submod(bfa);
@@ -786,15 +1103,47 @@ bfa_iocfc_reset_cbfn(void *bfa_arg)
  * Query IOC memory requirement information.
  */
 void
-bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
-                 u32 *dm_len)
+bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
+                 struct bfa_s *bfa)
 {
-       /* dma memory for IOC */
-       *dm_len += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);
+       int q, per_reqq_sz, per_rspq_sz;
+       struct bfa_mem_dma_s *ioc_dma = BFA_MEM_IOC_DMA(bfa);
+       struct bfa_mem_dma_s *iocfc_dma = BFA_MEM_IOCFC_DMA(bfa);
+       struct bfa_mem_kva_s *iocfc_kva = BFA_MEM_IOCFC_KVA(bfa);
+       u32     dm_len = 0;
+
+       /* dma memory setup for IOC */
+       bfa_mem_dma_setup(meminfo, ioc_dma,
+               BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ));
+
+       /* dma memory setup for REQ/RSP queues */
+       per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
+                               BFA_DMA_ALIGN_SZ);
+       per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
+                               BFA_DMA_ALIGN_SZ);
+
+       for (q = 0; q < cfg->fwcfg.num_cqs; q++) {
+               bfa_mem_dma_setup(meminfo, BFA_MEM_REQQ_DMA(bfa, q),
+                               per_reqq_sz);
+               bfa_mem_dma_setup(meminfo, BFA_MEM_RSPQ_DMA(bfa, q),
+                               per_rspq_sz);
+       }
+
+       /* IOCFC dma memory - calculate Shadow CI/PI size */
+       for (q = 0; q < cfg->fwcfg.num_cqs; q++)
+               dm_len += (2 * BFA_CACHELINE_SZ);
+
+       /* IOCFC dma memory - calculate config info / rsp size */
+       dm_len += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
+       dm_len += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
+                       BFA_CACHELINE_SZ);
 
-       bfa_iocfc_fw_cfg_sz(cfg, dm_len);
-       bfa_iocfc_cqs_sz(cfg, dm_len);
-       *km_len += (bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
+       /* dma memory setup for IOCFC */
+       bfa_mem_dma_setup(meminfo, iocfc_dma, dm_len);
+
+       /* kva memory setup for IOCFC */
+       bfa_mem_kva_setup(meminfo, iocfc_kva,
+                       ((bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0));
 }
 
 /*
@@ -802,7 +1151,7 @@ bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
  */
 void
 bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
-                struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
+                struct bfa_pcidev_s *pcidev)
 {
        int             i;
        struct bfa_ioc_s *ioc = &bfa->ioc;
@@ -815,17 +1164,11 @@ bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
        ioc->trcmod = bfa->trcmod;
        bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod);
 
-       /*
-        * Set FC mode for BFA_PCI_DEVICE_ID_CT_FC.
-        */
-       if (pcidev->device_id == BFA_PCI_DEVICE_ID_CT_FC)
-               bfa_ioc_set_fcmode(&bfa->ioc);
-
-       bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_MC_IOCFC);
+       bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_PCIFN_CLASS_FC);
        bfa_ioc_mbox_register(&bfa->ioc, bfa_mbox_isrs);
 
        bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev);
-       bfa_iocfc_mem_claim(bfa, cfg, meminfo);
+       bfa_iocfc_mem_claim(bfa, cfg);
        INIT_LIST_HEAD(&bfa->timer_mod.timer_q);
 
        INIT_LIST_HEAD(&bfa->comp_q);
@@ -863,7 +1206,7 @@ bfa_iocfc_stop(struct bfa_s *bfa)
 {
        bfa->iocfc.action = BFA_IOCFC_ACT_STOP;
 
-       bfa->rme_process = BFA_FALSE;
+       bfa->queue_process = BFA_FALSE;
        bfa_ioc_disable(&bfa->ioc);
 }
 
@@ -879,12 +1222,22 @@ bfa_iocfc_isr(void *bfaarg, struct bfi_mbmsg_s *m)
 
        switch (msg->mh.msg_id) {
        case BFI_IOCFC_I2H_CFG_REPLY:
-               iocfc->cfg_reply = &msg->cfg_reply;
                bfa_iocfc_cfgrsp(bfa);
                break;
        case BFI_IOCFC_I2H_UPDATEQ_RSP:
                iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK);
                break;
+       case BFI_IOCFC_I2H_FAA_ENABLE_RSP:
+               bfa_faa_enable_reply(iocfc,
+                       (struct bfi_faa_en_dis_rsp_s *)msg);
+               break;
+       case BFI_IOCFC_I2H_FAA_DISABLE_RSP:
+               bfa_faa_disable_reply(iocfc,
+                       (struct bfi_faa_en_dis_rsp_s *)msg);
+               break;
+       case BFI_IOCFC_I2H_FAA_QUERY_RSP:
+               bfa_faa_query_reply(iocfc, (bfi_faa_query_rsp_t *)msg);
+               break;
        default:
                WARN_ON(1);
        }
@@ -926,7 +1279,7 @@ bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr)
                return BFA_STATUS_DEVBUSY;
 
        bfi_h2i_set(m->mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_SET_INTR_REQ,
-                   bfa_lpuid(bfa));
+                   bfa_fn_lpu(bfa));
        m->coalesce = iocfc->cfginfo->intr_attr.coalesce;
        m->delay    = iocfc->cfginfo->intr_attr.delay;
        m->latency  = iocfc->cfginfo->intr_attr.latency;
@@ -934,17 +1287,17 @@ bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr)
        bfa_trc(bfa, attr->delay);
        bfa_trc(bfa, attr->latency);
 
-       bfa_reqq_produce(bfa, BFA_REQQ_IOC);
+       bfa_reqq_produce(bfa, BFA_REQQ_IOC, m->mh);
        return BFA_STATUS_OK;
 }
 
 void
-bfa_iocfc_set_snsbase(struct bfa_s *bfa, u64 snsbase_pa)
+bfa_iocfc_set_snsbase(struct bfa_s *bfa, int seg_no, u64 snsbase_pa)
 {
        struct bfa_iocfc_s      *iocfc = &bfa->iocfc;
 
        iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1);
-       bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase, snsbase_pa);
+       bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase[seg_no], snsbase_pa);
 }
 /*
  * Enable IOC after it is disabled.
@@ -954,6 +1307,7 @@ bfa_iocfc_enable(struct bfa_s *bfa)
 {
        bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
                     "IOC Enable");
+       bfa->iocfc.action = BFA_IOCFC_ACT_ENABLE;
        bfa_ioc_enable(&bfa->ioc);
 }
 
@@ -964,7 +1318,7 @@ bfa_iocfc_disable(struct bfa_s *bfa)
                     "IOC Disable");
        bfa->iocfc.action = BFA_IOCFC_ACT_DISABLE;
 
-       bfa->rme_process = BFA_FALSE;
+       bfa->queue_process = BFA_FALSE;
        bfa_ioc_disable(&bfa->ioc);
 }
 
@@ -1033,33 +1387,49 @@ bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport)
  *                     starting address for each block and provide the same
  *                     structure as input parameter to bfa_attach() call.
  *
+ * @param[in] bfa -    pointer to the bfa structure, used while fetching the
+ *                     dma, kva memory information of the bfa sub-modules.
+ *
  * @return void
  *
  * Special Considerations: @note
  */
 void
-bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo)
+bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
+               struct bfa_s *bfa)
 {
        int             i;
-       u32     km_len = 0, dm_len = 0;
+       struct bfa_mem_dma_s *port_dma = BFA_MEM_PORT_DMA(bfa);
+       struct bfa_mem_dma_s *ablk_dma = BFA_MEM_ABLK_DMA(bfa);
+       struct bfa_mem_dma_s *cee_dma = BFA_MEM_CEE_DMA(bfa);
+       struct bfa_mem_dma_s *sfp_dma = BFA_MEM_SFP_DMA(bfa);
+       struct bfa_mem_dma_s *flash_dma = BFA_MEM_FLASH_DMA(bfa);
+       struct bfa_mem_dma_s *diag_dma = BFA_MEM_DIAG_DMA(bfa);
+       struct bfa_mem_dma_s *phy_dma = BFA_MEM_PHY_DMA(bfa);
 
        WARN_ON((cfg == NULL) || (meminfo == NULL));
 
        memset((void *)meminfo, 0, sizeof(struct bfa_meminfo_s));
-       meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_type =
-               BFA_MEM_TYPE_KVA;
-       meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_type =
-               BFA_MEM_TYPE_DMA;
 
-       bfa_iocfc_meminfo(cfg, &km_len, &dm_len);
-
-       for (i = 0; hal_mods[i]; i++)
-               hal_mods[i]->meminfo(cfg, &km_len, &dm_len);
+       /* Initialize the DMA & KVA meminfo queues */
+       INIT_LIST_HEAD(&meminfo->dma_info.qe);
+       INIT_LIST_HEAD(&meminfo->kva_info.qe);
 
-       dm_len += bfa_port_meminfo();
+       bfa_iocfc_meminfo(cfg, meminfo, bfa);
 
-       meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_len = km_len;
-       meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_len = dm_len;
+       for (i = 0; hal_mods[i]; i++)
+               hal_mods[i]->meminfo(cfg, meminfo, bfa);
+
+       /* dma info setup */
+       bfa_mem_dma_setup(meminfo, port_dma, bfa_port_meminfo());
+       bfa_mem_dma_setup(meminfo, ablk_dma, bfa_ablk_meminfo());
+       bfa_mem_dma_setup(meminfo, cee_dma, bfa_cee_meminfo());
+       bfa_mem_dma_setup(meminfo, sfp_dma, bfa_sfp_meminfo());
+       bfa_mem_dma_setup(meminfo, flash_dma,
+                         bfa_flash_meminfo(cfg->drvcfg.min_cfg));
+       bfa_mem_dma_setup(meminfo, diag_dma, bfa_diag_meminfo());
+       bfa_mem_dma_setup(meminfo, phy_dma,
+                         bfa_phy_meminfo(cfg->drvcfg.min_cfg));
 }
 
 /*
@@ -1092,28 +1462,46 @@ void
 bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
               struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
 {
-       int                     i;
-       struct bfa_mem_elem_s   *melem;
+       int     i;
+       struct bfa_mem_dma_s *dma_info, *dma_elem;
+       struct bfa_mem_kva_s *kva_info, *kva_elem;
+       struct list_head *dm_qe, *km_qe;
 
        bfa->fcs = BFA_FALSE;
 
        WARN_ON((cfg == NULL) || (meminfo == NULL));
 
-       /*
-        * initialize all memory pointers for iterative allocation
-        */
-       for (i = 0; i < BFA_MEM_TYPE_MAX; i++) {
-               melem = meminfo->meminfo + i;
-               melem->kva_curp = melem->kva;
-               melem->dma_curp = melem->dma;
+       /* Initialize memory pointers for iterative allocation */
+       dma_info = &meminfo->dma_info;
+       dma_info->kva_curp = dma_info->kva;
+       dma_info->dma_curp = dma_info->dma;
+
+       kva_info = &meminfo->kva_info;
+       kva_info->kva_curp = kva_info->kva;
+
+       list_for_each(dm_qe, &dma_info->qe) {
+               dma_elem = (struct bfa_mem_dma_s *) dm_qe;
+               dma_elem->kva_curp = dma_elem->kva;
+               dma_elem->dma_curp = dma_elem->dma;
+       }
+
+       list_for_each(km_qe, &kva_info->qe) {
+               kva_elem = (struct bfa_mem_kva_s *) km_qe;
+               kva_elem->kva_curp = kva_elem->kva;
        }
 
-       bfa_iocfc_attach(bfa, bfad, cfg, meminfo, pcidev);
+       bfa_iocfc_attach(bfa, bfad, cfg, pcidev);
 
        for (i = 0; hal_mods[i]; i++)
-               hal_mods[i]->attach(bfa, bfad, cfg, meminfo, pcidev);
-
-       bfa_com_port_attach(bfa, meminfo);
+               hal_mods[i]->attach(bfa, bfad, cfg, pcidev);
+
+       bfa_com_port_attach(bfa);
+       bfa_com_ablk_attach(bfa);
+       bfa_com_cee_attach(bfa);
+       bfa_com_sfp_attach(bfa);
+       bfa_com_flash_attach(bfa, cfg->drvcfg.min_cfg);
+       bfa_com_diag_attach(bfa);
+       bfa_com_phy_attach(bfa, cfg->drvcfg.min_cfg);
 }
 
 /*
@@ -1215,6 +1603,7 @@ bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg)
        cfg->fwcfg.num_fcxp_reqs = DEF_CFG_NUM_FCXP_REQS;
        cfg->fwcfg.num_uf_bufs = DEF_CFG_NUM_UF_BUFS;
        cfg->fwcfg.num_cqs = DEF_CFG_NUM_CQS;
+       cfg->fwcfg.num_fwtio_reqs = 0;
 
        cfg->drvcfg.num_reqq_elems = DEF_CFG_NUM_REQQ_ELEMS;
        cfg->drvcfg.num_rspq_elems = DEF_CFG_NUM_RSPQ_ELEMS;
@@ -1236,6 +1625,7 @@ bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg)
        cfg->fwcfg.num_fcxp_reqs   = BFA_FCXP_MIN;
        cfg->fwcfg.num_uf_bufs     = BFA_UF_MIN;
        cfg->fwcfg.num_rports      = BFA_RPORT_MIN;
+       cfg->fwcfg.num_fwtio_reqs = 0;
 
        cfg->drvcfg.num_sgpgs      = BFA_SGPG_MIN;
        cfg->drvcfg.num_reqq_elems = BFA_REQQ_NELEMS_MIN;
index d85f93a..ed8d31b 100644 (file)
@@ -40,7 +40,12 @@ enum {
        BFA_MFG_TYPE_ASTRA    = 807,     /*  Astra mezz card            */
        BFA_MFG_TYPE_LIGHTNING_P0 = 902, /*  Lightning mezz card - old  */
        BFA_MFG_TYPE_LIGHTNING = 1741,   /*  Lightning mezz card        */
-       BFA_MFG_TYPE_INVALID = 0,        /*  Invalid card type          */
+       BFA_MFG_TYPE_PROWLER_F = 1560,   /*  Prowler FC only cards      */
+       BFA_MFG_TYPE_PROWLER_N = 1410,   /*  Prowler NIC only cards     */
+       BFA_MFG_TYPE_PROWLER_C = 1710,   /*  Prowler CNA only cards     */
+       BFA_MFG_TYPE_PROWLER_D = 1860,   /*  Prowler Dual cards         */
+       BFA_MFG_TYPE_CHINOOK   = 1867,   /*  Chinook cards              */
+       BFA_MFG_TYPE_INVALID = 0,        /*  Invalid card type          */
 };
 
 #pragma pack(1)
@@ -53,7 +58,8 @@ enum {
        (type) == BFA_MFG_TYPE_WANCHESE || \
        (type) == BFA_MFG_TYPE_ASTRA || \
        (type) == BFA_MFG_TYPE_LIGHTNING_P0 || \
-       (type) == BFA_MFG_TYPE_LIGHTNING))
+       (type) == BFA_MFG_TYPE_LIGHTNING || \
+       (type) == BFA_MFG_TYPE_CHINOOK))
 
 /*
  * Check if the card having old wwn/mac handling
@@ -124,30 +130,53 @@ enum bfa_status {
        BFA_STATUS_ETIMER       = 5,    /*  Timer expired - Retry, if persists,
                                         *  contact support */
        BFA_STATUS_EPROTOCOL    = 6,    /*  Protocol error */
+       BFA_STATUS_SFP_UNSUPP   = 10,   /*  Unsupported SFP - Replace SFP */
+       BFA_STATUS_UNKNOWN_VFID = 11,   /*  VF_ID not found */
+       BFA_STATUS_DATACORRUPTED = 12,  /*  Diag returned data corrupted */
        BFA_STATUS_DEVBUSY      = 13,   /*  Device busy - Retry operation */
+       BFA_STATUS_HDMA_FAILED  = 16,   /* Host DMA failed; contact support */
+       BFA_STATUS_FLASH_BAD_LEN = 17,  /*  Flash bad length */
        BFA_STATUS_UNKNOWN_LWWN = 18,   /*  LPORT PWWN not found */
        BFA_STATUS_UNKNOWN_RWWN = 19,   /*  RPORT PWWN not found */
        BFA_STATUS_VPORT_EXISTS = 21,   /*  VPORT already exists */
        BFA_STATUS_VPORT_MAX    = 22,   /*  Reached max VPORT supported limit */
        BFA_STATUS_UNSUPP_SPEED = 23,   /*  Invalid Speed Check speed setting */
        BFA_STATUS_INVLD_DFSZ   = 24,   /*  Invalid Max data field size */
+       BFA_STATUS_CMD_NOTSUPP  = 26,   /*  Command/API not supported */
        BFA_STATUS_FABRIC_RJT   = 29,   /*  Reject from attached fabric */
+       BFA_STATUS_PORT_OFFLINE = 34,   /*  Port is not online */
        BFA_STATUS_VPORT_WWN_BP = 46,   /*  WWN is same as base port's WWN */
+       BFA_STATUS_PORT_NOT_DISABLED = 47, /* Port not disabled; disable port first */
        BFA_STATUS_NO_FCPIM_NEXUS = 52, /* No FCP Nexus exists with the rport */
        BFA_STATUS_IOC_FAILURE  = 56,   /* IOC failure - Retry, if persists
                                         * contact support */
        BFA_STATUS_INVALID_WWN  = 57,   /*  Invalid WWN */
+       BFA_STATUS_ADAPTER_ENABLED = 60, /* Adapter is not disabled */
+       BFA_STATUS_IOC_NON_OP   = 61,   /* IOC is not operational */
+       BFA_STATUS_VERSION_FAIL = 70, /* Application/Driver version mismatch */
        BFA_STATUS_DIAG_BUSY    = 71,   /*  diag busy */
+       BFA_STATUS_BEACON_ON    = 72,   /* Port Beacon already on */
        BFA_STATUS_ENOFSAVE     = 78,   /*  No saved firmware trace */
        BFA_STATUS_IOC_DISABLED = 82,   /* IOC is already disabled */
+       BFA_STATUS_NO_SFP_DEV = 89,     /* No SFP device; check or replace SFP */
+       BFA_STATUS_MEMTEST_FAILED = 90, /* Memory test failed; contact support */
+       BFA_STATUS_LEDTEST_OP = 109, /* LED test is operating */
        BFA_STATUS_INVALID_MAC  = 134, /*  Invalid MAC address */
        BFA_STATUS_PBC          = 154, /*  Operation not allowed for pre-boot
                                        *  configuration */
+       BFA_STATUS_SFP_NOT_READY = 159, /* SFP info is not ready. Retry */
        BFA_STATUS_TRUNK_ENABLED = 164, /* Trunk is already enabled on
                                         * this adapter */
        BFA_STATUS_TRUNK_DISABLED  = 165, /* Trunking is disabled on
                                           * the adapter */
        BFA_STATUS_IOPROFILE_OFF = 175, /* IO profile OFF */
+       BFA_STATUS_PHY_NOT_PRESENT = 183, /* PHY module not present */
+       BFA_STATUS_FEATURE_NOT_SUPPORTED = 192, /* Feature not supported */
+       BFA_STATUS_FAA_ENABLED = 197,   /* FAA is already enabled */
+       BFA_STATUS_FAA_DISABLED = 198,  /* FAA is already disabled */
+       BFA_STATUS_FAA_ACQUIRED = 199,  /* FAA is already acquired */
+       BFA_STATUS_FAA_ACQ_ADDR = 200,  /* Acquiring addr */
+       BFA_STATUS_ERROR_TRUNK_ENABLED = 203,   /* Trunk enabled on adapter */
        BFA_STATUS_MAX_VAL              /* Unknown error code */
 };
 #define bfa_status_t enum bfa_status
@@ -265,6 +294,8 @@ enum bfa_ioc_state {
        BFA_IOC_DISABLED        = 10,   /*  IOC is disabled */
        BFA_IOC_FWMISMATCH      = 11,   /*  IOC f/w different from drivers */
        BFA_IOC_ENABLING        = 12,   /*  IOC is being enabled */
+       BFA_IOC_HWFAIL          = 13,   /*  PCI mapping doesn't exist */
+       BFA_IOC_ACQ_ADDR        = 14,   /*  Acquiring addr from fabric */
 };
 
 /*
@@ -294,6 +325,7 @@ struct bfa_ioc_drv_stats_s {
        u32     enable_reqs;
        u32     disable_replies;
        u32     enable_replies;
+       u32     rsvd;
 };
 
 /*
@@ -320,7 +352,10 @@ struct bfa_ioc_attr_s {
        struct bfa_ioc_driver_attr_s    driver_attr;    /*  driver attr    */
        struct bfa_ioc_pci_attr_s       pci_attr;
        u8                              port_id;        /*  port number    */
-       u8                              rsvd[7];        /*  64bit align    */
+       u8                              port_mode;      /*  bfa_mode_s  */
+       u8                              cap_bm;         /*  capability  */
+       u8                              port_mode_cfg;  /*  bfa_mode_s  */
+       u8                              rsvd[4];        /*  64bit align */
 };
 
 /*
@@ -337,6 +372,21 @@ struct bfa_ioc_attr_s {
 #define BFA_MFG_SUPPLIER_PARTNUM_SIZE          20
 #define BFA_MFG_SUPPLIER_SERIALNUM_SIZE                20
 #define BFA_MFG_SUPPLIER_REVISION_SIZE         4
+/*
+ * Initial capability definition
+ */
+#define BFA_MFG_IC_FC  0x01
+#define BFA_MFG_IC_ETH 0x02
+
+/*
+ * Adapter capability mask definition
+ */
+#define BFA_CM_HBA     0x01
+#define BFA_CM_CNA     0x02
+#define BFA_CM_NIC     0x04
+#define BFA_CM_FC16G   0x08
+#define BFA_CM_SRIOV   0x10
+#define BFA_CM_MEZZ    0x20
 
 #pragma pack(1)
 
@@ -344,31 +394,39 @@ struct bfa_ioc_attr_s {
  * All numerical fields are in big-endian format.
  */
 struct bfa_mfg_block_s {
-       u8              version;        /*  manufacturing block version */
-       u8              mfg_sig[3];     /*  characters 'M', 'F', 'G' */
-       u16     mfgsize;        /*  mfg block size */
-       u16     u16_chksum;     /*  old u16 checksum */
-       char            brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)];
-       char            brcd_partnum[STRSZ(BFA_MFG_PARTNUM_SIZE)];
-       u8              mfg_day;        /*  manufacturing day */
-       u8              mfg_month;      /*  manufacturing month */
-       u16     mfg_year;       /*  manufacturing year */
-       wwn_t           mfg_wwn;        /*  wwn base for this adapter */
-       u8              num_wwn;        /*  number of wwns assigned */
-       u8              mfg_speeds;     /*  speeds allowed for this adapter */
-       u8              rsv[2];
-       char            supplier_id[STRSZ(BFA_MFG_SUPPLIER_ID_SIZE)];
-       char            supplier_partnum[STRSZ(BFA_MFG_SUPPLIER_PARTNUM_SIZE)];
-       char
-               supplier_serialnum[STRSZ(BFA_MFG_SUPPLIER_SERIALNUM_SIZE)];
-       char
-               supplier_revision[STRSZ(BFA_MFG_SUPPLIER_REVISION_SIZE)];
-       mac_t           mfg_mac;        /*  mac address */
-       u8              num_mac;        /*  number of mac addresses */
-       u8              rsv2;
-       u32     mfg_type;       /*  card type */
-       u8              rsv3[108];
-       u8              md5_chksum[BFA_MFG_CHKSUM_SIZE]; /*  md5 checksum */
+       u8      version;    /*!< manufacturing block version */
+       u8     mfg_sig[3]; /*!< characters 'M', 'F', 'G' */
+       u16    mfgsize;    /*!< mfg block size */
+       u16    u16_chksum; /*!< old u16 checksum */
+       char        brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)];
+       char        brcd_partnum[STRSZ(BFA_MFG_PARTNUM_SIZE)];
+       u8     mfg_day;    /*!< manufacturing day */
+       u8     mfg_month;  /*!< manufacturing month */
+       u16    mfg_year;   /*!< manufacturing year */
+       wwn_t       mfg_wwn;    /*!< wwn base for this adapter */
+       u8     num_wwn;    /*!< number of wwns assigned */
+       u8     mfg_speeds; /*!< speeds allowed for this adapter */
+       u8     rsv[2];
+       char    supplier_id[STRSZ(BFA_MFG_SUPPLIER_ID_SIZE)];
+       char    supplier_partnum[STRSZ(BFA_MFG_SUPPLIER_PARTNUM_SIZE)];
+       char    supplier_serialnum[STRSZ(BFA_MFG_SUPPLIER_SERIALNUM_SIZE)];
+       char    supplier_revision[STRSZ(BFA_MFG_SUPPLIER_REVISION_SIZE)];
+       mac_t       mfg_mac;    /*!< base mac address */
+       u8     num_mac;    /*!< number of mac addresses */
+       u8     rsv2;
+       u32    card_type;  /*!< card type          */
+       char        cap_nic;    /*!< capability nic     */
+       char        cap_cna;    /*!< capability cna     */
+       char        cap_hba;    /*!< capability hba     */
+       char        cap_fc16g;  /*!< capability fc 16g      */
+       char        cap_sriov;  /*!< capability sriov       */
+       char        cap_mezz;   /*!< capability mezz        */
+       u8     rsv3;
+       u8     mfg_nports; /*!< number of ports        */
+       char        media[8];   /*!< xfi/xaui           */
+       char        initial_mode[8]; /*!< initial mode: hba/cna/nic */
+       u8     rsv4[84];
+       u8     md5_chksum[BFA_MFG_CHKSUM_SIZE]; /*!< md5 checksum */
 };
 
 #pragma pack()
@@ -386,17 +444,27 @@ enum {
        BFA_PCI_DEVICE_ID_FC_8G1P       = 0x17,
        BFA_PCI_DEVICE_ID_CT            = 0x14,
        BFA_PCI_DEVICE_ID_CT_FC         = 0x21,
+       BFA_PCI_DEVICE_ID_CT2           = 0x22,
 };
 
-#define bfa_asic_id_ct(devid)                  \
-       ((devid) == BFA_PCI_DEVICE_ID_CT ||     \
-        (devid) == BFA_PCI_DEVICE_ID_CT_FC)
+#define bfa_asic_id_cb(__d)                    \
+       ((__d) == BFA_PCI_DEVICE_ID_FC_8G2P ||  \
+        (__d) == BFA_PCI_DEVICE_ID_FC_8G1P)
+#define bfa_asic_id_ct(__d)                    \
+       ((__d) == BFA_PCI_DEVICE_ID_CT ||       \
+        (__d) == BFA_PCI_DEVICE_ID_CT_FC)
+#define bfa_asic_id_ct2(__d)   ((__d) == BFA_PCI_DEVICE_ID_CT2)
+#define bfa_asic_id_ctc(__d)   \
+       (bfa_asic_id_ct(__d) || bfa_asic_id_ct2(__d))
 
 /*
  * PCI sub-system device and vendor ID information
  */
 enum {
        BFA_PCI_FCOE_SSDEVICE_ID        = 0x14,
+       BFA_PCI_CT2_SSID_FCoE           = 0x22,
+       BFA_PCI_CT2_SSID_ETH            = 0x23,
+       BFA_PCI_CT2_SSID_FC             = 0x24,
 };
 
 /*
@@ -416,9 +484,7 @@ enum bfa_port_speed {
        BFA_PORT_SPEED_8GBPS    = 8,
        BFA_PORT_SPEED_10GBPS   = 10,
        BFA_PORT_SPEED_16GBPS   = 16,
-       BFA_PORT_SPEED_AUTO =
-               (BFA_PORT_SPEED_1GBPS | BFA_PORT_SPEED_2GBPS |
-                BFA_PORT_SPEED_4GBPS | BFA_PORT_SPEED_8GBPS),
+       BFA_PORT_SPEED_AUTO     = 0xf,
 };
 #define bfa_port_speed_t enum bfa_port_speed
 
@@ -463,4 +529,453 @@ struct bfa_boot_pbc_s {
        struct bfa_boot_bootlun_s pblun[BFA_PREBOOT_BOOTLUN_MAX];
 };
 
+/*
+ * ASIC block configuration related structures
+ */
+#define BFA_ABLK_MAX_PORTS     2
+#define BFA_ABLK_MAX_PFS       16
+#define BFA_ABLK_MAX           2
+
+#pragma pack(1)
+enum bfa_mode_s {
+       BFA_MODE_HBA    = 1,
+       BFA_MODE_CNA    = 2,
+       BFA_MODE_NIC    = 3
+};
+
+struct bfa_adapter_cfg_mode_s {
+       u16     max_pf;
+       u16     max_vf;
+       enum bfa_mode_s mode;
+};
+
+struct bfa_ablk_cfg_pf_s {
+       u16     pers;
+       u8      port_id;
+       u8      optrom;
+       u8      valid;
+       u8      sriov;
+       u8      max_vfs;
+       u8      rsvd[1];
+       u16     num_qpairs;
+       u16     num_vectors;
+       u32     bw;
+};
+
+struct bfa_ablk_cfg_port_s {
+       u8      mode;
+       u8      type;
+       u8      max_pfs;
+       u8      rsvd[5];
+};
+
+struct bfa_ablk_cfg_inst_s {
+       u8      nports;
+       u8      max_pfs;
+       u8      rsvd[6];
+       struct bfa_ablk_cfg_pf_s        pf_cfg[BFA_ABLK_MAX_PFS];
+       struct bfa_ablk_cfg_port_s      port_cfg[BFA_ABLK_MAX_PORTS];
+};
+
+struct bfa_ablk_cfg_s {
+       struct bfa_ablk_cfg_inst_s      inst[BFA_ABLK_MAX];
+};
+
+
+/*
+ *     SFP module specific
+ */
+#define SFP_DIAGMON_SIZE       10 /* num bytes of diag monitor data */
+
+enum bfa_defs_sfp_media_e {
+       BFA_SFP_MEDIA_UNKNOWN   = 0x00,
+       BFA_SFP_MEDIA_CU        = 0x01,
+       BFA_SFP_MEDIA_LW        = 0x02,
+       BFA_SFP_MEDIA_SW        = 0x03,
+       BFA_SFP_MEDIA_EL        = 0x04,
+       BFA_SFP_MEDIA_UNSUPPORT = 0x05,
+};
+
+/*
+ * values for xmtr_tech above
+ */
+enum {
+       SFP_XMTR_TECH_CU = (1 << 0),    /* copper FC-BaseT */
+       SFP_XMTR_TECH_CP = (1 << 1),    /* copper passive */
+       SFP_XMTR_TECH_CA = (1 << 2),    /* copper active */
+       SFP_XMTR_TECH_LL = (1 << 3),    /* longwave laser */
+       SFP_XMTR_TECH_SL = (1 << 4),    /* shortwave laser w/ OFC */
+       SFP_XMTR_TECH_SN = (1 << 5),    /* shortwave laser w/o OFC */
+       SFP_XMTR_TECH_EL_INTRA = (1 << 6), /* elec intra-enclosure */
+       SFP_XMTR_TECH_EL_INTER = (1 << 7), /* elec inter-enclosure */
+       SFP_XMTR_TECH_LC = (1 << 8),    /* longwave laser */
+       SFP_XMTR_TECH_SA = (1 << 9)
+};
+
+/*
+ * Serial ID: Data Fields -- Address A0h
+ * Basic ID field total 64 bytes
+ */
+struct sfp_srlid_base_s {
+       u8      id;             /* 00: Identifier */
+       u8      extid;          /* 01: Extended Identifier */
+       u8      connector;      /* 02: Connector */
+       u8      xcvr[8];        /* 03-10: Transceiver */
+       u8      encoding;       /* 11: Encoding */
+       u8      br_norm;        /* 12: BR, Nominal */
+       u8      rate_id;        /* 13: Rate Identifier */
+       u8      len_km;         /* 14: Length single mode km */
+       u8      len_100m;       /* 15: Length single mode 100m */
+       u8      len_om2;        /* 16: Length om2 fiber 10m */
+       u8      len_om1;        /* 17: Length om1 fiber 10m */
+       u8      len_cu;         /* 18: Length copper 1m */
+       u8      len_om3;        /* 19: Length om3 fiber 10m */
+       u8      vendor_name[16];/* 20-35 */
+       u8      unalloc1;
+       u8      vendor_oui[3];  /* 37-39 */
+       u8      vendor_pn[16];  /* 40-55 */
+       u8      vendor_rev[4];  /* 56-59 */
+       u8      wavelen[2];     /* 60-61 */
+       u8      unalloc2;
+       u8      cc_base;        /* 63: check code for base id field */
+};
+
+/*
+ * Serial ID: Data Fields -- Address A0h
+ * Extended id field total 32 bytes
+ */
+struct sfp_srlid_ext_s {
+       u8      options[2];
+       u8      br_max;
+       u8      br_min;
+       u8      vendor_sn[16];
+       u8      date_code[8];
+       u8      diag_mon_type;  /* 92: Diagnostic Monitoring type */
+       u8      en_options;
+       u8      sff_8472;
+       u8      cc_ext;
+};
+
+/*
+ * Diagnostic: Data Fields -- Address A2h
+ * Diagnostic and control/status base field total 96 bytes
+ */
+struct sfp_diag_base_s {
+       /*
+        * Alarm and warning Thresholds 40 bytes
+        */
+       u8      temp_high_alarm[2]; /* 00-01 */
+       u8      temp_low_alarm[2];  /* 02-03 */
+       u8      temp_high_warning[2];   /* 04-05 */
+       u8      temp_low_warning[2];    /* 06-07 */
+
+       u8      volt_high_alarm[2]; /* 08-09 */
+       u8      volt_low_alarm[2];  /* 10-11 */
+       u8      volt_high_warning[2];   /* 12-13 */
+       u8      volt_low_warning[2];    /* 14-15 */
+
+       u8      bias_high_alarm[2]; /* 16-17 */
+       u8      bias_low_alarm[2];  /* 18-19 */
+       u8      bias_high_warning[2];   /* 20-21 */
+       u8      bias_low_warning[2];    /* 22-23 */
+
+       u8      tx_pwr_high_alarm[2];   /* 24-25 */
+       u8      tx_pwr_low_alarm[2];    /* 26-27 */
+       u8      tx_pwr_high_warning[2]; /* 28-29 */
+       u8      tx_pwr_low_warning[2];  /* 30-31 */
+
+       u8      rx_pwr_high_alarm[2];   /* 32-33 */
+       u8      rx_pwr_low_alarm[2];    /* 34-35 */
+       u8      rx_pwr_high_warning[2]; /* 36-37 */
+       u8      rx_pwr_low_warning[2];  /* 38-39 */
+
+       u8      unallocate_1[16];
+
+       /*
+        * ext_cal_const[36]
+        */
+       u8      rx_pwr[20];
+       u8      tx_i[4];
+       u8      tx_pwr[4];
+       u8      temp[4];
+       u8      volt[4];
+       u8      unallocate_2[3];
+       u8      cc_dmi;
+};
+
+/*
+ * Diagnostic: Data Fields -- Address A2h
+ * Diagnostic and control/status extended field total 24 bytes
+ */
+struct sfp_diag_ext_s {
+       u8      diag[SFP_DIAGMON_SIZE];
+       u8      unalloc1[4];
+       u8      status_ctl;
+       u8      rsvd;
+       u8      alarm_flags[2];
+       u8      unalloc2[2];
+       u8      warning_flags[2];
+       u8      ext_status_ctl[2];
+};
+
+struct sfp_mem_s {
+       struct sfp_srlid_base_s srlid_base;
+       struct sfp_srlid_ext_s  srlid_ext;
+       struct sfp_diag_base_s  diag_base;
+       struct sfp_diag_ext_s   diag_ext;
+};
+
+/*
+ * transceiver codes (SFF-8472 Rev 10.2 Table 3.5)
+ */
+union sfp_xcvr_e10g_code_u {
+       u8              b;
+       struct {
+#ifdef __BIGENDIAN
+               u8      e10g_unall:1;   /* 10G Ethernet compliance */
+               u8      e10g_lrm:1;
+               u8      e10g_lr:1;
+               u8      e10g_sr:1;
+               u8      ib_sx:1;    /* Infiniband compliance */
+               u8      ib_lx:1;
+               u8      ib_cu_a:1;
+               u8      ib_cu_p:1;
+#else
+               u8      ib_cu_p:1;
+               u8      ib_cu_a:1;
+               u8      ib_lx:1;
+               u8      ib_sx:1;    /* Infiniband compliance */
+               u8      e10g_sr:1;
+               u8      e10g_lr:1;
+               u8      e10g_lrm:1;
+               u8      e10g_unall:1;   /* 10G Ethernet compliance */
+#endif
+       } r;
+};
+
+union sfp_xcvr_so1_code_u {
+       u8              b;
+       struct {
+               u8      escon:2;    /* ESCON compliance code */
+               u8      oc192_reach:1;  /* SONET compliance code */
+               u8      so_reach:2;
+               u8      oc48_reach:3;
+       } r;
+};
+
+union sfp_xcvr_so2_code_u {
+       u8              b;
+       struct {
+               u8      reserved:1;
+               u8      oc12_reach:3;   /* OC12 reach */
+               u8      reserved1:1;
+               u8      oc3_reach:3;    /* OC3 reach */
+       } r;
+};
+
+union sfp_xcvr_eth_code_u {
+       u8              b;
+       struct {
+               u8      base_px:1;
+               u8      base_bx10:1;
+               u8      e100base_fx:1;
+               u8      e100base_lx:1;
+               u8      e1000base_t:1;
+               u8      e1000base_cx:1;
+               u8      e1000base_lx:1;
+               u8      e1000base_sx:1;
+       } r;
+};
+
+struct sfp_xcvr_fc1_code_s {
+       u8      link_len:5; /* FC link length */
+       u8      xmtr_tech2:3;
+       u8      xmtr_tech1:7;   /* FC transmitter technology */
+       u8      reserved1:1;
+};
+
+union sfp_xcvr_fc2_code_u {
+       u8              b;
+       struct {
+               u8      tw_media:1; /* twin axial pair (tw) */
+               u8      tp_media:1; /* shielded twisted pair (sp) */
+               u8      mi_media:1; /* miniature coax (mi) */
+               u8      tv_media:1; /* video coax (tv) */
+               u8      m6_media:1; /* multimode, 62.5m (m6) */
+               u8      m5_media:1; /* multimode, 50m (m5) */
+               u8      reserved:1;
+               u8      sm_media:1; /* single mode (sm) */
+       } r;
+};
+
+union sfp_xcvr_fc3_code_u {
+       u8              b;
+       struct {
+#ifdef __BIGENDIAN
+               u8      rsv4:1;
+               u8      mb800:1;    /* 800 Mbytes/sec */
+               u8      mb1600:1;   /* 1600 Mbytes/sec */
+               u8      mb400:1;    /* 400 Mbytes/sec */
+               u8      rsv2:1;
+               u8      mb200:1;    /* 200 Mbytes/sec */
+               u8      rsv1:1;
+               u8      mb100:1;    /* 100 Mbytes/sec */
+#else
+               u8      mb100:1;    /* 100 Mbytes/sec */
+               u8      rsv1:1;
+               u8      mb200:1;    /* 200 Mbytes/sec */
+               u8      rsv2:1;
+               u8      mb400:1;    /* 400 Mbytes/sec */
+               u8      mb1600:1;   /* 1600 Mbytes/sec */
+               u8      mb800:1;    /* 800 Mbytes/sec */
+               u8      rsv4:1;
+#endif
+       } r;
+};
+
+struct sfp_xcvr_s {
+       union sfp_xcvr_e10g_code_u      e10g;
+       union sfp_xcvr_so1_code_u       so1;
+       union sfp_xcvr_so2_code_u       so2;
+       union sfp_xcvr_eth_code_u       eth;
+       struct sfp_xcvr_fc1_code_s      fc1;
+       union sfp_xcvr_fc2_code_u       fc2;
+       union sfp_xcvr_fc3_code_u       fc3;
+};
+
+/*
+ *     Flash module specific
+ */
+#define BFA_FLASH_PART_ENTRY_SIZE      32      /* partition entry size */
+#define BFA_FLASH_PART_MAX             32      /* maximal # of partitions */
+
+enum bfa_flash_part_type {
+       BFA_FLASH_PART_OPTROM   = 1,    /* option rom partition */
+       BFA_FLASH_PART_FWIMG    = 2,    /* firmware image partition */
+       BFA_FLASH_PART_FWCFG    = 3,    /* firmware tuneable config */
+       BFA_FLASH_PART_DRV      = 4,    /* IOC driver config */
+       BFA_FLASH_PART_BOOT     = 5,    /* boot config */
+       BFA_FLASH_PART_ASIC     = 6,    /* asic bootstrap configuration */
+       BFA_FLASH_PART_MFG      = 7,    /* manufacturing block partition */
+       BFA_FLASH_PART_OPTROM2  = 8,    /* 2nd option rom partition */
+       BFA_FLASH_PART_VPD      = 9,    /* vpd data of OEM info */
+       BFA_FLASH_PART_PBC      = 10,   /* pre-boot config */
+       BFA_FLASH_PART_BOOTOVL  = 11,   /* boot overlay partition */
+       BFA_FLASH_PART_LOG      = 12,   /* firmware log partition */
+       BFA_FLASH_PART_PXECFG   = 13,   /* pxe boot config partition */
+       BFA_FLASH_PART_PXEOVL   = 14,   /* pxe boot overlay partition */
+       BFA_FLASH_PART_PORTCFG  = 15,   /* port cfg partition */
+       BFA_FLASH_PART_ASICBK   = 16,   /* asic backup partition */
+};
+
+/*
+ * flash partition attributes
+ */
+struct bfa_flash_part_attr_s {
+       u32     part_type;      /* partition type */
+       u32     part_instance;  /* partition instance */
+       u32     part_off;       /* partition offset */
+       u32     part_size;      /* partition size */
+       u32     part_len;       /* partition content length */
+       u32     part_status;    /* partition status */
+       char    rsv[BFA_FLASH_PART_ENTRY_SIZE - 24];
+};
+
+/*
+ * flash attributes
+ */
+struct bfa_flash_attr_s {
+       u32     status; /* flash overall status */
+       u32     npart;  /* num of partitions */
+       struct bfa_flash_part_attr_s part[BFA_FLASH_PART_MAX];
+};
+
+/*
+ *     DIAG module specific
+ */
+#define LB_PATTERN_DEFAULT     0xB5B5B5B5
+#define QTEST_CNT_DEFAULT      10
+#define QTEST_PAT_DEFAULT      LB_PATTERN_DEFAULT
+
+struct bfa_diag_memtest_s {
+       u8      algo;
+       u8      rsvd[7];
+};
+
+struct bfa_diag_memtest_result {
+       u32     status;
+       u32     addr;
+       u32     exp; /* expect value read from reg */
+       u32     act; /* actually value read */
+       u32     err_status;             /* error status reg */
+       u32     err_status1;    /* extra error info reg */
+       u32     err_addr; /* error address reg */
+       u8      algo;
+       u8      rsv[3];
+};
+
+struct bfa_diag_loopback_result_s {
+       u32     numtxmfrm;      /* no. of transmit frame */
+       u32     numosffrm;      /* no. of outstanding frame */
+       u32     numrcvfrm;      /* no. of received good frame */
+       u32     badfrminf;      /* mis-match info */
+       u32     badfrmnum;      /* mis-match fram number */
+       u8      status;         /* loopback test result */
+       u8      rsvd[3];
+};
+
+struct bfa_diag_ledtest_s {
+       u32     cmd;    /* bfa_led_op_t */
+       u32     color;  /* bfa_led_color_t */
+       u16     freq;   /* no. of blinks every 10 secs */
+       u8      led;    /* bitmap of LEDs to be tested */
+       u8      rsvd[5];
+};
+
+struct bfa_diag_loopback_s {
+       u32     loopcnt;
+       u32     pattern;
+       u8      lb_mode;    /* bfa_port_opmode_t */
+       u8      speed;      /* bfa_port_speed_t */
+       u8      rsvd[2];
+};
+
+/*
+ *     PHY module specific
+ */
+enum bfa_phy_status_e {
+       BFA_PHY_STATUS_GOOD     = 0, /* phy is good */
+       BFA_PHY_STATUS_NOT_PRESENT      = 1, /* phy does not exist */
+       BFA_PHY_STATUS_BAD      = 2, /* phy is bad */
+};
+
+/*
+ * phy attributes for phy query
+ */
+struct bfa_phy_attr_s {
+       u32     status;         /* phy present/absent status */
+       u32     length;         /* firmware length */
+       u32     fw_ver;         /* firmware version */
+       u32     an_status;      /* AN status */
+       u32     pma_pmd_status; /* PMA/PMD link status */
+       u32     pma_pmd_signal; /* PMA/PMD signal detect */
+       u32     pcs_status;     /* PCS link status */
+};
+
+/*
+ * phy stats
+ */
+struct bfa_phy_stats_s {
+       u32     status;         /* phy stats status */
+       u32     link_breaks;    /* Num of link breaks after linkup */
+       u32     pma_pmd_fault;  /* NPMA/PMD fault */
+       u32     pcs_fault;      /* PCS fault */
+       u32     speed_neg;      /* Num of speed negotiation */
+       u32     tx_eq_training; /* Num of TX EQ training */
+       u32     tx_eq_timeout;  /* Num of TX EQ timeout */
+       u32     crc_error;      /* Num of CRC errors */
+};
+
+#pragma pack()
+
 #endif /* __BFA_DEFS_H__ */
index 191d34a..3bbc583 100644 (file)
@@ -90,12 +90,14 @@ enum bfa_lport_role {
  * FCS port configuration.
  */
 struct bfa_lport_cfg_s {
-    wwn_t             pwwn;       /*  port wwn */
-    wwn_t             nwwn;       /*  node wwn */
-    struct bfa_lport_symname_s  sym_name;   /*  vm port symbolic name */
-    bfa_boolean_t       preboot_vp;  /*  vport created from PBC */
-    enum bfa_lport_role     roles;      /*  FCS port roles */
-    u8      tag[16];   /*  opaque tag from application */
+       wwn_t          pwwn;       /*  port wwn */
+       wwn_t          nwwn;       /*  node wwn */
+       struct bfa_lport_symname_s  sym_name;   /*  vm port symbolic name */
+       enum bfa_lport_role roles;      /* FCS port roles */
+       u32     rsvd;
+       bfa_boolean_t   preboot_vp;  /*  vport created from PBC */
+       u8      tag[16];        /* opaque tag from application */
+       u8      padding[4];
 };
 
 /*
@@ -249,12 +251,13 @@ enum bfa_vport_state {
        BFA_FCS_VPORT_FDISC_SEND        = 2,
        BFA_FCS_VPORT_FDISC             = 3,
        BFA_FCS_VPORT_FDISC_RETRY       = 4,
-       BFA_FCS_VPORT_ONLINE            = 5,
-       BFA_FCS_VPORT_DELETING          = 6,
-       BFA_FCS_VPORT_CLEANUP           = 6,
-       BFA_FCS_VPORT_LOGO_SEND         = 7,
-       BFA_FCS_VPORT_LOGO              = 8,
-       BFA_FCS_VPORT_ERROR             = 9,
+       BFA_FCS_VPORT_FDISC_RSP_WAIT    = 5,
+       BFA_FCS_VPORT_ONLINE            = 6,
+       BFA_FCS_VPORT_DELETING          = 7,
+       BFA_FCS_VPORT_CLEANUP           = 8,
+       BFA_FCS_VPORT_LOGO_SEND         = 9,
+       BFA_FCS_VPORT_LOGO              = 10,
+       BFA_FCS_VPORT_ERROR             = 11,
        BFA_FCS_VPORT_MAX_STATE,
 };
 
index 207f598..0b97525 100644 (file)
@@ -47,13 +47,12 @@ struct bfa_iocfc_fwcfg_s {
        u16        num_rports;  /*  number of remote ports      */
        u16        num_ioim_reqs;       /*  number of IO reqs           */
        u16        num_tskim_reqs;      /*  task management requests    */
-       u16        num_iotm_reqs;       /*  number of TM IO reqs        */
-       u16        num_tsktm_reqs;      /*  TM task management requests*/
+       u16        num_fwtio_reqs;      /* number of TM IO reqs in FW */
        u16        num_fcxp_reqs;       /*  unassisted FC exchanges     */
        u16        num_uf_bufs; /*  unsolicited recv buffers    */
        u8              num_cqs;
        u8              fw_tick_res;    /*  FW clock resolution in ms */
-       u8              rsvd[4];
+       u8              rsvd[2];
 };
 #pragma pack()
 
@@ -66,8 +65,12 @@ struct bfa_iocfc_drvcfg_s {
        u16         ioc_recover;        /*  IOC recovery mode             */
        u16         min_cfg;    /*  minimum configuration         */
        u16        path_tov;    /*  device path timeout   */
+       u16             num_tio_reqs;   /*!< number of TM IO reqs       */
+       u8              port_mode;
+       u8              rsvd_a;
        bfa_boolean_t   delay_comp; /*  delay completion of
                                                        failed inflight IOs */
+       u16             num_ttsk_reqs;   /* TM task management requests */
        u32             rsvd;
 };
 
@@ -82,7 +85,7 @@ struct bfa_iocfc_cfg_s {
 /*
  * IOC firmware IO stats
  */
-struct bfa_fw_io_stats_s {
+struct bfa_fw_ioim_stats_s {
        u32     host_abort;             /*  IO aborted by host driver*/
        u32     host_cleanup;           /*  IO clean up by host driver */
 
@@ -152,6 +155,54 @@ struct bfa_fw_io_stats_s {
                                                 */
 };
 
+struct bfa_fw_tio_stats_s {
+       u32     tio_conf_proc;  /* TIO CONF processed */
+       u32     tio_conf_drop;      /* TIO CONF dropped */
+       u32     tio_cleanup_req;    /* TIO cleanup requested */
+       u32     tio_cleanup_comp;   /* TIO cleanup completed */
+       u32     tio_abort_rsp;      /* TIO abort response */
+       u32     tio_abort_rsp_comp; /* TIO abort rsp completed */
+       u32     tio_abts_req;       /* TIO ABTS requested */
+       u32     tio_abts_ack;       /* TIO ABTS ack-ed */
+       u32     tio_abts_ack_nocomp; /* TIO ABTS ack-ed but not completed */
+       u32     tio_abts_tmo;       /* TIO ABTS timeout */
+       u32     tio_snsdata_dma;    /* TIO sense data DMA */
+       u32     tio_rxwchan_wait; /* TIO waiting for RX wait channel */
+       u32     tio_rxwchan_avail; /* TIO RX wait channel available */
+       u32     tio_hit_bls;        /* TIO IOH BLS event */
+       u32     tio_uf_recv;        /* TIO received UF */
+       u32     tio_rd_invalid_sm; /* TIO read reqst in wrong state machine */
+       u32     tio_wr_invalid_sm;/* TIO write reqst in wrong state machine */
+
+       u32     ds_rxwchan_wait; /* DS waiting for RX wait channel */
+       u32     ds_rxwchan_avail; /* DS RX wait channel available */
+       u32     ds_unaligned_rd;    /* DS unaligned read */
+       u32     ds_rdcomp_invalid_sm; /* DS read completed in wrong state machine */
+       u32     ds_wrcomp_invalid_sm; /* DS write completed in wrong state machine */
+       u32     ds_flush_req;       /* DS flush requested */
+       u32     ds_flush_comp;      /* DS flush completed */
+       u32     ds_xfrdy_exp;       /* DS XFER_RDY expired */
+       u32     ds_seq_cnt_err;     /* DS seq cnt error */
+       u32     ds_seq_len_err;     /* DS seq len error */
+       u32     ds_data_oor;        /* DS data out of order */
+       u32     ds_hit_bls;     /* DS hit BLS */
+       u32     ds_edtov_timer_exp; /* DS edtov expired */
+       u32     ds_cpu_owned;       /* DS cpu owned */
+       u32     ds_hit_class2;      /* DS hit class2 */
+       u32     ds_length_err;      /* DS length error */
+       u32     ds_ro_ooo_err;      /* DS relative offset out-of-order error */
+       u32     ds_rectov_timer_exp;    /* DS rectov expired */
+       u32     ds_unexp_fr_err;    /* DS unexp frame error */
+};
+
+/*
+ * IOC firmware IO stats
+ */
+struct bfa_fw_io_stats_s {
+       struct bfa_fw_ioim_stats_s      ioim_stats;
+       struct bfa_fw_tio_stats_s       tio_stats;
+};
+
 /*
  * IOC port firmware stats
  */
@@ -205,6 +256,7 @@ struct bfa_fw_port_lksm_stats_s {
     u32    nos_tx;             /*  No. of times NOS tx started         */
     u32    hwsm_lrr_rx;        /*  No. of times LRR rx-ed by HWSM      */
     u32    hwsm_lr_rx;         /*  No. of times LR rx-ed by HWSM      */
+       u32     bbsc_lr;        /* LKSM LR tx for credit recovery       */
 };
 
 struct bfa_fw_port_snsm_stats_s {
@@ -266,8 +318,8 @@ struct bfa_fw_fcoe_stats_s {
  * IOC firmware FCoE port stats
  */
 struct bfa_fw_fcoe_port_stats_s {
-    struct bfa_fw_fcoe_stats_s  fcoe_stats;
-    struct bfa_fw_fip_stats_s   fip_stats;
+       struct bfa_fw_fcoe_stats_s  fcoe_stats;
+       struct bfa_fw_fip_stats_s   fip_stats;
 };
 
 /*
@@ -636,6 +688,7 @@ enum bfa_port_states {
        BFA_PORT_ST_FWMISMATCH          = 12,
        BFA_PORT_ST_PREBOOT_DISABLED    = 13,
        BFA_PORT_ST_TOGGLING_QWAIT      = 14,
+       BFA_PORT_ST_ACQ_ADDR            = 15,
        BFA_PORT_ST_MAX_STATE,
 };
 
@@ -748,6 +801,10 @@ struct bfa_port_cfg_s {
        u8       tx_bbcredit;   /*  transmit buffer credits     */
        u8       ratelimit;     /*  ratelimit enabled or not    */
        u8       trl_def_speed; /*  ratelimit default speed     */
+       u8      bb_scn;         /*  BB_SCN value from FLOGI Exchg */
+       u8      bb_scn_state;   /*  Config state of BB_SCN */
+       u8      faa_state;      /*  FAA enabled/disabled        */
+       u8      rsvd[1];
        u16 path_tov;   /*  device path timeout */
        u16 q_depth;    /*  SCSI Queue depth            */
 };
@@ -783,7 +840,7 @@ struct bfa_port_attr_s {
        enum bfa_port_topology  topology;       /*  current topology */
        bfa_boolean_t           beacon;         /*  current beacon status */
        bfa_boolean_t           link_e2e_beacon; /*  link beacon is on */
-       bfa_boolean_t           plog_enabled;   /*  portlog is enabled */
+       bfa_boolean_t   bbsc_op_status; /* fc credit recovery oper state */
 
        /*
         * Dynamic field - info from FCS
@@ -792,12 +849,10 @@ struct bfa_port_attr_s {
        enum bfa_port_type      port_type;      /*  current topology */
        u32             loopback;       /*  external loopback */
        u32             authfail;       /*  auth fail state */
-       bfa_boolean_t           io_profile;     /*  get it from fcpim mod */
-       u8                      pad[4];         /*  for 64-bit alignement */
 
        /* FCoE specific  */
        u16             fcoe_vlan;
-       u8                      rsvd1[6];
+       u8                      rsvd1[2];
 };
 
 /*
@@ -987,6 +1042,19 @@ struct bfa_itnim_ioprofile_s {
        struct bfa_itnim_latency_s io_latency;
 };
 
+/*
+ *     vHBA port attribute values.
+ */
+struct bfa_vhba_attr_s {
+       wwn_t   nwwn;       /* node wwn */
+       wwn_t   pwwn;       /* port wwn */
+       u32     pid;        /* port ID */
+       bfa_boolean_t       io_profile; /* get it from fcpim mod */
+       bfa_boolean_t       plog_enabled;   /* portlog is enabled */
+       u16     path_tov;
+       u8      rsvd[2];
+};
+
 /*
  * FC physical port statistics.
  */
@@ -1020,6 +1088,9 @@ struct bfa_port_fc_stats_s {
        u64     bad_os_count;   /*  Invalid ordered sets        */
        u64     err_enc_out;    /*  Encoding err nonframe_8b10b */
        u64     err_enc;        /*  Encoding err frame_8b10b    */
+       u64     bbsc_frames_lost; /* Credit Recovery-Frames Lost  */
+       u64     bbsc_credits_lost; /* Credit Recovery-Credits Lost */
+       u64     bbsc_link_resets; /* Credit Recovery-Link Resets   */
 };
 
 /*
@@ -1078,4 +1149,83 @@ union bfa_port_stats_u {
        struct bfa_port_eth_stats_s     eth;
 };
 
+struct bfa_port_cfg_mode_s {
+       u16             max_pf;
+       u16             max_vf;
+       enum bfa_mode_s mode;
+};
+
+#pragma pack(1)
+
+#define BFA_CEE_LLDP_MAX_STRING_LEN    (128)
+#define BFA_CEE_DCBX_MAX_PRIORITY      (8)
+#define BFA_CEE_DCBX_MAX_PGID          (8)
+
+struct bfa_cee_lldp_str_s {
+       u8      sub_type;
+       u8      len;
+       u8      rsvd[2];
+       u8      value[BFA_CEE_LLDP_MAX_STRING_LEN];
+};
+
+struct bfa_cee_lldp_cfg_s {
+       struct bfa_cee_lldp_str_s chassis_id;
+       struct bfa_cee_lldp_str_s port_id;
+       struct bfa_cee_lldp_str_s port_desc;
+       struct bfa_cee_lldp_str_s sys_name;
+       struct bfa_cee_lldp_str_s sys_desc;
+       struct bfa_cee_lldp_str_s mgmt_addr;
+       u16     time_to_live;
+       u16     enabled_system_cap;
+};
+
+/* CEE/DCBX parameters */
+struct bfa_cee_dcbx_cfg_s {
+       u8      pgid[BFA_CEE_DCBX_MAX_PRIORITY];
+       u8      pg_percentage[BFA_CEE_DCBX_MAX_PGID];
+       u8      pfc_primap; /* bitmap of priorties with PFC enabled */
+       u8      fcoe_primap; /* bitmap of priorities used for FcoE traffic */
+       u8      iscsi_primap; /* bitmap of priorities used for iSCSI traffic */
+       u8      dcbx_version; /* operating version:CEE or preCEE */
+       u8      lls_fcoe; /* FCoE Logical Link Status */
+       u8      lls_lan; /* LAN Logical Link Status */
+       u8      rsvd[2];
+};
+
+/* CEE Query */
+struct bfa_cee_attr_s {
+       u8      cee_status;
+       u8      error_reason;
+       struct bfa_cee_lldp_cfg_s lldp_remote;
+       struct bfa_cee_dcbx_cfg_s dcbx_remote;
+       mac_t src_mac;
+       u8      link_speed;
+       u8      nw_priority;
+       u8      filler[2];
+};
+
+/* LLDP/DCBX/CEE Statistics */
+struct bfa_cee_stats_s {
+       u32             lldp_tx_frames;         /* LLDP Tx Frames */
+       u32             lldp_rx_frames;         /* LLDP Rx Frames */
+       u32             lldp_rx_frames_invalid; /* LLDP Rx Frames invalid */
+       u32             lldp_rx_frames_new;     /* LLDP Rx Frames new */
+       u32             lldp_tlvs_unrecognized; /* LLDP Rx unrecog. TLVs */
+       u32             lldp_rx_shutdown_tlvs;  /* LLDP Rx shutdown TLVs */
+       u32             lldp_info_aged_out;     /* LLDP remote info aged */
+       u32             dcbx_phylink_ups;       /* DCBX phy link ups */
+       u32             dcbx_phylink_downs;     /* DCBX phy link downs */
+       u32             dcbx_rx_tlvs;           /* DCBX Rx TLVs */
+       u32             dcbx_rx_tlvs_invalid;   /* DCBX Rx TLVs invalid */
+       u32             dcbx_control_tlv_error; /* DCBX control TLV errors */
+       u32             dcbx_feature_tlv_error; /* DCBX feature TLV errors */
+       u32             dcbx_cee_cfg_new;       /* DCBX new CEE cfg rcvd */
+       u32             cee_status_down;        /* DCB status down */
+       u32             cee_status_up;          /* DCB status up */
+       u32             cee_hw_cfg_changed;     /* DCB hw cfg changed */
+       u32             cee_rx_invalid_cfg;     /* DCB invalid cfg */
+};
+
+#pragma pack()
+
 #endif /* __BFA_DEFS_SVC_H__ */
index bf0067e..8d0b88f 100644 (file)
@@ -1021,7 +1021,7 @@ struct fc_symname_s {
 #define FC_ED_TOV      2
 #define FC_REC_TOV     (FC_ED_TOV + 1)
 #define FC_RA_TOV      10
-#define FC_ELS_TOV     (2 * FC_RA_TOV)
+#define FC_ELS_TOV     ((2 * FC_RA_TOV) + 1)
 #define FC_FCCT_TOV    (3 * FC_RA_TOV)
 
 /*
@@ -1048,15 +1048,6 @@ struct fc_vft_s {
        u32        res_c:24;
 };
 
-/*
- * FCP
- */
-enum {
-       FCP_RJT         = 0x01000000,   /* SRR reject */
-       FCP_SRR_ACCEPT  = 0x02000000,   /* SRR accept */
-       FCP_SRR         = 0x14000000,   /* Sequence Retransmission Request */
-};
-
 /*
  * FCP_CMND definitions
  */
index b7e2534..17b59b8 100644 (file)
@@ -94,7 +94,6 @@ fcbuild_init(void)
         */
        plogi_tmpl.csp.verhi = FC_PH_VER_PH_3;
        plogi_tmpl.csp.verlo = FC_PH_VER_4_3;
-       plogi_tmpl.csp.bbcred = cpu_to_be16(0x0004);
        plogi_tmpl.csp.ciro = 0x1;
        plogi_tmpl.csp.cisc = 0x0;
        plogi_tmpl.csp.altbbcred = 0x0;
@@ -156,6 +155,22 @@ fc_gs_fchdr_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u32 ox_id)
         */
 }
 
+static void
+fc_gsresp_fchdr_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id)
+{
+       memset(fchs, 0, sizeof(struct fchs_s));
+
+       fchs->routing = FC_RTG_FC4_DEV_DATA;
+       fchs->cat_info = FC_CAT_SOLICIT_CTRL;
+       fchs->type = FC_TYPE_SERVICES;
+       fchs->f_ctl =
+               bfa_hton3b(FCTL_EC_RESP | FCTL_SEQ_INI | FCTL_LS_EXCH |
+                          FCTL_END_SEQ | FCTL_SI_XFER);
+       fchs->d_id = d_id;
+       fchs->s_id = s_id;
+       fchs->ox_id = ox_id;
+}
+
 void
 fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id)
 {
@@ -207,7 +222,7 @@ fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id)
 static          u16
 fc_plogi_x_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
                 __be16 ox_id, wwn_t port_name, wwn_t node_name,
-                u16 pdu_size, u8 els_code)
+                u16 pdu_size, u16 bb_cr, u8 els_code)
 {
        struct fc_logi_s *plogi = (struct fc_logi_s *) (pld);
 
@@ -220,6 +235,7 @@ fc_plogi_x_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
                fc_els_rsp_build(fchs, d_id, s_id, ox_id);
 
        plogi->csp.rxsz = plogi->class3.rxsz = cpu_to_be16(pdu_size);
+       plogi->csp.bbcred  = cpu_to_be16(bb_cr);
 
        memcpy(&plogi->port_name, &port_name, sizeof(wwn_t));
        memcpy(&plogi->node_name, &node_name, sizeof(wwn_t));
@@ -268,15 +284,17 @@ fc_flogi_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
 u16
 fc_flogi_acc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
                   __be16 ox_id, wwn_t port_name, wwn_t node_name,
-                  u16 pdu_size, u16 local_bb_credits)
+                  u16 pdu_size, u16 local_bb_credits, u8 bb_scn)
 {
        u32        d_id = 0;
+       u16        bbscn_rxsz = (bb_scn << 12) | pdu_size;
 
        memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s));
        fc_els_rsp_build(fchs, d_id, s_id, ox_id);
 
        flogi->els_cmd.els_code = FC_ELS_ACC;
-       flogi->csp.rxsz = flogi->class3.rxsz = cpu_to_be16(pdu_size);
+       flogi->class3.rxsz = cpu_to_be16(pdu_size);
+       flogi->csp.rxsz  = cpu_to_be16(bbscn_rxsz);     /* bb_scn/rxsz */
        flogi->port_name = port_name;
        flogi->node_name = node_name;
 
@@ -306,19 +324,19 @@ fc_fdisc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
 u16
 fc_plogi_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
               u16 ox_id, wwn_t port_name, wwn_t node_name,
-              u16 pdu_size)
+              u16 pdu_size, u16 bb_cr)
 {
        return fc_plogi_x_build(fchs, pld, d_id, s_id, ox_id, port_name,
-                               node_name, pdu_size, FC_ELS_PLOGI);
+                               node_name, pdu_size, bb_cr, FC_ELS_PLOGI);
 }
 
 u16
 fc_plogi_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
                   u16 ox_id, wwn_t port_name, wwn_t node_name,
-                  u16 pdu_size)
+                  u16 pdu_size, u16 bb_cr)
 {
        return fc_plogi_x_build(fchs, pld, d_id, s_id, ox_id, port_name,
-                               node_name, pdu_size, FC_ELS_ACC);
+                               node_name, pdu_size, bb_cr, FC_ELS_ACC);
 }
 
 enum fc_parse_status
@@ -1095,6 +1113,21 @@ fc_ct_rsp_parse(struct ct_hdr_s *cthdr)
        return FC_PARSE_OK;
 }
 
+u16
+fc_gs_rjt_build(struct fchs_s *fchs,  struct ct_hdr_s *cthdr,
+               u32 d_id, u32 s_id, u16 ox_id, u8 reason_code,
+               u8 reason_code_expl)
+{
+       fc_gsresp_fchdr_build(fchs, d_id, s_id, ox_id);
+
+       cthdr->cmd_rsp_code = cpu_to_be16(CT_RSP_REJECT);
+       cthdr->rev_id = CT_GS3_REVISION;
+
+       cthdr->reason_code = reason_code;
+       cthdr->exp_code    = reason_code_expl;
+       return sizeof(struct ct_hdr_s);
+}
+
 u16
 fc_scr_build(struct fchs_s *fchs, struct fc_scr_s *scr,
                u8 set_br_reg, u32 s_id, u16 ox_id)
index ece51ec..42cd9d4 100644 (file)
@@ -66,6 +66,9 @@ fc_rpsc_operspeed_to_bfa_speed(enum fc_rpsc_op_speed speed)
        case RPSC_OP_SPEED_8G:
                return BFA_PORT_SPEED_8GBPS;
 
+       case RPSC_OP_SPEED_16G:
+               return BFA_PORT_SPEED_16GBPS;
+
        case RPSC_OP_SPEED_10G:
                return BFA_PORT_SPEED_10GBPS;
 
@@ -94,6 +97,9 @@ fc_bfa_speed_to_rpsc_operspeed(enum bfa_port_speed op_speed)
        case BFA_PORT_SPEED_8GBPS:
                return RPSC_OP_SPEED_8G;
 
+       case BFA_PORT_SPEED_16GBPS:
+               return RPSC_OP_SPEED_16G;
+
        case BFA_PORT_SPEED_10GBPS:
                return RPSC_OP_SPEED_10G;
 
@@ -141,11 +147,11 @@ u16        fc_flogi_acc_build(struct fchs_s *fchs, struct fc_logi_s *flogi,
                                   u32 s_id, __be16 ox_id,
                                   wwn_t port_name, wwn_t node_name,
                                   u16 pdu_size,
-                                  u16 local_bb_credits);
+                                  u16 local_bb_credits, u8 bb_scn);
 
 u16        fc_plogi_build(struct fchs_s *fchs, void *pld, u32 d_id,
                               u32 s_id, u16 ox_id, wwn_t port_name,
-                              wwn_t node_name, u16 pdu_size);
+                              wwn_t node_name, u16 pdu_size, u16 bb_cr);
 
 enum fc_parse_status fc_plogi_parse(struct fchs_s *fchs);
 
@@ -177,13 +183,17 @@ u16        fc_gidpn_build(struct fchs_s *fchs, void *pyld, u32 s_id,
 u16        fc_gpnid_build(struct fchs_s *fchs, void *pld, u32 s_id,
                               u16 ox_id, u32 port_id);
 
+u16    fc_gs_rjt_build(struct fchs_s *fchs, struct ct_hdr_s *cthdr,
+                       u32 d_id, u32 s_id, u16 ox_id,
+                       u8 reason_code, u8 reason_code_expl);
+
 u16        fc_scr_build(struct fchs_s *fchs, struct fc_scr_s *scr,
                        u8 set_br_reg, u32 s_id, u16 ox_id);
 
 u16        fc_plogi_acc_build(struct fchs_s *fchs, void *pld, u32 d_id,
                                   u32 s_id, u16 ox_id,
                                   wwn_t port_name, wwn_t node_name,
-                                  u16 pdu_size);
+                                  u16 pdu_size, u16 bb_cr);
 
 u16        fc_adisc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc,
                        u32 d_id, u32 s_id, __be16 ox_id, wwn_t port_name,
index c0353cd..a4e7951 100644 (file)
@@ -19,7 +19,6 @@
 #include "bfa_modules.h"
 
 BFA_TRC_FILE(HAL, FCPIM);
-BFA_MODULE(fcpim);
 
 /*
  *  BFA ITNIM Related definitions
@@ -287,24 +286,16 @@ static void     bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim,
  * Compute and return memory needed by FCP(im) module.
  */
 static void
-bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
-               u32 *dm_len)
+bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len)
 {
-       bfa_itnim_meminfo(cfg, km_len, dm_len);
+       bfa_itnim_meminfo(cfg, km_len);
 
        /*
         * IO memory
         */
-       if (cfg->fwcfg.num_ioim_reqs < BFA_IOIM_MIN)
-               cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN;
-       else if (cfg->fwcfg.num_ioim_reqs > BFA_IOIM_MAX)
-               cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX;
-
        *km_len += cfg->fwcfg.num_ioim_reqs *
          (sizeof(struct bfa_ioim_s) + sizeof(struct bfa_ioim_sp_s));
 
-       *dm_len += cfg->fwcfg.num_ioim_reqs * BFI_IOIM_SNSLEN;
-
        /*
         * task management command memory
         */
@@ -315,52 +306,41 @@ bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
 
 
 static void
-bfa_fcpim_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
-               struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
+bfa_fcpim_attach(struct bfa_fcp_mod_s *fcp, void *bfad,
+               struct bfa_iocfc_cfg_s *cfg, struct bfa_pcidev_s *pcidev)
 {
-       struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
+       struct bfa_fcpim_s *fcpim = &fcp->fcpim;
+       struct bfa_s *bfa = fcp->bfa;
 
        bfa_trc(bfa, cfg->drvcfg.path_tov);
        bfa_trc(bfa, cfg->fwcfg.num_rports);
        bfa_trc(bfa, cfg->fwcfg.num_ioim_reqs);
        bfa_trc(bfa, cfg->fwcfg.num_tskim_reqs);
 
+       fcpim->fcp              = fcp;
        fcpim->bfa              = bfa;
        fcpim->num_itnims       = cfg->fwcfg.num_rports;
-       fcpim->num_ioim_reqs  = cfg->fwcfg.num_ioim_reqs;
        fcpim->num_tskim_reqs = cfg->fwcfg.num_tskim_reqs;
        fcpim->path_tov         = cfg->drvcfg.path_tov;
        fcpim->delay_comp       = cfg->drvcfg.delay_comp;
        fcpim->profile_comp = NULL;
        fcpim->profile_start = NULL;
 
-       bfa_itnim_attach(fcpim, meminfo);
-       bfa_tskim_attach(fcpim, meminfo);
-       bfa_ioim_attach(fcpim, meminfo);
-}
-
-static void
-bfa_fcpim_detach(struct bfa_s *bfa)
-{
-}
-
-static void
-bfa_fcpim_start(struct bfa_s *bfa)
-{
+       bfa_itnim_attach(fcpim);
+       bfa_tskim_attach(fcpim);
+       bfa_ioim_attach(fcpim);
 }
 
 static void
-bfa_fcpim_stop(struct bfa_s *bfa)
+bfa_fcpim_iocdisable(struct bfa_fcp_mod_s *fcp)
 {
-}
-
-static void
-bfa_fcpim_iocdisable(struct bfa_s *bfa)
-{
-       struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
+       struct bfa_fcpim_s *fcpim = &fcp->fcpim;
        struct bfa_itnim_s *itnim;
        struct list_head *qe, *qen;
 
+       /* Enqueue unused tskim resources to tskim_free_q */
+       list_splice_tail_init(&fcpim->tskim_unused_q, &fcpim->tskim_free_q);
+
        list_for_each_safe(qe, qen, &fcpim->itnim_q) {
                itnim = (struct bfa_itnim_s *) qe;
                bfa_itnim_iocdisable(itnim);
@@ -370,7 +350,7 @@ bfa_fcpim_iocdisable(struct bfa_s *bfa)
 void
 bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov)
 {
-       struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
+       struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
 
        fcpim->path_tov = path_tov * 1000;
        if (fcpim->path_tov > BFA_FCPIM_PATHTOV_MAX)
@@ -380,15 +360,87 @@ bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov)
 u16
 bfa_fcpim_path_tov_get(struct bfa_s *bfa)
 {
-       struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
+       struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
 
        return fcpim->path_tov / 1000;
 }
 
+#define bfa_fcpim_add_iostats(__l, __r, __stats)       \
+       (__l->__stats += __r->__stats)
+
+void
+bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *lstats,
+               struct bfa_itnim_iostats_s *rstats)
+{
+       bfa_fcpim_add_iostats(lstats, rstats, total_ios);
+       bfa_fcpim_add_iostats(lstats, rstats, qresumes);
+       bfa_fcpim_add_iostats(lstats, rstats, no_iotags);
+       bfa_fcpim_add_iostats(lstats, rstats, io_aborts);
+       bfa_fcpim_add_iostats(lstats, rstats, no_tskims);
+       bfa_fcpim_add_iostats(lstats, rstats, iocomp_ok);
+       bfa_fcpim_add_iostats(lstats, rstats, iocomp_underrun);
+       bfa_fcpim_add_iostats(lstats, rstats, iocomp_overrun);
+       bfa_fcpim_add_iostats(lstats, rstats, iocomp_aborted);
+       bfa_fcpim_add_iostats(lstats, rstats, iocomp_timedout);
+       bfa_fcpim_add_iostats(lstats, rstats, iocom_nexus_abort);
+       bfa_fcpim_add_iostats(lstats, rstats, iocom_proto_err);
+       bfa_fcpim_add_iostats(lstats, rstats, iocom_dif_err);
+       bfa_fcpim_add_iostats(lstats, rstats, iocom_sqer_needed);
+       bfa_fcpim_add_iostats(lstats, rstats, iocom_res_free);
+       bfa_fcpim_add_iostats(lstats, rstats, iocom_hostabrts);
+       bfa_fcpim_add_iostats(lstats, rstats, iocom_utags);
+       bfa_fcpim_add_iostats(lstats, rstats, io_cleanups);
+       bfa_fcpim_add_iostats(lstats, rstats, io_tmaborts);
+       bfa_fcpim_add_iostats(lstats, rstats, onlines);
+       bfa_fcpim_add_iostats(lstats, rstats, offlines);
+       bfa_fcpim_add_iostats(lstats, rstats, creates);
+       bfa_fcpim_add_iostats(lstats, rstats, deletes);
+       bfa_fcpim_add_iostats(lstats, rstats, create_comps);
+       bfa_fcpim_add_iostats(lstats, rstats, delete_comps);
+       bfa_fcpim_add_iostats(lstats, rstats, sler_events);
+       bfa_fcpim_add_iostats(lstats, rstats, fw_create);
+       bfa_fcpim_add_iostats(lstats, rstats, fw_delete);
+       bfa_fcpim_add_iostats(lstats, rstats, ioc_disabled);
+       bfa_fcpim_add_iostats(lstats, rstats, cleanup_comps);
+       bfa_fcpim_add_iostats(lstats, rstats, tm_cmnds);
+       bfa_fcpim_add_iostats(lstats, rstats, tm_fw_rsps);
+       bfa_fcpim_add_iostats(lstats, rstats, tm_success);
+       bfa_fcpim_add_iostats(lstats, rstats, tm_failures);
+       bfa_fcpim_add_iostats(lstats, rstats, tm_io_comps);
+       bfa_fcpim_add_iostats(lstats, rstats, tm_qresumes);
+       bfa_fcpim_add_iostats(lstats, rstats, tm_iocdowns);
+       bfa_fcpim_add_iostats(lstats, rstats, tm_cleanups);
+       bfa_fcpim_add_iostats(lstats, rstats, tm_cleanup_comps);
+       bfa_fcpim_add_iostats(lstats, rstats, io_comps);
+       bfa_fcpim_add_iostats(lstats, rstats, input_reqs);
+       bfa_fcpim_add_iostats(lstats, rstats, output_reqs);
+       bfa_fcpim_add_iostats(lstats, rstats, rd_throughput);
+       bfa_fcpim_add_iostats(lstats, rstats, wr_throughput);
+}
+
+bfa_status_t
+bfa_fcpim_port_iostats(struct bfa_s *bfa,
+               struct bfa_itnim_iostats_s *stats, u8 lp_tag)
+{
+       struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
+       struct list_head *qe, *qen;
+       struct bfa_itnim_s *itnim;
+
+       /* accumulate IO stats from itnim */
+       memset(stats, 0, sizeof(struct bfa_itnim_iostats_s));
+       list_for_each_safe(qe, qen, &fcpim->itnim_q) {
+               itnim = (struct bfa_itnim_s *) qe;
+               if (itnim->rport->rport_info.lp_tag != lp_tag)
+                       continue;
+               bfa_fcpim_add_stats(stats, &(itnim->stats));
+       }
+       return BFA_STATUS_OK;
+}
+
 u16
 bfa_fcpim_qdepth_get(struct bfa_s *bfa)
 {
-       struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
+       struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
 
        return fcpim->q_depth;
 }
@@ -990,8 +1042,7 @@ bfa_itnim_tskdone(struct bfa_itnim_s *itnim)
 }
 
 void
-bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
-               u32 *dm_len)
+bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len)
 {
        /*
         * ITN memory
@@ -1000,15 +1051,16 @@ bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
 }
 
 void
-bfa_itnim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
+bfa_itnim_attach(struct bfa_fcpim_s *fcpim)
 {
        struct bfa_s    *bfa = fcpim->bfa;
+       struct bfa_fcp_mod_s    *fcp = fcpim->fcp;
        struct bfa_itnim_s *itnim;
        int     i, j;
 
        INIT_LIST_HEAD(&fcpim->itnim_q);
 
-       itnim = (struct bfa_itnim_s *) bfa_meminfo_kva(minfo);
+       itnim = (struct bfa_itnim_s *) bfa_mem_kva_curp(fcp);
        fcpim->itnim_arr = itnim;
 
        for (i = 0; i < fcpim->num_itnims; i++, itnim++) {
@@ -1030,7 +1082,7 @@ bfa_itnim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
        }
 
-       bfa_meminfo_kva(minfo) = (u8 *) itnim;
+       bfa_mem_kva_curp(fcp) = (u8 *) itnim;
 }
 
 void
@@ -1043,7 +1095,7 @@ bfa_itnim_iocdisable(struct bfa_itnim_s *itnim)
 static bfa_boolean_t
 bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim)
 {
-       struct bfi_itnim_create_req_s *m;
+       struct bfi_itn_create_req_s *m;
 
        itnim->msg_no++;
 
@@ -1056,8 +1108,8 @@ bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim)
                return BFA_FALSE;
        }
 
-       bfi_h2i_set(m->mh, BFI_MC_ITNIM, BFI_ITNIM_H2I_CREATE_REQ,
-                       bfa_lpuid(itnim->bfa));
+       bfi_h2i_set(m->mh, BFI_MC_ITN, BFI_ITN_H2I_CREATE_REQ,
+                       bfa_fn_lpu(itnim->bfa));
        m->fw_handle = itnim->rport->fw_handle;
        m->class = FC_CLASS_3;
        m->seq_rec = itnim->seq_rec;
@@ -1067,14 +1119,14 @@ bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim)
        /*
         * queue I/O message to firmware
         */
-       bfa_reqq_produce(itnim->bfa, itnim->reqq);
+       bfa_reqq_produce(itnim->bfa, itnim->reqq, m->mh);
        return BFA_TRUE;
 }
 
 static bfa_boolean_t
 bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim)
 {
-       struct bfi_itnim_delete_req_s *m;
+       struct bfi_itn_delete_req_s *m;
 
        /*
         * check for room in queue to send request now
@@ -1085,15 +1137,15 @@ bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim)
                return BFA_FALSE;
        }
 
-       bfi_h2i_set(m->mh, BFI_MC_ITNIM, BFI_ITNIM_H2I_DELETE_REQ,
-                       bfa_lpuid(itnim->bfa));
+       bfi_h2i_set(m->mh, BFI_MC_ITN, BFI_ITN_H2I_DELETE_REQ,
+                       bfa_fn_lpu(itnim->bfa));
        m->fw_handle = itnim->rport->fw_handle;
        bfa_stats(itnim, fw_delete);
 
        /*
         * queue I/O message to firmware
         */
-       bfa_reqq_produce(itnim->bfa, itnim->reqq);
+       bfa_reqq_produce(itnim->bfa, itnim->reqq, m->mh);
        return BFA_TRUE;
 }
 
@@ -1224,7 +1276,7 @@ bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim)
 static void
 bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim)
 {
-       struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(itnim->bfa);
+       struct bfa_fcpim_s *fcpim = BFA_FCPIM(itnim->bfa);
        fcpim->del_itn_stats.del_itn_iocomp_aborted +=
                itnim->stats.iocomp_aborted;
        fcpim->del_itn_stats.del_itn_iocomp_timedout +=
@@ -1250,8 +1302,8 @@ bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim)
 void
 bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
 {
-       struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
-       union bfi_itnim_i2h_msg_u msg;
+       struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
+       union bfi_itn_i2h_msg_u msg;
        struct bfa_itnim_s *itnim;
 
        bfa_trc(bfa, m->mhdr.msg_id);
@@ -1259,7 +1311,7 @@ bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
        msg.msg = m;
 
        switch (m->mhdr.msg_id) {
-       case BFI_ITNIM_I2H_CREATE_RSP:
+       case BFI_ITN_I2H_CREATE_RSP:
                itnim = BFA_ITNIM_FROM_TAG(fcpim,
                                                msg.create_rsp->bfa_handle);
                WARN_ON(msg.create_rsp->status != BFA_STATUS_OK);
@@ -1267,7 +1319,7 @@ bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
                bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
                break;
 
-       case BFI_ITNIM_I2H_DELETE_RSP:
+       case BFI_ITN_I2H_DELETE_RSP:
                itnim = BFA_ITNIM_FROM_TAG(fcpim,
                                                msg.delete_rsp->bfa_handle);
                WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK);
@@ -1275,7 +1327,7 @@ bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
                bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
                break;
 
-       case BFI_ITNIM_I2H_SLER_EVENT:
+       case BFI_ITN_I2H_SLER_EVENT:
                itnim = BFA_ITNIM_FROM_TAG(fcpim,
                                                msg.sler_event->bfa_handle);
                bfa_stats(itnim, sler_events);
@@ -1295,9 +1347,11 @@ bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
 struct bfa_itnim_s *
 bfa_itnim_create(struct bfa_s *bfa, struct bfa_rport_s *rport, void *ditn)
 {
-       struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
+       struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
        struct bfa_itnim_s *itnim;
 
+       bfa_itn_create(bfa, rport, bfa_itnim_isr);
+
        itnim = BFA_ITNIM_FROM_TAG(fcpim, rport->rport_tag);
        WARN_ON(itnim->rport != rport);
 
@@ -1991,7 +2045,8 @@ __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
                if ((m->scsi_status == SCSI_STATUS_CHECK_CONDITION) &&
                                        m->sns_len) {
                        sns_len = m->sns_len;
-                       snsinfo = ioim->iosp->snsinfo;
+                       snsinfo = BFA_SNSINFO_FROM_TAG(ioim->fcpim->fcp,
+                                               ioim->iotag);
                }
 
                /*
@@ -2189,12 +2244,12 @@ bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
         */
        switch (m->cmnd.iodir) {
        case FCP_IODIR_READ:
-               bfi_h2i_set(m->mh, BFI_MC_IOIM_READ, 0, bfa_lpuid(ioim->bfa));
+               bfi_h2i_set(m->mh, BFI_MC_IOIM_READ, 0, bfa_fn_lpu(ioim->bfa));
                bfa_stats(itnim, input_reqs);
                ioim->itnim->stats.rd_throughput += fcp_dl;
                break;
        case FCP_IODIR_WRITE:
-               bfi_h2i_set(m->mh, BFI_MC_IOIM_WRITE, 0, bfa_lpuid(ioim->bfa));
+               bfi_h2i_set(m->mh, BFI_MC_IOIM_WRITE, 0, bfa_fn_lpu(ioim->bfa));
                bfa_stats(itnim, output_reqs);
                ioim->itnim->stats.wr_throughput += fcp_dl;
                break;
@@ -2202,16 +2257,16 @@ bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
                bfa_stats(itnim, input_reqs);
                bfa_stats(itnim, output_reqs);
        default:
-               bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa));
+               bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_fn_lpu(ioim->bfa));
        }
        if (itnim->seq_rec ||
            (scsi_bufflen(cmnd) & (sizeof(u32) - 1)))
-               bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa));
+               bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_fn_lpu(ioim->bfa));
 
        /*
         * queue I/O message to firmware
         */
-       bfa_reqq_produce(ioim->bfa, ioim->reqq);
+       bfa_reqq_produce(ioim->bfa, ioim->reqq, m->mh);
        return BFA_TRUE;
 }
 
@@ -2269,14 +2324,14 @@ bfa_ioim_send_abort(struct bfa_ioim_s *ioim)
        else
                msgop = BFI_IOIM_H2I_IOCLEANUP_REQ;
 
-       bfi_h2i_set(m->mh, BFI_MC_IOIM, msgop, bfa_lpuid(ioim->bfa));
+       bfi_h2i_set(m->mh, BFI_MC_IOIM, msgop, bfa_fn_lpu(ioim->bfa));
        m->io_tag    = cpu_to_be16(ioim->iotag);
        m->abort_tag = ++ioim->abort_tag;
 
        /*
         * queue I/O message to firmware
         */
-       bfa_reqq_produce(ioim->bfa, ioim->reqq);
+       bfa_reqq_produce(ioim->bfa, ioim->reqq, m->mh);
        return BFA_TRUE;
 }
 
@@ -2360,46 +2415,32 @@ bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov)
  * Memory allocation and initialization.
  */
 void
-bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
+bfa_ioim_attach(struct bfa_fcpim_s *fcpim)
 {
        struct bfa_ioim_s               *ioim;
+       struct bfa_fcp_mod_s    *fcp = fcpim->fcp;
        struct bfa_ioim_sp_s    *iosp;
        u16             i;
-       u8                      *snsinfo;
-       u32             snsbufsz;
 
        /*
         * claim memory first
         */
-       ioim = (struct bfa_ioim_s *) bfa_meminfo_kva(minfo);
+       ioim = (struct bfa_ioim_s *) bfa_mem_kva_curp(fcp);
        fcpim->ioim_arr = ioim;
-       bfa_meminfo_kva(minfo) = (u8 *) (ioim + fcpim->num_ioim_reqs);
+       bfa_mem_kva_curp(fcp) = (u8 *) (ioim + fcpim->fcp->num_ioim_reqs);
 
-       iosp = (struct bfa_ioim_sp_s *) bfa_meminfo_kva(minfo);
+       iosp = (struct bfa_ioim_sp_s *) bfa_mem_kva_curp(fcp);
        fcpim->ioim_sp_arr = iosp;
-       bfa_meminfo_kva(minfo) = (u8 *) (iosp + fcpim->num_ioim_reqs);
-
-       /*
-        * Claim DMA memory for per IO sense data.
-        */
-       snsbufsz = fcpim->num_ioim_reqs * BFI_IOIM_SNSLEN;
-       fcpim->snsbase.pa  = bfa_meminfo_dma_phys(minfo);
-       bfa_meminfo_dma_phys(minfo) += snsbufsz;
-
-       fcpim->snsbase.kva = bfa_meminfo_dma_virt(minfo);
-       bfa_meminfo_dma_virt(minfo) += snsbufsz;
-       snsinfo = fcpim->snsbase.kva;
-       bfa_iocfc_set_snsbase(fcpim->bfa, fcpim->snsbase.pa);
+       bfa_mem_kva_curp(fcp) = (u8 *) (iosp + fcpim->fcp->num_ioim_reqs);
 
        /*
         * Initialize ioim free queues
         */
-       INIT_LIST_HEAD(&fcpim->ioim_free_q);
        INIT_LIST_HEAD(&fcpim->ioim_resfree_q);
        INIT_LIST_HEAD(&fcpim->ioim_comp_q);
 
-       for (i = 0; i < fcpim->num_ioim_reqs;
-            i++, ioim++, iosp++, snsinfo += BFI_IOIM_SNSLEN) {
+       for (i = 0; i < fcpim->fcp->num_ioim_reqs;
+            i++, ioim++, iosp++) {
                /*
                 * initialize IOIM
                 */
@@ -2408,22 +2449,19 @@ bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
                ioim->bfa     = fcpim->bfa;
                ioim->fcpim   = fcpim;
                ioim->iosp    = iosp;
-               iosp->snsinfo = snsinfo;
                INIT_LIST_HEAD(&ioim->sgpg_q);
                bfa_reqq_winit(&ioim->iosp->reqq_wait,
                                   bfa_ioim_qresume, ioim);
                bfa_sgpg_winit(&ioim->iosp->sgpg_wqe,
                                   bfa_ioim_sgpg_alloced, ioim);
                bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
-
-               list_add_tail(&ioim->qe, &fcpim->ioim_free_q);
        }
 }
 
 void
 bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
 {
-       struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
+       struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
        struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
        struct bfa_ioim_s *ioim;
        u16     iotag;
@@ -2507,7 +2545,7 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
 void
 bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
 {
-       struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
+       struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
        struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
        struct bfa_ioim_s *ioim;
        u16     iotag;
@@ -2573,18 +2611,21 @@ struct bfa_ioim_s *
 bfa_ioim_alloc(struct bfa_s *bfa, struct bfad_ioim_s *dio,
                struct bfa_itnim_s *itnim, u16 nsges)
 {
-       struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
+       struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
        struct bfa_ioim_s *ioim;
+       struct bfa_iotag_s *iotag = NULL;
 
        /*
         * alocate IOIM resource
         */
-       bfa_q_deq(&fcpim->ioim_free_q, &ioim);
-       if (!ioim) {
+       bfa_q_deq(&fcpim->fcp->iotag_ioim_free_q, &iotag);
+       if (!iotag) {
                bfa_stats(itnim, no_iotags);
                return NULL;
        }
 
+       ioim = BFA_IOIM_FROM_TAG(fcpim, iotag->tag);
+
        ioim->dio = dio;
        ioim->itnim = itnim;
        ioim->nsges = nsges;
@@ -2601,7 +2642,8 @@ bfa_ioim_alloc(struct bfa_s *bfa, struct bfad_ioim_s *dio,
 void
 bfa_ioim_free(struct bfa_ioim_s *ioim)
 {
-       struct bfa_fcpim_mod_s *fcpim = ioim->fcpim;
+       struct bfa_fcpim_s *fcpim = ioim->fcpim;
+       struct bfa_iotag_s *iotag;
 
        if (ioim->nsgpgs > 0)
                bfa_sgpg_mfree(ioim->bfa, &ioim->sgpg_q, ioim->nsgpgs);
@@ -2610,8 +2652,17 @@ bfa_ioim_free(struct bfa_ioim_s *ioim)
        fcpim->ios_active--;
 
        ioim->iotag &= BFA_IOIM_IOTAG_MASK;
+
+       WARN_ON(!(ioim->iotag <
+               (fcpim->fcp->num_ioim_reqs + fcpim->fcp->num_fwtio_reqs)));
+       iotag = BFA_IOTAG_FROM_TAG(fcpim->fcp, ioim->iotag);
+
+       if (ioim->iotag < fcpim->fcp->num_ioim_reqs)
+               list_add_tail(&iotag->qe, &fcpim->fcp->iotag_ioim_free_q);
+       else
+               list_add_tail(&iotag->qe, &fcpim->fcp->iotag_tio_free_q);
+
        list_del(&ioim->qe);
-       list_add_tail(&ioim->qe, &fcpim->ioim_free_q);
 }
 
 void
@@ -3021,7 +3072,7 @@ bfa_tskim_send(struct bfa_tskim_s *tskim)
         * build i/o request message next
         */
        bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_TM_REQ,
-                       bfa_lpuid(tskim->bfa));
+                       bfa_fn_lpu(tskim->bfa));
 
        m->tsk_tag = cpu_to_be16(tskim->tsk_tag);
        m->itn_fhdl = tskim->itnim->rport->fw_handle;
@@ -3032,7 +3083,7 @@ bfa_tskim_send(struct bfa_tskim_s *tskim)
        /*
         * queue I/O message to firmware
         */
-       bfa_reqq_produce(tskim->bfa, itnim->reqq);
+       bfa_reqq_produce(tskim->bfa, itnim->reqq, m->mh);
        return BFA_TRUE;
 }
 
@@ -3056,14 +3107,14 @@ bfa_tskim_send_abort(struct bfa_tskim_s *tskim)
         * build i/o request message next
         */
        bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_ABORT_REQ,
-                       bfa_lpuid(tskim->bfa));
+                       bfa_fn_lpu(tskim->bfa));
 
        m->tsk_tag  = cpu_to_be16(tskim->tsk_tag);
 
        /*
         * queue I/O message to firmware
         */
-       bfa_reqq_produce(tskim->bfa, itnim->reqq);
+       bfa_reqq_produce(tskim->bfa, itnim->reqq, m->mh);
        return BFA_TRUE;
 }
 
@@ -3129,14 +3180,16 @@ bfa_tskim_cleanup(struct bfa_tskim_s *tskim)
  * Memory allocation and initialization.
  */
 void
-bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
+bfa_tskim_attach(struct bfa_fcpim_s *fcpim)
 {
        struct bfa_tskim_s *tskim;
+       struct bfa_fcp_mod_s    *fcp = fcpim->fcp;
        u16     i;
 
        INIT_LIST_HEAD(&fcpim->tskim_free_q);
+       INIT_LIST_HEAD(&fcpim->tskim_unused_q);
 
-       tskim = (struct bfa_tskim_s *) bfa_meminfo_kva(minfo);
+       tskim = (struct bfa_tskim_s *) bfa_mem_kva_curp(fcp);
        fcpim->tskim_arr = tskim;
 
        for (i = 0; i < fcpim->num_tskim_reqs; i++, tskim++) {
@@ -3155,13 +3208,13 @@ bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
                list_add_tail(&tskim->qe, &fcpim->tskim_free_q);
        }
 
-       bfa_meminfo_kva(minfo) = (u8 *) tskim;
+       bfa_mem_kva_curp(fcp) = (u8 *) tskim;
 }
 
 void
 bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
 {
-       struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
+       struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
        struct bfi_tskim_rsp_s *rsp = (struct bfi_tskim_rsp_s *) m;
        struct bfa_tskim_s *tskim;
        u16     tsk_tag = be16_to_cpu(rsp->tsk_tag);
@@ -3188,7 +3241,7 @@ bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
 struct bfa_tskim_s *
 bfa_tskim_alloc(struct bfa_s *bfa, struct bfad_tskim_s *dtsk)
 {
-       struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
+       struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
        struct bfa_tskim_s *tskim;
 
        bfa_q_deq(&fcpim->tskim_free_q, &tskim);
@@ -3233,3 +3286,214 @@ bfa_tskim_start(struct bfa_tskim_s *tskim, struct bfa_itnim_s *itnim,
        list_add_tail(&tskim->qe, &itnim->tsk_q);
        bfa_sm_send_event(tskim, BFA_TSKIM_SM_START);
 }
+
+void
+bfa_tskim_res_recfg(struct bfa_s *bfa, u16 num_tskim_fw)
+{
+       struct bfa_fcpim_s      *fcpim = BFA_FCPIM(bfa);
+       struct list_head        *qe;
+       int     i;
+
+       for (i = 0; i < (fcpim->num_tskim_reqs - num_tskim_fw); i++) {
+               bfa_q_deq_tail(&fcpim->tskim_free_q, &qe);
+               list_add_tail(qe, &fcpim->tskim_unused_q);
+       }
+}
+
+/* BFA FCP module - parent module for fcpim */
+
+BFA_MODULE(fcp);
+
+static void
+bfa_fcp_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
+               struct bfa_s *bfa)
+{
+       struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
+       struct bfa_mem_kva_s *fcp_kva = BFA_MEM_FCP_KVA(bfa);
+       struct bfa_mem_dma_s *seg_ptr;
+       u16     nsegs, idx, per_seg_ios, num_io_req;
+       u32     km_len = 0;
+
+       /*
+        * ZERO for num_ioim_reqs and num_fwtio_reqs is allowed config value.
+        * So if the values are non zero, adjust them appropriately.
+        */
+       if (cfg->fwcfg.num_ioim_reqs &&
+           cfg->fwcfg.num_ioim_reqs < BFA_IOIM_MIN)
+               cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN;
+       else if (cfg->fwcfg.num_ioim_reqs > BFA_IOIM_MAX)
+               cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX;
+
+       if (cfg->fwcfg.num_fwtio_reqs > BFA_FWTIO_MAX)
+               cfg->fwcfg.num_fwtio_reqs = BFA_FWTIO_MAX;
+
+       num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
+       if (num_io_req > BFA_IO_MAX) {
+               if (cfg->fwcfg.num_ioim_reqs && cfg->fwcfg.num_fwtio_reqs) {
+                       cfg->fwcfg.num_ioim_reqs = BFA_IO_MAX/2;
+                       cfg->fwcfg.num_fwtio_reqs = BFA_IO_MAX/2;
+               } else if (cfg->fwcfg.num_fwtio_reqs)
+                       cfg->fwcfg.num_fwtio_reqs = BFA_FWTIO_MAX;
+               else
+                       cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX;
+       }
+
+       bfa_fcpim_meminfo(cfg, &km_len);
+
+       num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
+       km_len += num_io_req * sizeof(struct bfa_iotag_s);
+       km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itn_s);
+
+       /* dma memory */
+       nsegs = BFI_MEM_DMA_NSEGS(num_io_req, BFI_IOIM_SNSLEN);
+       per_seg_ios = BFI_MEM_NREQS_SEG(BFI_IOIM_SNSLEN);
+
+       bfa_mem_dma_seg_iter(fcp, seg_ptr, nsegs, idx) {
+               if (num_io_req >= per_seg_ios) {
+                       num_io_req -= per_seg_ios;
+                       bfa_mem_dma_setup(minfo, seg_ptr,
+                               per_seg_ios * BFI_IOIM_SNSLEN);
+               } else
+                       bfa_mem_dma_setup(minfo, seg_ptr,
+                               num_io_req * BFI_IOIM_SNSLEN);
+       }
+
+       /* kva memory */
+       bfa_mem_kva_setup(minfo, fcp_kva, km_len);
+}
+
+static void
+bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
+               struct bfa_pcidev_s *pcidev)
+{
+       struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
+       struct bfa_mem_dma_s *seg_ptr;
+       u16     idx, nsegs, num_io_req;
+
+       fcp->num_ioim_reqs = cfg->fwcfg.num_ioim_reqs;
+       fcp->num_fwtio_reqs  = cfg->fwcfg.num_fwtio_reqs;
+       fcp->num_itns   = cfg->fwcfg.num_rports;
+       fcp->bfa = bfa;
+
+       /*
+        * Setup the pool of snsbase addr's, that is passed to fw as
+        * part of bfi_iocfc_cfg_s.
+        */
+       num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
+       nsegs = BFI_MEM_DMA_NSEGS(num_io_req, BFI_IOIM_SNSLEN);
+
+       bfa_mem_dma_seg_iter(fcp, seg_ptr, nsegs, idx) {
+
+               if (!bfa_mem_dma_virt(seg_ptr))
+                       break;
+
+               fcp->snsbase[idx].pa = bfa_mem_dma_phys(seg_ptr);
+               fcp->snsbase[idx].kva = bfa_mem_dma_virt(seg_ptr);
+               bfa_iocfc_set_snsbase(bfa, idx, fcp->snsbase[idx].pa);
+       }
+
+       bfa_fcpim_attach(fcp, bfad, cfg, pcidev);
+
+       bfa_iotag_attach(fcp);
+
+       fcp->itn_arr = (struct bfa_itn_s *) bfa_mem_kva_curp(fcp);
+       bfa_mem_kva_curp(fcp) = (u8 *)fcp->itn_arr +
+                       (fcp->num_itns * sizeof(struct bfa_itn_s));
+       memset(fcp->itn_arr, 0,
+                       (fcp->num_itns * sizeof(struct bfa_itn_s)));
+}
+
+static void
+bfa_fcp_detach(struct bfa_s *bfa)
+{
+}
+
+static void
+bfa_fcp_start(struct bfa_s *bfa)
+{
+}
+
+static void
+bfa_fcp_stop(struct bfa_s *bfa)
+{
+}
+
+static void
+bfa_fcp_iocdisable(struct bfa_s *bfa)
+{
+       struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
+
+       /* Enqueue unused ioim iotags to the ioim free_q */
+       list_splice_tail_init(&fcp->iotag_unused_q, &fcp->iotag_ioim_free_q);
+
+       bfa_fcpim_iocdisable(fcp);
+}
+
+void
+bfa_fcp_res_recfg(struct bfa_s *bfa, u16 num_ioim_fw)
+{
+       struct bfa_fcp_mod_s    *mod = BFA_FCP_MOD(bfa);
+       struct list_head        *qe;
+       int     i;
+
+       for (i = 0; i < (mod->num_ioim_reqs - num_ioim_fw); i++) {
+               bfa_q_deq_tail(&mod->iotag_ioim_free_q, &qe);
+               list_add_tail(qe, &mod->iotag_unused_q);
+       }
+}
+
+void
+bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
+               void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
+{
+       struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
+       struct bfa_itn_s *itn;
+
+       itn =  BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
+       itn->isr = isr;
+}
+
+/*
+ * Itn interrupt processing.
+ */
+void
+bfa_itn_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
+{
+       struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
+       union bfi_itn_i2h_msg_u msg;
+       struct bfa_itn_s *itn;
+
+       msg.msg = m;
+       itn =  BFA_ITN_FROM_TAG(fcp, msg.create_rsp->bfa_handle);
+
+       if (itn->isr)
+               itn->isr(bfa, m);
+       else
+               WARN_ON(1);
+}
+
+void
+bfa_iotag_attach(struct bfa_fcp_mod_s *fcp)
+{
+       struct bfa_iotag_s *iotag;
+       u16     num_io_req, i;
+
+       iotag = (struct bfa_iotag_s *) bfa_mem_kva_curp(fcp);
+       fcp->iotag_arr = iotag;
+
+       INIT_LIST_HEAD(&fcp->iotag_ioim_free_q);
+       INIT_LIST_HEAD(&fcp->iotag_tio_free_q);
+       INIT_LIST_HEAD(&fcp->iotag_unused_q);
+
+       num_io_req = fcp->num_ioim_reqs + fcp->num_fwtio_reqs;
+       for (i = 0; i < num_io_req; i++, iotag++) {
+               memset(iotag, 0, sizeof(struct bfa_iotag_s));
+               iotag->tag = i;
+               if (i < fcp->num_ioim_reqs)
+                       list_add_tail(&iotag->qe, &fcp->iotag_ioim_free_q);
+               else
+                       list_add_tail(&iotag->qe, &fcp->iotag_tio_free_q);
+       }
+
+       bfa_mem_kva_curp(fcp) = (u8 *) iotag;
+}
index 1e38dad..57b695a 100644 (file)
 #include "bfa_defs_svc.h"
 #include "bfa_cs.h"
 
+/* FCP module related definitions */
+#define BFA_IO_MAX     BFI_IO_MAX
+#define BFA_FWTIO_MAX  2000
+
+struct bfa_fcp_mod_s;
+struct bfa_iotag_s {
+       struct list_head        qe;     /* queue element        */
+       u16     tag;                    /* FW IO tag            */
+};
+
+struct bfa_itn_s {
+       bfa_isr_func_t isr;
+};
+
+void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
+               void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
+void bfa_itn_isr(struct bfa_s *bfa, struct bfi_msg_s *m);
+void bfa_iotag_attach(struct bfa_fcp_mod_s *fcp);
+void bfa_fcp_res_recfg(struct bfa_s *bfa, u16 num_ioim_fw);
+
+#define BFA_FCP_MOD(_hal)      (&(_hal)->modules.fcp_mod)
+#define BFA_MEM_FCP_KVA(__bfa) (&(BFA_FCP_MOD(__bfa)->kva_seg))
+#define BFA_IOTAG_FROM_TAG(_fcp, _tag) \
+       (&(_fcp)->iotag_arr[(_tag & BFA_IOIM_IOTAG_MASK)])
+#define BFA_ITN_FROM_TAG(_fcp, _tag)   \
+       ((_fcp)->itn_arr + ((_tag) & ((_fcp)->num_itns - 1)))
+#define BFA_SNSINFO_FROM_TAG(_fcp, _tag) \
+       bfa_mem_get_dmabuf_kva(_fcp, _tag, BFI_IOIM_SNSLEN)
 
 #define BFA_ITNIM_MIN   32
 #define BFA_ITNIM_MAX   1024
@@ -75,25 +103,24 @@ struct bfad_tskim_s;
 
 typedef void    (*bfa_fcpim_profile_t) (struct bfa_ioim_s *ioim);
 
-struct bfa_fcpim_mod_s {
+struct bfa_fcpim_s {
        struct bfa_s            *bfa;
+       struct bfa_fcp_mod_s    *fcp;
        struct bfa_itnim_s      *itnim_arr;
        struct bfa_ioim_s       *ioim_arr;
        struct bfa_ioim_sp_s    *ioim_sp_arr;
        struct bfa_tskim_s      *tskim_arr;
-       struct bfa_dma_s        snsbase;
        int                     num_itnims;
-       int                     num_ioim_reqs;
        int                     num_tskim_reqs;
        u32                     path_tov;
        u16                     q_depth;
        u8                      reqq;           /*  Request queue to be used */
        u8                      rsvd;
        struct list_head        itnim_q;        /*  queue of active itnim */
-       struct list_head        ioim_free_q;    /*  free IO resources   */
        struct list_head        ioim_resfree_q; /*  IOs waiting for f/w */
        struct list_head        ioim_comp_q;    /*  IO global comp Q    */
        struct list_head        tskim_free_q;
+       struct list_head        tskim_unused_q; /* Unused tskim Q */
        u32                     ios_active;     /*  current active IOs  */
        u32                     delay_comp;
        struct bfa_fcpim_del_itn_stats_s del_itn_stats;
@@ -104,6 +131,25 @@ struct bfa_fcpim_mod_s {
        bfa_fcpim_profile_t     profile_start;
 };
 
+/* Max FCP dma segs required */
+#define BFA_FCP_DMA_SEGS       BFI_IOIM_SNSBUF_SEGS
+
+struct bfa_fcp_mod_s {
+       struct bfa_s            *bfa;
+       struct list_head        iotag_ioim_free_q;      /* free IO resources */
+       struct list_head        iotag_tio_free_q;       /* free IO resources */
+       struct list_head        iotag_unused_q; /* unused IO resources*/
+       struct bfa_iotag_s      *iotag_arr;
+       struct bfa_itn_s        *itn_arr;
+       int                     num_ioim_reqs;
+       int                     num_fwtio_reqs;
+       int                     num_itns;
+       struct bfa_dma_s        snsbase[BFA_FCP_DMA_SEGS];
+       struct bfa_fcpim_s      fcpim;
+       struct bfa_mem_dma_s    dma_seg[BFA_FCP_DMA_SEGS];
+       struct bfa_mem_kva_s    kva_seg;
+};
+
 /*
  * BFA IO (initiator mode)
  */
@@ -111,7 +157,7 @@ struct bfa_ioim_s {
        struct list_head        qe;             /*  queue elememt       */
        bfa_sm_t                sm;             /*  BFA ioim state machine */
        struct bfa_s            *bfa;           /*  BFA module  */
-       struct bfa_fcpim_mod_s  *fcpim;         /*  parent fcpim module */
+       struct bfa_fcpim_s      *fcpim;         /*  parent fcpim module */
        struct bfa_itnim_s      *itnim;         /*  i-t-n nexus for this IO  */
        struct bfad_ioim_s      *dio;           /*  driver IO handle    */
        u16                     iotag;          /*  FWI IO tag  */
@@ -129,7 +175,6 @@ struct bfa_ioim_s {
 
 struct bfa_ioim_sp_s {
        struct bfi_msg_s        comp_rspmsg;    /*  IO comp f/w response */
-       u8                      *snsinfo;       /*  sense info for this IO   */
        struct bfa_sgpg_wqe_s   sgpg_wqe;       /*  waitq elem for sgpg */
        struct bfa_reqq_wait_s  reqq_wait;      /*  to wait for room in reqq */
        bfa_boolean_t           abort_explicit; /*  aborted by OS       */
@@ -143,7 +188,7 @@ struct bfa_tskim_s {
        struct list_head        qe;
        bfa_sm_t                sm;
        struct bfa_s            *bfa;   /*  BFA module  */
-       struct bfa_fcpim_mod_s  *fcpim; /*  parent fcpim module */
+       struct bfa_fcpim_s      *fcpim; /*  parent fcpim module */
        struct bfa_itnim_s      *itnim; /*  i-t-n nexus for this IO  */
        struct bfad_tskim_s     *dtsk;  /*  driver task mgmt cmnd       */
        bfa_boolean_t           notify; /*  notify itnim on TM comp  */
@@ -182,13 +227,13 @@ struct bfa_itnim_s {
        struct bfa_wc_s wc;             /*  waiting counter     */
        struct bfa_timer_s timer;       /*  pending IO TOV       */
        struct bfa_reqq_wait_s reqq_wait; /*  to wait for room in reqq */
-       struct bfa_fcpim_mod_s *fcpim;  /*  fcpim module        */
+       struct bfa_fcpim_s *fcpim;      /*  fcpim module        */
        struct bfa_itnim_iostats_s      stats;
        struct bfa_itnim_ioprofile_s  ioprofile;
 };
 
 #define bfa_itnim_is_online(_itnim) ((_itnim)->is_online)
-#define BFA_FCPIM_MOD(_hal) (&(_hal)->modules.fcpim_mod)
+#define BFA_FCPIM(_hal)        (&(_hal)->modules.fcp_mod.fcpim)
 #define BFA_IOIM_TAG_2_ID(_iotag)      ((_iotag) & BFA_IOIM_IOTAG_MASK)
 #define BFA_IOIM_FROM_TAG(_fcpim, _iotag)      \
        (&fcpim->ioim_arr[(_iotag & BFA_IOIM_IOTAG_MASK)])
@@ -196,9 +241,9 @@ struct bfa_itnim_s {
        (&fcpim->tskim_arr[_tmtag & (fcpim->num_tskim_reqs - 1)])
 
 #define bfa_io_profile_start_time(_bfa)        \
-       (_bfa->modules.fcpim_mod.io_profile_start_time)
+       ((_bfa)->modules.fcp_mod.fcpim.io_profile_start_time)
 #define bfa_fcpim_get_io_profile(_bfa) \
-       (_bfa->modules.fcpim_mod.io_profile)
+       ((_bfa)->modules.fcp_mod.fcpim.io_profile)
 #define bfa_ioim_update_iotag(__ioim) do {                             \
        uint16_t k = (__ioim)->iotag >> BFA_IOIM_RETRY_TAG_OFFSET;      \
        k++; (__ioim)->iotag &= BFA_IOIM_IOTAG_MASK;                    \
@@ -217,8 +262,7 @@ bfa_ioim_maxretry_reached(struct bfa_ioim_s *ioim)
 /*
  * function prototypes
  */
-void   bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim,
-                                       struct bfa_meminfo_s *minfo);
+void   bfa_ioim_attach(struct bfa_fcpim_s *fcpim);
 void   bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
 void   bfa_ioim_good_comp_isr(struct bfa_s *bfa,
                                        struct bfi_msg_s *msg);
@@ -228,18 +272,15 @@ void      bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim,
 void   bfa_ioim_iocdisable(struct bfa_ioim_s *ioim);
 void   bfa_ioim_tov(struct bfa_ioim_s *ioim);
 
-void   bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim,
-                                       struct bfa_meminfo_s *minfo);
+void   bfa_tskim_attach(struct bfa_fcpim_s *fcpim);
 void   bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
 void   bfa_tskim_iodone(struct bfa_tskim_s *tskim);
 void   bfa_tskim_iocdisable(struct bfa_tskim_s *tskim);
 void   bfa_tskim_cleanup(struct bfa_tskim_s *tskim);
+void   bfa_tskim_res_recfg(struct bfa_s *bfa, u16 num_tskim_fw);
 
-void   bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
-                                       u32 *dm_len);
-void   bfa_itnim_attach(struct bfa_fcpim_mod_s *fcpim,
-                                       struct bfa_meminfo_s *minfo);
-void   bfa_itnim_detach(struct bfa_fcpim_mod_s *fcpim);
+void   bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len);
+void   bfa_itnim_attach(struct bfa_fcpim_s *fcpim);
 void   bfa_itnim_iocdisable(struct bfa_itnim_s *itnim);
 void   bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
 void   bfa_itnim_iodone(struct bfa_itnim_s *itnim);
@@ -252,13 +293,17 @@ bfa_boolean_t   bfa_itnim_hold_io(struct bfa_itnim_s *itnim);
 void   bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov);
 u16    bfa_fcpim_path_tov_get(struct bfa_s *bfa);
 u16    bfa_fcpim_qdepth_get(struct bfa_s *bfa);
+bfa_status_t bfa_fcpim_port_iostats(struct bfa_s *bfa,
+                       struct bfa_itnim_iostats_s *stats, u8 lp_tag);
+void bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *fcpim_stats,
+                       struct bfa_itnim_iostats_s *itnim_stats);
 
 #define bfa_fcpim_ioredirect_enabled(__bfa)                            \
-       (((struct bfa_fcpim_mod_s *)(BFA_FCPIM_MOD(__bfa)))->ioredirect)
+       (((struct bfa_fcpim_s *)(BFA_FCPIM(__bfa)))->ioredirect)
 
 #define bfa_fcpim_get_next_reqq(__bfa, __qid)                          \
 {                                                                      \
-       struct bfa_fcpim_mod_s *__fcpim = BFA_FCPIM_MOD(__bfa);      \
+       struct bfa_fcpim_s *__fcpim = BFA_FCPIM(__bfa);      \
        __fcpim->reqq++;                                                \
        __fcpim->reqq &= (BFI_IOC_MAX_CQS - 1);      \
        *(__qid) = __fcpim->reqq;                                       \
index 9b43ca4..a9b22bc 100644 (file)
@@ -92,25 +92,49 @@ bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad,
 void
 bfa_fcs_init(struct bfa_fcs_s *fcs)
 {
-       int             i, npbc_vports;
+       int     i;
        struct bfa_fcs_mod_s  *mod;
-       struct bfi_pbc_vport_s pbc_vports[BFI_PBC_MAX_VPORTS];
 
        for (i = 0; i < sizeof(fcs_modules) / sizeof(fcs_modules[0]); i++) {
                mod = &fcs_modules[i];
                if (mod->modinit)
                        mod->modinit(fcs);
        }
+}
+
+/*
+ * FCS update cfg - reset the pwwn/nwwn of fabric base logical port
+ * with values learned during bfa_init firmware GETATTR REQ.
+ */
+void
+bfa_fcs_update_cfg(struct bfa_fcs_s *fcs)
+{
+       struct bfa_fcs_fabric_s *fabric = &fcs->fabric;
+       struct bfa_lport_cfg_s *port_cfg = &fabric->bport.port_cfg;
+       struct bfa_ioc_s *ioc = &fabric->fcs->bfa->ioc;
+
+       port_cfg->nwwn = ioc->attr->nwwn;
+       port_cfg->pwwn = ioc->attr->pwwn;
+}
+
+/*
+ * fcs pbc vport initialization
+ */
+void
+bfa_fcs_pbc_vport_init(struct bfa_fcs_s *fcs)
+{
+       int i, npbc_vports;
+       struct bfi_pbc_vport_s pbc_vports[BFI_PBC_MAX_VPORTS];
+
        /* Initialize pbc vports */
        if (!fcs->min_cfg) {
                npbc_vports =
-                   bfa_iocfc_get_pbc_vports(fcs->bfa, pbc_vports);
+                       bfa_iocfc_get_pbc_vports(fcs->bfa, pbc_vports);
                for (i = 0; i < npbc_vports; i++)
                        bfa_fcb_pbc_vport_create(fcs->bfa->bfad, pbc_vports[i]);
        }
 }
 
-
 /*
  *     brief
  *             FCS driver details initialization.
@@ -168,11 +192,14 @@ bfa_fcs_exit(struct bfa_fcs_s *fcs)
 #define BFA_FCS_FABRIC_CLEANUP_DELAY   (10000) /* Milliseconds */
 
 #define bfa_fcs_fabric_set_opertype(__fabric) do {                     \
-               if (bfa_fcport_get_topology((__fabric)->fcs->bfa)       \
-                   == BFA_PORT_TOPOLOGY_P2P)                           \
+       if (bfa_fcport_get_topology((__fabric)->fcs->bfa)               \
+                               == BFA_PORT_TOPOLOGY_P2P) {             \
+               if (fabric->fab_type == BFA_FCS_FABRIC_SWITCHED)        \
                        (__fabric)->oper_type = BFA_PORT_TYPE_NPORT;    \
                else                                                    \
-                       (__fabric)->oper_type = BFA_PORT_TYPE_NLPORT;   \
+                       (__fabric)->oper_type = BFA_PORT_TYPE_P2P;      \
+       } else                                                          \
+               (__fabric)->oper_type = BFA_PORT_TYPE_NLPORT;           \
 } while (0)
 
 /*
@@ -196,6 +223,9 @@ static void bfa_fcs_fabric_flogiacc_comp(void *fcsarg,
                                         u32 rsp_len,
                                         u32 resid_len,
                                         struct fchs_s *rspfchs);
+static u8 bfa_fcs_fabric_oper_bbscn(struct bfa_fcs_fabric_s *fabric);
+static bfa_boolean_t bfa_fcs_fabric_is_bbscn_enabled(
+                               struct bfa_fcs_fabric_s *fabric);
 
 static void    bfa_fcs_fabric_sm_uninit(struct bfa_fcs_fabric_s *fabric,
                                         enum bfa_fcs_fabric_event event);
@@ -269,8 +299,8 @@ bfa_fcs_fabric_sm_created(struct bfa_fcs_fabric_s *fabric,
                break;
 
        case BFA_FCS_FABRIC_SM_DELETE:
-               bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit);
-               bfa_wc_down(&fabric->fcs->wc);
+               bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
+               bfa_fcs_fabric_delete(fabric);
                break;
 
        default:
@@ -322,7 +352,8 @@ bfa_fcs_fabric_sm_flogi(struct bfa_fcs_fabric_s *fabric,
        case BFA_FCS_FABRIC_SM_CONT_OP:
 
                bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa,
-                                          fabric->bb_credit);
+                                          fabric->bb_credit,
+                                          bfa_fcs_fabric_oper_bbscn(fabric));
                fabric->fab_type = BFA_FCS_FABRIC_SWITCHED;
 
                if (fabric->auth_reqd && fabric->is_auth) {
@@ -350,7 +381,8 @@ bfa_fcs_fabric_sm_flogi(struct bfa_fcs_fabric_s *fabric,
        case BFA_FCS_FABRIC_SM_NO_FABRIC:
                fabric->fab_type = BFA_FCS_FABRIC_N2N;
                bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa,
-                                          fabric->bb_credit);
+                                          fabric->bb_credit,
+                                          bfa_fcs_fabric_oper_bbscn(fabric));
                bfa_fcs_fabric_notify_online(fabric);
                bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_nofabric);
                break;
@@ -518,7 +550,11 @@ bfa_fcs_fabric_sm_nofabric(struct bfa_fcs_fabric_s *fabric,
        case BFA_FCS_FABRIC_SM_NO_FABRIC:
                bfa_trc(fabric->fcs, fabric->bb_credit);
                bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa,
-                                          fabric->bb_credit);
+                                          fabric->bb_credit,
+                                          bfa_fcs_fabric_oper_bbscn(fabric));
+               break;
+
+       case BFA_FCS_FABRIC_SM_RETRY_OP:
                break;
 
        default:
@@ -764,6 +800,10 @@ bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status)
 
        case BFA_STATUS_FABRIC_RJT:
                fabric->stats.flogi_rejects++;
+               if (fabric->lps->lsrjt_rsn == FC_LS_RJT_RSN_LOGICAL_ERROR &&
+                   fabric->lps->lsrjt_expl == FC_LS_RJT_EXP_NO_ADDL_INFO)
+                       fabric->fcs->bbscn_flogi_rjt = BFA_TRUE;
+
                bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP);
                return;
 
@@ -793,6 +833,7 @@ bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status)
                 */
                fabric->bport.port_topo.pn2n.rem_port_wwn =
                        fabric->lps->pr_pwwn;
+               fabric->fab_type = BFA_FCS_FABRIC_N2N;
                bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_NO_FABRIC);
        }
 
@@ -808,13 +849,17 @@ bfa_fcs_fabric_login(struct bfa_fcs_fabric_s *fabric)
 {
        struct bfa_s            *bfa = fabric->fcs->bfa;
        struct bfa_lport_cfg_s  *pcfg = &fabric->bport.port_cfg;
-       u8                      alpa = 0;
+       u8                      alpa = 0, bb_scn = 0;
 
        if (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP)
                alpa = bfa_fcport_get_myalpa(bfa);
 
+       if (bfa_fcs_fabric_is_bbscn_enabled(fabric) &&
+           (!fabric->fcs->bbscn_flogi_rjt))
+               bb_scn = BFA_FCS_PORT_DEF_BB_SCN;
+
        bfa_lps_flogi(fabric->lps, fabric, alpa, bfa_fcport_get_maxfrsize(bfa),
-                     pcfg->pwwn, pcfg->nwwn, fabric->auth_reqd);
+                     pcfg->pwwn, pcfg->nwwn, fabric->auth_reqd, bb_scn);
 
        fabric->stats.flogi_sent++;
 }
@@ -872,6 +917,40 @@ bfa_fcs_fabric_delay(void *cbarg)
        bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELAYED);
 }
 
+/*
+ * Computes operating BB_SCN value
+ */
+static u8
+bfa_fcs_fabric_oper_bbscn(struct bfa_fcs_fabric_s *fabric)
+{
+       u8      pr_bbscn = fabric->lps->pr_bbscn;
+       struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(fabric->fcs->bfa);
+
+       if (!(fcport->cfg.bb_scn_state && pr_bbscn))
+               return 0;
+
+       /* return max of local/remote bb_scn values */
+       return ((pr_bbscn > BFA_FCS_PORT_DEF_BB_SCN) ?
+               pr_bbscn : BFA_FCS_PORT_DEF_BB_SCN);
+}
+
+/*
+ * Check if BB_SCN can be enabled.
+ */
+static bfa_boolean_t
+bfa_fcs_fabric_is_bbscn_enabled(struct bfa_fcs_fabric_s *fabric)
+{
+       struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(fabric->fcs->bfa);
+
+       if (bfa_ioc_get_fcmode(&fabric->fcs->bfa->ioc) &&
+                       fcport->cfg.bb_scn_state &&
+                       !bfa_fcport_is_qos_enabled(fabric->fcs->bfa) &&
+                       !bfa_fcport_is_trunk_enabled(fabric->fcs->bfa))
+               return BFA_TRUE;
+       else
+               return BFA_FALSE;
+}
+
 /*
  * Delete all vports and wait for vport delete completions.
  */
@@ -989,6 +1068,7 @@ void
 bfa_fcs_fabric_link_down(struct bfa_fcs_fabric_s *fabric)
 {
        bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
+       fabric->fcs->bbscn_flogi_rjt = BFA_FALSE;
        bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LINK_DOWN);
 }
 
@@ -1192,6 +1272,7 @@ bfa_fcs_fabric_process_flogi(struct bfa_fcs_fabric_s *fabric,
        }
 
        fabric->bb_credit = be16_to_cpu(flogi->csp.bbcred);
+       fabric->lps->pr_bbscn = (be16_to_cpu(flogi->csp.rxsz) >> 12);
        bport->port_topo.pn2n.rem_port_wwn = flogi->port_name;
        bport->port_topo.pn2n.reply_oxid = fchs->ox_id;
 
@@ -1224,9 +1305,10 @@ bfa_fcs_fabric_send_flogi_acc(struct bfa_fcs_fabric_s *fabric)
                                    n2n_port->reply_oxid, pcfg->pwwn,
                                    pcfg->nwwn,
                                    bfa_fcport_get_maxfrsize(bfa),
-                                   bfa_fcport_get_rx_bbcredit(bfa));
+                                   bfa_fcport_get_rx_bbcredit(bfa),
+                                   bfa_fcs_fabric_oper_bbscn(fabric));
 
-       bfa_fcxp_send(fcxp, NULL, fabric->vf_id, fabric->lps->lp_tag,
+       bfa_fcxp_send(fcxp, NULL, fabric->vf_id, fabric->lps->bfa_tag,
                      BFA_FALSE, FC_CLASS_3,
                      reqlen, &fchs, bfa_fcs_fabric_flogiacc_comp, fabric,
                      FC_MAX_PDUSZ, 0);
@@ -1297,6 +1379,45 @@ bfa_fcs_vf_lookup(struct bfa_fcs_s *fcs, u16 vf_id)
        return NULL;
 }
 
+/*
+ *     Return the list of local logical ports present in the given VF.
+ *
+ *     @param[in]      vf      vf for which logical ports are returned
+ *     @param[out]     lpwwn   returned logical port wwn list
+ *     @param[in,out]  nlports in:size of lpwwn list;
+ *                             out:total elements present,
+ *                             actual elements returned is limited by the size
+ */
+void
+bfa_fcs_vf_get_ports(bfa_fcs_vf_t *vf, wwn_t lpwwn[], int *nlports)
+{
+       struct list_head *qe;
+       struct bfa_fcs_vport_s *vport;
+       int     i = 0;
+       struct bfa_fcs_s        *fcs;
+
+       if (vf == NULL || lpwwn == NULL || *nlports == 0)
+               return;
+
+       fcs = vf->fcs;
+
+       bfa_trc(fcs, vf->vf_id);
+       bfa_trc(fcs, (uint32_t) *nlports);
+
+       lpwwn[i++] = vf->bport.port_cfg.pwwn;
+
+       list_for_each(qe, &vf->vport_q) {
+               if (i >= *nlports)
+                       break;
+
+               vport = (struct bfa_fcs_vport_s *) qe;
+               lpwwn[i++] = vport->lport.port_cfg.pwwn;
+       }
+
+       bfa_trc(fcs, i);
+       *nlports = i;
+}
+
 /*
  * BFA FCS PPORT ( physical port)
  */
index 61cdce4..a5f1faf 100644 (file)
@@ -254,6 +254,9 @@ struct bfa_fcs_fabric_s;
 #define BFA_FCS_PORT_SYMBNAME_OSINFO_SZ                        48
 #define BFA_FCS_PORT_SYMBNAME_OSPATCH_SZ               16
 
+/* bb_scn value in 2^bb_scn */
+#define BFA_FCS_PORT_DEF_BB_SCN                                3
+
 /*
  * Get FC port ID for a logical port.
  */
@@ -379,6 +382,7 @@ void bfa_fcs_vport_online(struct bfa_fcs_vport_s *vport);
 void bfa_fcs_vport_offline(struct bfa_fcs_vport_s *vport);
 void bfa_fcs_vport_delete_comp(struct bfa_fcs_vport_s *vport);
 void bfa_fcs_vport_fcs_delete(struct bfa_fcs_vport_s *vport);
+void bfa_fcs_vport_stop_comp(struct bfa_fcs_vport_s *vport);
 
 #define BFA_FCS_RPORT_DEF_DEL_TIMEOUT  90      /* in secs */
 #define BFA_FCS_RPORT_MAX_RETRIES      (5)
@@ -420,6 +424,7 @@ struct bfa_fcs_rport_s {
        enum fc_cos     fc_cos; /*  FC classes of service supp */
        bfa_boolean_t   cisc;   /*  CISC capable device */
        bfa_boolean_t   prlo;   /*  processing prlo or LOGO */
+       bfa_boolean_t   plogi_pending;  /* Rx Plogi Pending */
        wwn_t   pwwn;   /*  port wwn of rport */
        wwn_t   nwwn;   /*  node wwn of rport */
        struct bfa_rport_symname_s psym_name; /*  port symbolic name  */
@@ -447,6 +452,8 @@ bfa_fcs_rport_get_halrport(struct bfa_fcs_rport_s *rport)
 /*
  * bfa fcs rport API functions
  */
+void bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport,
+                       struct bfa_rport_attr_s *attr);
 struct bfa_fcs_rport_s *bfa_fcs_rport_lookup(struct bfa_fcs_lport_s *port,
                                             wwn_t rpwwn);
 struct bfa_fcs_rport_s *bfa_fcs_rport_lookup_by_nwwn(
@@ -591,10 +598,21 @@ void bfa_fcs_itnim_is_initiator(struct bfa_fcs_itnim_s *itnim);
 void bfa_fcs_fcpim_uf_recv(struct bfa_fcs_itnim_s *itnim,
                        struct fchs_s *fchs, u16 len);
 
-#define        BFA_FCS_FDMI_SUPORTED_SPEEDS  (FDMI_TRANS_SPEED_1G  |   \
-                                      FDMI_TRANS_SPEED_2G |    \
-                                      FDMI_TRANS_SPEED_4G |    \
-                                      FDMI_TRANS_SPEED_8G)
+#define BFA_FCS_FDMI_SUPP_SPEEDS_4G    (FDMI_TRANS_SPEED_1G  | \
+                               FDMI_TRANS_SPEED_2G |           \
+                               FDMI_TRANS_SPEED_4G)
+
+#define BFA_FCS_FDMI_SUPP_SPEEDS_8G    (FDMI_TRANS_SPEED_1G  | \
+                               FDMI_TRANS_SPEED_2G |           \
+                               FDMI_TRANS_SPEED_4G |           \
+                               FDMI_TRANS_SPEED_8G)
+
+#define BFA_FCS_FDMI_SUPP_SPEEDS_16G   (FDMI_TRANS_SPEED_2G  | \
+                               FDMI_TRANS_SPEED_4G |           \
+                               FDMI_TRANS_SPEED_8G |           \
+                               FDMI_TRANS_SPEED_16G)
+
+#define BFA_FCS_FDMI_SUPP_SPEEDS_10G   FDMI_TRANS_SPEED_10G
 
 /*
  * HBA Attribute Block : BFA internal representation. Note : Some variable
@@ -649,6 +667,8 @@ struct bfa_fcs_s {
        struct bfa_trc_mod_s  *trcmod;  /*  tracing module */
        bfa_boolean_t   vf_enabled;     /*  VF mode is enabled */
        bfa_boolean_t   fdmi_enabled;   /*  FDMI is enabled */
+       bfa_boolean_t   bbscn_enabled;  /*  Driver Config Parameter */
+       bfa_boolean_t   bbscn_flogi_rjt;/*  FLOGI reject due to BB_SCN */
        bfa_boolean_t min_cfg;          /* min cfg enabled/disabled */
        u16     port_vfid;      /*  port default VF ID */
        struct bfa_fcs_driver_info_s driver_info;
@@ -715,6 +735,8 @@ void bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa,
                    struct bfad_s *bfad,
                    bfa_boolean_t min_cfg);
 void bfa_fcs_init(struct bfa_fcs_s *fcs);
+void bfa_fcs_pbc_vport_init(struct bfa_fcs_s *fcs);
+void bfa_fcs_update_cfg(struct bfa_fcs_s *fcs);
 void bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs,
                              struct bfa_fcs_driver_info_s *driver_info);
 void bfa_fcs_exit(struct bfa_fcs_s *fcs);
@@ -723,6 +745,7 @@ void bfa_fcs_exit(struct bfa_fcs_s *fcs);
  * bfa fcs vf public functions
  */
 bfa_fcs_vf_t *bfa_fcs_vf_lookup(struct bfa_fcs_s *fcs, u16 vf_id);
+void bfa_fcs_vf_get_ports(bfa_fcs_vf_t *vf, wwn_t vpwwn[], int *nports);
 
 /*
  * fabric protected interface functions
index e7b49f4..29b4108 100644 (file)
@@ -54,6 +54,7 @@ enum bfa_fcs_itnim_event {
        BFA_FCS_ITNIM_SM_INITIATOR = 9, /*  rport is initiator */
        BFA_FCS_ITNIM_SM_DELETE = 10,   /*  delete event from rport */
        BFA_FCS_ITNIM_SM_PRLO = 11,     /*  delete event from rport */
+       BFA_FCS_ITNIM_SM_RSP_NOT_SUPP = 12, /* cmd not supported rsp */
 };
 
 static void    bfa_fcs_itnim_sm_offline(struct bfa_fcs_itnim_s *itnim,
@@ -178,6 +179,10 @@ bfa_fcs_itnim_sm_prli(struct bfa_fcs_itnim_s *itnim,
                                BFA_FCS_RETRY_TIMEOUT);
                break;
 
+       case BFA_FCS_ITNIM_SM_RSP_NOT_SUPP:
+               bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
+               break;
+
        case BFA_FCS_ITNIM_SM_OFFLINE:
                bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
                bfa_fcxp_discard(itnim->fcxp);
@@ -447,6 +452,7 @@ bfa_fcs_itnim_prli_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
                                itnim->rport->scsi_function =
                                         BFA_RPORT_INITIATOR;
                                itnim->stats.prli_rsp_acc++;
+                               itnim->stats.initiator++;
                                bfa_sm_send_event(itnim,
                                                  BFA_FCS_ITNIM_SM_RSP_OK);
                                return;
@@ -472,6 +478,10 @@ bfa_fcs_itnim_prli_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
                bfa_trc(itnim->fcs, ls_rjt->reason_code_expl);
 
                itnim->stats.prli_rsp_rjt++;
+               if (ls_rjt->reason_code == FC_LS_RJT_RSN_CMD_NOT_SUPP) {
+                       bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_RSP_NOT_SUPP);
+                       return;
+               }
                bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_RSP_ERROR);
        }
 }
index 1d6be8c..f8251a9 100644 (file)
@@ -74,6 +74,7 @@ enum bfa_fcs_lport_event {
        BFA_FCS_PORT_SM_OFFLINE = 3,
        BFA_FCS_PORT_SM_DELETE = 4,
        BFA_FCS_PORT_SM_DELRPORT = 5,
+       BFA_FCS_PORT_SM_STOP = 6,
 };
 
 static void     bfa_fcs_lport_sm_uninit(struct bfa_fcs_lport_s *port,
@@ -86,6 +87,8 @@ static void     bfa_fcs_lport_sm_offline(struct bfa_fcs_lport_s *port,
                                        enum bfa_fcs_lport_event event);
 static void     bfa_fcs_lport_sm_deleting(struct bfa_fcs_lport_s *port,
                                        enum bfa_fcs_lport_event event);
+static void    bfa_fcs_lport_sm_stopping(struct bfa_fcs_lport_s *port,
+                                       enum bfa_fcs_lport_event event);
 
 static void
 bfa_fcs_lport_sm_uninit(
@@ -123,6 +126,12 @@ bfa_fcs_lport_sm_init(struct bfa_fcs_lport_s *port,
                bfa_fcs_lport_deleted(port);
                break;
 
+       case BFA_FCS_PORT_SM_STOP:
+               /* If vport - send completion call back */
+               if (port->vport)
+                       bfa_fcs_vport_stop_comp(port->vport);
+               break;
+
        case BFA_FCS_PORT_SM_OFFLINE:
                break;
 
@@ -148,6 +157,23 @@ bfa_fcs_lport_sm_online(
                bfa_fcs_lport_offline_actions(port);
                break;
 
+       case BFA_FCS_PORT_SM_STOP:
+               __port_action[port->fabric->fab_type].offline(port);
+
+               if (port->num_rports == 0) {
+                       bfa_sm_set_state(port, bfa_fcs_lport_sm_init);
+                       /* If vport - send completion call back */
+                       if (port->vport)
+                               bfa_fcs_vport_stop_comp(port->vport);
+               } else {
+                       bfa_sm_set_state(port, bfa_fcs_lport_sm_stopping);
+                       list_for_each_safe(qe, qen, &port->rport_q) {
+                               rport = (struct bfa_fcs_rport_s *) qe;
+                               bfa_sm_send_event(rport, RPSM_EVENT_DELETE);
+                       }
+               }
+               break;
+
        case BFA_FCS_PORT_SM_DELETE:
 
                __port_action[port->fabric->fab_type].offline(port);
@@ -189,6 +215,21 @@ bfa_fcs_lport_sm_offline(
                bfa_fcs_lport_online_actions(port);
                break;
 
+       case BFA_FCS_PORT_SM_STOP:
+               if (port->num_rports == 0) {
+                       bfa_sm_set_state(port, bfa_fcs_lport_sm_init);
+                       /* If vport - send completion call back */
+                       if (port->vport)
+                               bfa_fcs_vport_stop_comp(port->vport);
+               } else {
+                       bfa_sm_set_state(port, bfa_fcs_lport_sm_stopping);
+                       list_for_each_safe(qe, qen, &port->rport_q) {
+                               rport = (struct bfa_fcs_rport_s *) qe;
+                               bfa_sm_send_event(rport, RPSM_EVENT_DELETE);
+                       }
+               }
+               break;
+
        case BFA_FCS_PORT_SM_DELETE:
                if (port->num_rports == 0) {
                        bfa_sm_set_state(port, bfa_fcs_lport_sm_uninit);
@@ -211,6 +252,28 @@ bfa_fcs_lport_sm_offline(
        }
 }
 
+static void
+bfa_fcs_lport_sm_stopping(struct bfa_fcs_lport_s *port,
+                         enum bfa_fcs_lport_event event)
+{
+       bfa_trc(port->fcs, port->port_cfg.pwwn);
+       bfa_trc(port->fcs, event);
+
+       switch (event) {
+       case BFA_FCS_PORT_SM_DELRPORT:
+               if (port->num_rports == 0) {
+                       bfa_sm_set_state(port, bfa_fcs_lport_sm_init);
+                       /* If vport - send completion call back */
+                       if (port->vport)
+                               bfa_fcs_vport_stop_comp(port->vport);
+               }
+               break;
+
+       default:
+               bfa_sm_fault(port->fcs, event);
+       }
+}
+
 static void
 bfa_fcs_lport_sm_deleting(
        struct bfa_fcs_lport_s *port,
@@ -264,6 +327,40 @@ bfa_fcs_lport_send_ls_rjt(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs,
                          FC_MAX_PDUSZ, 0);
 }
 
+/*
+ * Send a FCCT Reject
+ */
+static void
+bfa_fcs_lport_send_fcgs_rjt(struct bfa_fcs_lport_s *port,
+       struct fchs_s *rx_fchs, u8 reason_code, u8 reason_code_expl)
+{
+       struct fchs_s   fchs;
+       struct bfa_fcxp_s *fcxp;
+       struct bfa_rport_s *bfa_rport = NULL;
+       int             len;
+       struct ct_hdr_s *rx_cthdr = (struct ct_hdr_s *)(rx_fchs + 1);
+       struct ct_hdr_s *ct_hdr;
+
+       bfa_trc(port->fcs, rx_fchs->d_id);
+       bfa_trc(port->fcs, rx_fchs->s_id);
+
+       fcxp = bfa_fcs_fcxp_alloc(port->fcs);
+       if (!fcxp)
+               return;
+
+       ct_hdr = bfa_fcxp_get_reqbuf(fcxp);
+       ct_hdr->gs_type = rx_cthdr->gs_type;
+       ct_hdr->gs_sub_type = rx_cthdr->gs_sub_type;
+
+       len = fc_gs_rjt_build(&fchs, ct_hdr, rx_fchs->s_id,
+                       bfa_fcs_lport_get_fcid(port),
+                       rx_fchs->ox_id, reason_code, reason_code_expl);
+
+       bfa_fcxp_send(fcxp, bfa_rport, port->fabric->vf_id, port->lp_tag,
+                       BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL,
+                       FC_MAX_PDUSZ, 0);
+}
+
 /*
  * Process incoming plogi from a remote port.
  */
@@ -647,6 +744,16 @@ bfa_fcs_lport_uf_recv(struct bfa_fcs_lport_s *lport,
                        bfa_fcs_lport_abts_acc(lport, fchs);
                return;
        }
+
+       if (fchs->type == FC_TYPE_SERVICES) {
+               /*
+                * Unhandled FC-GS frames. Send a FC-CT Reject
+                */
+               bfa_fcs_lport_send_fcgs_rjt(lport, fchs, CT_RSN_NOT_SUPP,
+                               CT_NS_EXP_NOADDITIONAL);
+               return;
+       }
+
        /*
         * look for a matching remote port ID
         */
@@ -835,8 +942,8 @@ bfa_fcs_lport_attach(struct bfa_fcs_lport_s *lport, struct bfa_fcs_s *fcs,
        lport->fcs = fcs;
        lport->fabric = bfa_fcs_vf_lookup(fcs, vf_id);
        lport->vport = vport;
-       lport->lp_tag = (vport) ? vport->lps->lp_tag :
-                                 lport->fabric->lps->lp_tag;
+       lport->lp_tag = (vport) ? vport->lps->bfa_tag :
+                                 lport->fabric->lps->bfa_tag;
 
        INIT_LIST_HEAD(&lport->rport_q);
        lport->num_rports = 0;
@@ -1074,6 +1181,8 @@ static void       bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi,
                                 struct bfa_fcs_fdmi_hba_attr_s *hba_attr);
 static void    bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi,
                                  struct bfa_fcs_fdmi_port_attr_s *port_attr);
+u32    bfa_fcs_fdmi_convert_speed(enum bfa_port_speed pport_speed);
+
 /*
  *  fcs_fdmi_sm FCS FDMI state machine
  */
@@ -1672,7 +1781,7 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
        memcpy(attr->value, fcs_hba_attr->driver_version, templen);
        templen = fc_roundup(templen, sizeof(u32));
        curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
-       len += templen;;
+       len += templen;
        count++;
        attr->len = cpu_to_be16(templen + sizeof(attr->type) +
                             sizeof(templen));
@@ -2160,12 +2269,36 @@ bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi,
        /*
         * Supported Speeds
         */
-       port_attr->supp_speed = cpu_to_be32(BFA_FCS_FDMI_SUPORTED_SPEEDS);
+       switch (pport_attr.speed_supported) {
+       case BFA_PORT_SPEED_16GBPS:
+               port_attr->supp_speed =
+                       cpu_to_be32(BFA_FCS_FDMI_SUPP_SPEEDS_16G);
+               break;
+
+       case BFA_PORT_SPEED_10GBPS:
+               port_attr->supp_speed =
+                       cpu_to_be32(BFA_FCS_FDMI_SUPP_SPEEDS_10G);
+               break;
+
+       case BFA_PORT_SPEED_8GBPS:
+               port_attr->supp_speed =
+                       cpu_to_be32(BFA_FCS_FDMI_SUPP_SPEEDS_8G);
+               break;
+
+       case BFA_PORT_SPEED_4GBPS:
+               port_attr->supp_speed =
+                       cpu_to_be32(BFA_FCS_FDMI_SUPP_SPEEDS_4G);
+               break;
+
+       default:
+               bfa_sm_fault(port->fcs, pport_attr.speed_supported);
+       }
 
        /*
         * Current Speed
         */
-       port_attr->curr_speed = cpu_to_be32(pport_attr.speed);
+       port_attr->curr_speed = cpu_to_be32(
+                               bfa_fcs_fdmi_convert_speed(pport_attr.speed));
 
        /*
         * Max PDU Size.
@@ -2186,6 +2319,41 @@ bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi,
 
 }
 
+/*
+ * Convert BFA speed to FDMI format.
+ */
+u32
+bfa_fcs_fdmi_convert_speed(bfa_port_speed_t pport_speed)
+{
+       u32     ret;
+
+       switch (pport_speed) {
+       case BFA_PORT_SPEED_1GBPS:
+       case BFA_PORT_SPEED_2GBPS:
+               ret = pport_speed;
+               break;
+
+       case BFA_PORT_SPEED_4GBPS:
+               ret = FDMI_TRANS_SPEED_4G;
+               break;
+
+       case BFA_PORT_SPEED_8GBPS:
+               ret = FDMI_TRANS_SPEED_8G;
+               break;
+
+       case BFA_PORT_SPEED_10GBPS:
+               ret = FDMI_TRANS_SPEED_10G;
+               break;
+
+       case BFA_PORT_SPEED_16GBPS:
+               ret = FDMI_TRANS_SPEED_16G;
+               break;
+
+       default:
+               ret = FDMI_TRANS_SPEED_UNKNOWN;
+       }
+       return ret;
+}
 
 void
 bfa_fcs_lport_fdmi_init(struct bfa_fcs_lport_ms_s *ms)
@@ -2829,7 +2997,8 @@ bfa_fcs_lport_ms_send_plogi(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced)
                             bfa_hton3b(FC_MGMT_SERVER),
                             bfa_fcs_lport_get_fcid(port), 0,
                             port->port_cfg.pwwn, port->port_cfg.nwwn,
-                                bfa_fcport_get_maxfrsize(port->fcs->bfa));
+                            bfa_fcport_get_maxfrsize(port->fcs->bfa),
+                            bfa_fcport_get_rx_bbcredit(port->fcs->bfa));
 
        bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
                          FC_CLASS_3, len, &fchs,
@@ -3573,7 +3742,7 @@ bfa_fcs_lport_ns_send_plogi(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
 
        bfa_trc(port->fcs, port->pid);
 
-fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
+       fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
        if (!fcxp) {
                port->stats.ns_plogi_alloc_wait++;
                bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
@@ -3586,7 +3755,8 @@ fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
                             bfa_hton3b(FC_NAME_SERVER),
                             bfa_fcs_lport_get_fcid(port), 0,
                             port->port_cfg.pwwn, port->port_cfg.nwwn,
-                                bfa_fcport_get_maxfrsize(port->fcs->bfa));
+                            bfa_fcport_get_maxfrsize(port->fcs->bfa),
+                            bfa_fcport_get_rx_bbcredit(port->fcs->bfa));
 
        bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
                          FC_CLASS_3, len, &fchs,
@@ -4762,8 +4932,8 @@ bfa_fcs_lport_get_rport_max_speed(bfa_fcs_lport_t *port)
        while (qe != qh) {
                rport = (struct bfa_fcs_rport_s *) qe;
                if ((bfa_ntoh3b(rport->pid) > 0xFFF000) ||
-                       (bfa_fcs_rport_get_state(rport) ==
-                         BFA_RPORT_OFFLINE)) {
+                       (bfa_fcs_rport_get_state(rport) == BFA_RPORT_OFFLINE) ||
+                       (rport->scsi_function != BFA_RPORT_TARGET)) {
                        qe = bfa_q_next(qe);
                        continue;
                }
@@ -4776,17 +4946,15 @@ bfa_fcs_lport_get_rport_max_speed(bfa_fcs_lport_t *port)
                                bfa_fcport_get_ratelim_speed(port->fcs->bfa);
                }
 
-               if      ((rport_speed  == BFA_PORT_SPEED_8GBPS) ||
-                       (rport_speed > port_speed)) {
+               if (rport_speed > max_speed)
                        max_speed = rport_speed;
-                       break;
-               } else if (rport_speed > max_speed) {
-                       max_speed = rport_speed;
-               }
 
                qe = bfa_q_next(qe);
        }
 
+       if (max_speed > port_speed)
+               max_speed = port_speed;
+
        bfa_trc(fcs, max_speed);
        return max_speed;
 }
@@ -4918,6 +5086,7 @@ enum bfa_fcs_vport_event {
        BFA_FCS_VPORT_SM_DELCOMP = 11,  /*  lport delete completion */
        BFA_FCS_VPORT_SM_RSP_DUP_WWN = 12,      /*  Dup wnn error*/
        BFA_FCS_VPORT_SM_RSP_FAILED = 13,       /*  non-retryable failure */
+       BFA_FCS_VPORT_SM_STOPCOMP = 14, /* vport delete completion */
 };
 
 static void     bfa_fcs_vport_sm_uninit(struct bfa_fcs_vport_s *vport,
@@ -4930,6 +5099,8 @@ static void     bfa_fcs_vport_sm_fdisc(struct bfa_fcs_vport_s *vport,
                                       enum bfa_fcs_vport_event event);
 static void     bfa_fcs_vport_sm_fdisc_retry(struct bfa_fcs_vport_s *vport,
                                             enum bfa_fcs_vport_event event);
+static void    bfa_fcs_vport_sm_fdisc_rsp_wait(struct bfa_fcs_vport_s *vport,
+                                       enum bfa_fcs_vport_event event);
 static void     bfa_fcs_vport_sm_online(struct bfa_fcs_vport_s *vport,
                                        enum bfa_fcs_vport_event event);
 static void     bfa_fcs_vport_sm_deleting(struct bfa_fcs_vport_s *vport,
@@ -4940,6 +5111,10 @@ static void     bfa_fcs_vport_sm_logo(struct bfa_fcs_vport_s *vport,
                                      enum bfa_fcs_vport_event event);
 static void     bfa_fcs_vport_sm_error(struct bfa_fcs_vport_s *vport,
                                      enum bfa_fcs_vport_event event);
+static void    bfa_fcs_vport_sm_stopping(struct bfa_fcs_vport_s *vport,
+                                       enum bfa_fcs_vport_event event);
+static void    bfa_fcs_vport_sm_logo_for_stop(struct bfa_fcs_vport_s *vport,
+                                       enum bfa_fcs_vport_event event);
 
 static struct bfa_sm_table_s  vport_sm_table[] = {
        {BFA_SM(bfa_fcs_vport_sm_uninit), BFA_FCS_VPORT_UNINIT},
@@ -4947,6 +5122,7 @@ static struct bfa_sm_table_s  vport_sm_table[] = {
        {BFA_SM(bfa_fcs_vport_sm_offline), BFA_FCS_VPORT_OFFLINE},
        {BFA_SM(bfa_fcs_vport_sm_fdisc), BFA_FCS_VPORT_FDISC},
        {BFA_SM(bfa_fcs_vport_sm_fdisc_retry), BFA_FCS_VPORT_FDISC_RETRY},
+       {BFA_SM(bfa_fcs_vport_sm_fdisc_rsp_wait), BFA_FCS_VPORT_FDISC_RSP_WAIT},
        {BFA_SM(bfa_fcs_vport_sm_online), BFA_FCS_VPORT_ONLINE},
        {BFA_SM(bfa_fcs_vport_sm_deleting), BFA_FCS_VPORT_DELETING},
        {BFA_SM(bfa_fcs_vport_sm_cleanup), BFA_FCS_VPORT_CLEANUP},
@@ -5042,6 +5218,11 @@ bfa_fcs_vport_sm_offline(struct bfa_fcs_vport_s *vport,
                bfa_fcs_vport_do_fdisc(vport);
                break;
 
+       case BFA_FCS_VPORT_SM_STOP:
+               bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
+               bfa_sm_send_event(&vport->lport, BFA_FCS_PORT_SM_STOP);
+               break;
+
        case BFA_FCS_VPORT_SM_OFFLINE:
                /*
                 * This can happen if the vport couldn't be initialized
@@ -5070,9 +5251,7 @@ bfa_fcs_vport_sm_fdisc(struct bfa_fcs_vport_s *vport,
 
        switch (event) {
        case BFA_FCS_VPORT_SM_DELETE:
-               bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
-               bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE);
-               bfa_fcs_lport_delete(&vport->lport);
+               bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc_rsp_wait);
                break;
 
        case BFA_FCS_VPORT_SM_OFFLINE:
@@ -5139,6 +5318,41 @@ bfa_fcs_vport_sm_fdisc_retry(struct bfa_fcs_vport_s *vport,
        }
 }
 
/*
 * FDISC is in progress and we got a vport delete request -
 * this is a wait state while we wait for fdisc response and
 * we will transition to the appropriate state - on rsp status.
 */
static void
bfa_fcs_vport_sm_fdisc_rsp_wait(struct bfa_fcs_vport_s *vport,
				enum bfa_fcs_vport_event event)
{
	bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
	bfa_trc(__vport_fcs(vport), event);

	switch (event) {
	case BFA_FCS_VPORT_SM_RSP_OK:
		/* FDISC succeeded - proceed with the pending lport delete */
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_deleting);
		bfa_fcs_lport_delete(&vport->lport);
		break;

	case BFA_FCS_VPORT_SM_DELETE:
		/* Delete already pending - ignore the duplicate request */
		break;

	case BFA_FCS_VPORT_SM_OFFLINE:
	case BFA_FCS_VPORT_SM_RSP_ERROR:
	case BFA_FCS_VPORT_SM_RSP_FAILED:
	case BFA_FCS_VPORT_SM_RSP_DUP_WWN:
		/*
		 * FDISC failed or link went offline: take the lps offline
		 * and clean up the lport.
		 */
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
		bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE);
		bfa_fcs_lport_delete(&vport->lport);
		break;

	default:
		bfa_sm_fault(__vport_fcs(vport), event);
	}
}
+
 /*
  * Vport is online (FDISC is complete).
  */
@@ -5155,6 +5369,11 @@ bfa_fcs_vport_sm_online(struct bfa_fcs_vport_s *vport,
                bfa_fcs_lport_delete(&vport->lport);
                break;
 
+       case BFA_FCS_VPORT_SM_STOP:
+               bfa_sm_set_state(vport, bfa_fcs_vport_sm_stopping);
+               bfa_sm_send_event(&vport->lport, BFA_FCS_PORT_SM_STOP);
+               break;
+
        case BFA_FCS_VPORT_SM_OFFLINE:
                bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline);
                bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE);
@@ -5166,6 +5385,32 @@ bfa_fcs_vport_sm_online(struct bfa_fcs_vport_s *vport,
        }
 }
 
/*
 * Vport is being stopped - awaiting lport stop completion to send
 * LOGO to fabric.
 */
static void
bfa_fcs_vport_sm_stopping(struct bfa_fcs_vport_s *vport,
			  enum bfa_fcs_vport_event event)
{
	bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
	bfa_trc(__vport_fcs(vport), event);

	switch (event) {
	case BFA_FCS_VPORT_SM_STOPCOMP:
		/* lport stop finished - now send LOGO to the fabric */
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_logo_for_stop);
		bfa_fcs_vport_do_logo(vport);
		break;

	case BFA_FCS_VPORT_SM_OFFLINE:
		/* Link went offline while stopping - no LOGO, just clean up */
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
		break;

	default:
		bfa_sm_fault(__vport_fcs(vport), event);
	}
}
+
 /*
  * Vport is being deleted - awaiting lport delete completion to send
  * LOGO to fabric.
@@ -5236,6 +5481,10 @@ bfa_fcs_vport_sm_cleanup(struct bfa_fcs_vport_s *vport,
                bfa_fcs_vport_free(vport);
                break;
 
+       case BFA_FCS_VPORT_SM_STOPCOMP:
+               bfa_sm_set_state(vport, bfa_fcs_vport_sm_created);
+               break;
+
        case BFA_FCS_VPORT_SM_DELETE:
                break;
 
@@ -5244,6 +5493,34 @@ bfa_fcs_vport_sm_cleanup(struct bfa_fcs_vport_s *vport,
        }
 }
 
/*
 * LOGO is sent to fabric. Vport stop is in progress. Lport stop cleanup
 * is done.
 */
static void
bfa_fcs_vport_sm_logo_for_stop(struct bfa_fcs_vport_s *vport,
			       enum bfa_fcs_vport_event event)
{
	bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
	bfa_trc(__vport_fcs(vport), event);

	switch (event) {
	case BFA_FCS_VPORT_SM_OFFLINE:
		/* Link dropped before the LOGO completed - take lps offline */
		bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE);
		/*
		 * !!! fall through !!!
		 */

	case BFA_FCS_VPORT_SM_RSP_OK:
	case BFA_FCS_VPORT_SM_RSP_ERROR:
		/* LOGO done (or moot) - vport returns to the created state */
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_created);
		break;

	default:
		bfa_sm_fault(__vport_fcs(vport), event);
	}
}
+
 /*
  * LOGO is sent to fabric. Vport delete is in progress. Lport delete cleanup
  * is done.
@@ -5391,7 +5668,10 @@ void
 bfa_fcs_vport_online(struct bfa_fcs_vport_s *vport)
 {
        vport->vport_stats.fab_online++;
-       bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_ONLINE);
+       if (bfa_fcs_fabric_npiv_capable(__vport_fabric(vport)))
+               bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_ONLINE);
+       else
+               vport->vport_stats.fab_no_npiv++;
 }
 
 /*
@@ -5421,6 +5701,15 @@ bfa_fcs_vport_fcs_delete(struct bfa_fcs_vport_s *vport)
        bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_DELETE);
 }
 
/*
 * Stop completion callback from associated lport.
 * Drives the vport state machine with the stop-complete event.
 */
void
bfa_fcs_vport_stop_comp(struct bfa_fcs_vport_s *vport)
{
	bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_STOPCOMP);
}
+
 /*
  * Delete completion callback from associated lport
  */
index caaee6f..2c51445 100644 (file)
@@ -262,6 +262,7 @@ bfa_fcs_rport_sm_plogiacc_sending(struct bfa_fcs_rport_s *rport,
                break;
 
        case RPSM_EVENT_PLOGI_RCVD:
+       case RPSM_EVENT_PLOGI_COMP:
        case RPSM_EVENT_SCN:
                /*
                 * Ignore, SCN is possibly online notification.
@@ -470,6 +471,7 @@ bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport,
                break;
 
        case RPSM_EVENT_PRLO_RCVD:
+       case RPSM_EVENT_PLOGI_COMP:
                break;
 
        case RPSM_EVENT_LOGO_RCVD:
@@ -484,9 +486,9 @@ bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport,
                break;
 
        case RPSM_EVENT_PLOGI_RCVD:
-               bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending);
+               rport->plogi_pending = BFA_TRUE;
+               bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_offline);
                bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE);
-               bfa_fcs_rport_send_plogiacc(rport, NULL);
                break;
 
        case RPSM_EVENT_DELETE:
@@ -891,6 +893,18 @@ bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport,
 
        switch (event) {
        case RPSM_EVENT_HCB_OFFLINE:
+               if (bfa_fcs_lport_is_online(rport->port) &&
+                   (rport->plogi_pending)) {
+                       rport->plogi_pending = BFA_FALSE;
+                       bfa_sm_set_state(rport,
+                               bfa_fcs_rport_sm_plogiacc_sending);
+                       bfa_fcs_rport_send_plogiacc(rport, NULL);
+                       break;
+               }
+               /*
+                * !! fall through !!
+                */
+
        case RPSM_EVENT_ADDRESS_CHANGE:
                if (bfa_fcs_lport_is_online(rport->port)) {
                        if (bfa_fcs_fabric_is_switched(rport->port->fabric)) {
@@ -921,6 +935,8 @@ bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport,
        case RPSM_EVENT_SCN:
        case RPSM_EVENT_LOGO_RCVD:
        case RPSM_EVENT_PRLO_RCVD:
+       case RPSM_EVENT_PLOGI_RCVD:
+       case RPSM_EVENT_LOGO_IMP:
                /*
                 * Ignore, already offline.
                 */
@@ -957,10 +973,18 @@ bfa_fcs_rport_sm_hcb_logorcv(struct bfa_fcs_rport_s *rport,
                 */
                if (bfa_fcs_lport_is_online(rport->port) &&
                        (!BFA_FCS_PID_IS_WKA(rport->pid))) {
-                       bfa_sm_set_state(rport,
-                               bfa_fcs_rport_sm_nsdisc_sending);
-                       rport->ns_retries = 0;
-                       bfa_fcs_rport_send_nsdisc(rport, NULL);
+                       if (bfa_fcs_fabric_is_switched(rport->port->fabric)) {
+                               bfa_sm_set_state(rport,
+                                       bfa_fcs_rport_sm_nsdisc_sending);
+                               rport->ns_retries = 0;
+                               bfa_fcs_rport_send_nsdisc(rport, NULL);
+                       } else {
+                               /* For N2N  Direct Attach, try to re-login */
+                               bfa_sm_set_state(rport,
+                                       bfa_fcs_rport_sm_plogi_sending);
+                               rport->plogi_retries = 0;
+                               bfa_fcs_rport_send_plogi(rport, NULL);
+                       }
                } else {
                        /*
                         * if it is not a well known address, reset the
@@ -1356,7 +1380,8 @@ bfa_fcs_rport_send_plogi(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
        len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid,
                                bfa_fcs_lport_get_fcid(port), 0,
                                port->port_cfg.pwwn, port->port_cfg.nwwn,
-                               bfa_fcport_get_maxfrsize(port->fcs->bfa));
+                               bfa_fcport_get_maxfrsize(port->fcs->bfa),
+                               bfa_fcport_get_rx_bbcredit(port->fcs->bfa));
 
        bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
                        FC_CLASS_3, len, &fchs, bfa_fcs_rport_plogi_response,
@@ -1476,7 +1501,8 @@ bfa_fcs_rport_send_plogiacc(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
                                 rport->pid, bfa_fcs_lport_get_fcid(port),
                                 rport->reply_oxid, port->port_cfg.pwwn,
                                 port->port_cfg.nwwn,
-                                bfa_fcport_get_maxfrsize(port->fcs->bfa));
+                                bfa_fcport_get_maxfrsize(port->fcs->bfa),
+                                bfa_fcport_get_rx_bbcredit(port->fcs->bfa));
 
        bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
                        FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0);
@@ -2024,6 +2050,11 @@ bfa_fcs_rport_online_action(struct bfa_fcs_rport_s *rport)
 
        rport->stats.onlines++;
 
+       if ((!rport->pid) || (!rport->pwwn)) {
+               bfa_trc(rport->fcs, rport->pid);
+               bfa_sm_fault(rport->fcs, rport->pid);
+       }
+
        if (bfa_fcs_lport_is_initiator(port)) {
                bfa_fcs_itnim_rport_online(rport->itnim);
                if (!BFA_FCS_PID_IS_WKA(rport->pid))
@@ -2047,6 +2078,7 @@ bfa_fcs_rport_offline_action(struct bfa_fcs_rport_s *rport)
        char    rpwwn_buf[BFA_STRING_32];
 
        rport->stats.offlines++;
+       rport->plogi_pending = BFA_FALSE;
 
        wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
        wwn2str(rpwwn_buf, rport->pwwn);
@@ -2120,7 +2152,7 @@ bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport, struct fc_logi_s *plogi)
 
                port->fabric->bb_credit = be16_to_cpu(plogi->csp.bbcred);
                bfa_fcport_set_tx_bbcredit(port->fcs->bfa,
-                                         port->fabric->bb_credit);
+                                         port->fabric->bb_credit, 0);
        }
 
 }
@@ -2233,22 +2265,6 @@ bfa_fcs_rport_plogi_create(struct bfa_fcs_lport_s *port, struct fchs_s *fchs,
        bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_RCVD);
 }
 
-static int
-wwn_compare(wwn_t wwn1, wwn_t wwn2)
-{
-       u8              *b1 = (u8 *) &wwn1;
-       u8              *b2 = (u8 *) &wwn2;
-       int             i;
-
-       for (i = 0; i < sizeof(wwn_t); i++) {
-               if (b1[i] < b2[i])
-                       return -1;
-               if (b1[i] > b2[i])
-                       return 1;
-       }
-       return 0;
-}
-
 /*
  *     Called by bport/vport to handle PLOGI received from an existing
  *      remote port.
@@ -2266,19 +2282,8 @@ bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs,
        rport->reply_oxid = rx_fchs->ox_id;
        bfa_trc(rport->fcs, rport->reply_oxid);
 
-       /*
-        * In Switched fabric topology,
-        * PLOGI to each other. If our pwwn is smaller, ignore it,
-        * if it is not a well known address.
-        * If the link topology is N2N,
-        * this Plogi should be accepted.
-        */
-       if ((wwn_compare(rport->port->port_cfg.pwwn, rport->pwwn) == -1) &&
-               (bfa_fcs_fabric_is_switched(rport->port->fabric)) &&
-               (!BFA_FCS_PID_IS_WKA(rport->pid))) {
-               bfa_trc(rport->fcs, rport->pid);
-               return;
-       }
+       rport->pid = rx_fchs->s_id;
+       bfa_trc(rport->fcs, rport->pid);
 
        rport->stats.plogi_rcvd++;
        bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_RCVD);
@@ -2531,7 +2536,45 @@ bfa_fcs_rport_prlo(struct bfa_fcs_rport_s *rport, __be16 ox_id)
        bfa_sm_send_event(rport, RPSM_EVENT_PRLO_RCVD);
 }
 
-
+void
+bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport,
+               struct bfa_rport_attr_s *rport_attr)
+{
+       struct bfa_rport_qos_attr_s qos_attr;
+       struct bfa_fcs_lport_s *port = rport->port;
+       bfa_port_speed_t rport_speed = rport->rpf.rpsc_speed;
+
+       memset(rport_attr, 0, sizeof(struct bfa_rport_attr_s));
+       memset(&qos_attr, 0, sizeof(struct bfa_rport_qos_attr_s));
+
+       rport_attr->pid = rport->pid;
+       rport_attr->pwwn = rport->pwwn;
+       rport_attr->nwwn = rport->nwwn;
+       rport_attr->cos_supported = rport->fc_cos;
+       rport_attr->df_sz = rport->maxfrsize;
+       rport_attr->state = bfa_fcs_rport_get_state(rport);
+       rport_attr->fc_cos = rport->fc_cos;
+       rport_attr->cisc = rport->cisc;
+       rport_attr->scsi_function = rport->scsi_function;
+       rport_attr->curr_speed  = rport->rpf.rpsc_speed;
+       rport_attr->assigned_speed  = rport->rpf.assigned_speed;
+
+       qos_attr.qos_priority = rport->bfa_rport->qos_attr.qos_priority;
+       qos_attr.qos_flow_id =
+               cpu_to_be32(rport->bfa_rport->qos_attr.qos_flow_id);
+       rport_attr->qos_attr = qos_attr;
+
+       rport_attr->trl_enforced = BFA_FALSE;
+       if (bfa_fcport_is_ratelim(port->fcs->bfa) &&
+           (rport->scsi_function == BFA_RPORT_TARGET)) {
+               if (rport_speed == BFA_PORT_SPEED_UNKNOWN)
+                       rport_speed =
+                               bfa_fcport_get_ratelim_speed(rport->fcs->bfa);
+
+               if (rport_speed < bfa_fcs_lport_get_rport_max_speed(port))
+                       rport_attr->trl_enforced = BFA_TRUE;
+       }
+}
 
 /*
  * Remote port implementation.
index 977e681..e7ffd82 100644 (file)
 
 #include "bfad_drv.h"
 #include "bfa_modules.h"
-#include "bfi_cbreg.h"
+#include "bfi_reg.h"
 
 void
 bfa_hwcb_reginit(struct bfa_s *bfa)
 {
        struct bfa_iocfc_regs_s *bfa_regs = &bfa->iocfc.bfa_regs;
        void __iomem *kva = bfa_ioc_bar0(&bfa->ioc);
-       int                     i, q, fn = bfa_ioc_pcifn(&bfa->ioc);
+       int     fn = bfa_ioc_pcifn(&bfa->ioc);
 
        if (fn == 0) {
                bfa_regs->intr_status = (kva + HOSTFN0_INT_STATUS);
@@ -33,29 +33,6 @@ bfa_hwcb_reginit(struct bfa_s *bfa)
                bfa_regs->intr_status = (kva + HOSTFN1_INT_STATUS);
                bfa_regs->intr_mask   = (kva + HOSTFN1_INT_MSK);
        }
-
-       for (i = 0; i < BFI_IOC_MAX_CQS; i++) {
-               /*
-                * CPE registers
-                */
-               q = CPE_Q_NUM(fn, i);
-               bfa_regs->cpe_q_pi[i] = (kva + CPE_Q_PI(q));
-               bfa_regs->cpe_q_ci[i] = (kva + CPE_Q_CI(q));
-               bfa_regs->cpe_q_depth[i] = (kva + CPE_Q_DEPTH(q));
-
-               /*
-                * RME registers
-                */
-               q = CPE_Q_NUM(fn, i);
-               bfa_regs->rme_q_pi[i] = (kva + RME_Q_PI(q));
-               bfa_regs->rme_q_ci[i] = (kva + RME_Q_CI(q));
-               bfa_regs->rme_q_depth[i] = (kva + RME_Q_DEPTH(q));
-       }
-}
-
-void
-bfa_hwcb_reqq_ack(struct bfa_s *bfa, int reqq)
-{
 }
 
 static void
@@ -65,11 +42,6 @@ bfa_hwcb_reqq_ack_msix(struct bfa_s *bfa, int reqq)
                        bfa->iocfc.bfa_regs.intr_status);
 }
 
-void
-bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq)
-{
-}
-
 static void
 bfa_hwcb_rspq_ack_msix(struct bfa_s *bfa, int rspq)
 {
@@ -103,44 +75,72 @@ bfa_hwcb_msix_getvecs(struct bfa_s *bfa, u32 *msix_vecs_bmap,
        *num_vecs = __HFN_NUMINTS;
 }
 
/*
 * Dummy interrupt handler for handling spurious interrupts.
 * Installed on every vector by bfa_hwcb_msix_uninstall().
 */
static void
bfa_hwcb_msix_dummy(struct bfa_s *bfa, int vec)
{
}
+
 /*
  * No special setup required for crossbow -- vector assignments are implicit.
  */
 void
 bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs)
 {
-       int i;
-
        WARN_ON((nvecs != 1) && (nvecs != __HFN_NUMINTS));
 
        bfa->msix.nvecs = nvecs;
-       if (nvecs == 1) {
-               for (i = 0; i < BFA_MSIX_CB_MAX; i++)
+       bfa_hwcb_msix_uninstall(bfa);
+}
+
+void
+bfa_hwcb_msix_ctrl_install(struct bfa_s *bfa)
+{
+       int i;
+
+       if (bfa->msix.nvecs == 0)
+               return;
+
+       if (bfa->msix.nvecs == 1) {
+               for (i = BFI_MSIX_CPE_QMIN_CB; i < BFI_MSIX_CB_MAX; i++)
                        bfa->msix.handler[i] = bfa_msix_all;
                return;
        }
 
-       for (i = BFA_MSIX_CPE_Q0; i <= BFA_MSIX_CPE_Q7; i++)
-               bfa->msix.handler[i] = bfa_msix_reqq;
-
-       for (i = BFA_MSIX_RME_Q0; i <= BFA_MSIX_RME_Q7; i++)
-               bfa->msix.handler[i] = bfa_msix_rspq;
-
-       for (; i < BFA_MSIX_CB_MAX; i++)
+       for (i = BFI_MSIX_RME_QMAX_CB+1; i < BFI_MSIX_CB_MAX; i++)
                bfa->msix.handler[i] = bfa_msix_lpu_err;
 }
 
-/*
- * Crossbow -- dummy, interrupts are masked
- */
 void
-bfa_hwcb_msix_install(struct bfa_s *bfa)
+bfa_hwcb_msix_queue_install(struct bfa_s *bfa)
 {
+       int i;
+
+       if (bfa->msix.nvecs == 0)
+               return;
+
+       if (bfa->msix.nvecs == 1) {
+               for (i = BFI_MSIX_CPE_QMIN_CB; i <= BFI_MSIX_RME_QMAX_CB; i++)
+                       bfa->msix.handler[i] = bfa_msix_all;
+               return;
+       }
+
+       for (i = BFI_MSIX_CPE_QMIN_CB; i <= BFI_MSIX_CPE_QMAX_CB; i++)
+               bfa->msix.handler[i] = bfa_msix_reqq;
+
+       for (i = BFI_MSIX_RME_QMIN_CB; i <= BFI_MSIX_RME_QMAX_CB; i++)
+               bfa->msix.handler[i] = bfa_msix_rspq;
 }
 
void
bfa_hwcb_msix_uninstall(struct bfa_s *bfa)
{
	int i;

	/* Point every vector at the dummy handler to swallow spurious irqs */
	for (i = 0; i < BFI_MSIX_CB_MAX; i++)
		bfa->msix.handler[i] = bfa_hwcb_msix_dummy;
}
 
 /*
@@ -156,6 +156,6 @@ bfa_hwcb_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix)
 void
 bfa_hwcb_msix_get_rme_range(struct bfa_s *bfa, u32 *start, u32 *end)
 {
-       *start = BFA_MSIX_RME_Q0;
-       *end = BFA_MSIX_RME_Q7;
+       *start = BFI_MSIX_RME_QMIN_CB;
+       *end = BFI_MSIX_RME_QMAX_CB;
 }
index 21018d9..989bbce 100644 (file)
 
 #include "bfad_drv.h"
 #include "bfa_modules.h"
-#include "bfi_ctreg.h"
+#include "bfi_reg.h"
 
 BFA_TRC_FILE(HAL, IOCFC_CT);
 
-static u32 __ct_msix_err_vec_reg[] = {
-       HOST_MSIX_ERR_INDEX_FN0,
-       HOST_MSIX_ERR_INDEX_FN1,
-       HOST_MSIX_ERR_INDEX_FN2,
-       HOST_MSIX_ERR_INDEX_FN3,
-};
-
-static void
-bfa_hwct_msix_lpu_err_set(struct bfa_s *bfa, bfa_boolean_t msix, int vec)
-{
-       int fn = bfa_ioc_pcifn(&bfa->ioc);
-       void __iomem *kva = bfa_ioc_bar0(&bfa->ioc);
-
-       if (msix)
-               writel(vec, kva + __ct_msix_err_vec_reg[fn]);
-       else
-               writel(0, kva + __ct_msix_err_vec_reg[fn]);
-}
-
 /*
  * Dummy interrupt handler for handling spurious interrupt during chip-reinit.
  */
@@ -53,7 +34,7 @@ bfa_hwct_reginit(struct bfa_s *bfa)
 {
        struct bfa_iocfc_regs_s *bfa_regs = &bfa->iocfc.bfa_regs;
        void __iomem *kva = bfa_ioc_bar0(&bfa->ioc);
-       int                     i, q, fn = bfa_ioc_pcifn(&bfa->ioc);
+       int     fn = bfa_ioc_pcifn(&bfa->ioc);
 
        if (fn == 0) {
                bfa_regs->intr_status = (kva + HOSTFN0_INT_STATUS);
@@ -62,26 +43,16 @@ bfa_hwct_reginit(struct bfa_s *bfa)
                bfa_regs->intr_status = (kva + HOSTFN1_INT_STATUS);
                bfa_regs->intr_mask   = (kva + HOSTFN1_INT_MSK);
        }
+}
 
-       for (i = 0; i < BFI_IOC_MAX_CQS; i++) {
-               /*
-                * CPE registers
-                */
-               q = CPE_Q_NUM(fn, i);
-               bfa_regs->cpe_q_pi[i] = (kva + CPE_PI_PTR_Q(q << 5));
-               bfa_regs->cpe_q_ci[i] = (kva + CPE_CI_PTR_Q(q << 5));
-               bfa_regs->cpe_q_depth[i] = (kva + CPE_DEPTH_Q(q << 5));
-               bfa_regs->cpe_q_ctrl[i] = (kva + CPE_QCTRL_Q(q << 5));
-
-               /*
-                * RME registers
-                */
-               q = CPE_Q_NUM(fn, i);
-               bfa_regs->rme_q_pi[i] = (kva + RME_PI_PTR_Q(q << 5));
-               bfa_regs->rme_q_ci[i] = (kva + RME_CI_PTR_Q(q << 5));
-               bfa_regs->rme_q_depth[i] = (kva + RME_DEPTH_Q(q << 5));
-               bfa_regs->rme_q_ctrl[i] = (kva + RME_QCTRL_Q(q << 5));
-       }
/*
 * Initialize the interrupt status/mask register addresses for CT2
 * (single per-function CT2_HOSTFN_* register pair).
 */
void
bfa_hwct2_reginit(struct bfa_s *bfa)
{
	struct bfa_iocfc_regs_s *bfa_regs = &bfa->iocfc.bfa_regs;
	void __iomem	*kva = bfa_ioc_bar0(&bfa->ioc);

	bfa_regs->intr_status = (kva + CT2_HOSTFN_INT_STATUS);
	bfa_regs->intr_mask   = (kva + CT2_HOSTFN_INTR_MASK);
}
 
 void
@@ -106,9 +77,9 @@ void
 bfa_hwct_msix_getvecs(struct bfa_s *bfa, u32 *msix_vecs_bmap,
                 u32 *num_vecs, u32 *max_vec_bit)
 {
-       *msix_vecs_bmap = (1 << BFA_MSIX_CT_MAX) - 1;
-       *max_vec_bit = (1 << (BFA_MSIX_CT_MAX - 1));
-       *num_vecs = BFA_MSIX_CT_MAX;
+       *msix_vecs_bmap = (1 << BFI_MSIX_CT_MAX) - 1;
+       *max_vec_bit = (1 << (BFI_MSIX_CT_MAX - 1));
+       *num_vecs = BFI_MSIX_CT_MAX;
 }
 
 /*
@@ -117,7 +88,7 @@ bfa_hwct_msix_getvecs(struct bfa_s *bfa, u32 *msix_vecs_bmap,
 void
 bfa_hwct_msix_init(struct bfa_s *bfa, int nvecs)
 {
-       WARN_ON((nvecs != 1) && (nvecs != BFA_MSIX_CT_MAX));
+       WARN_ON((nvecs != 1) && (nvecs != BFI_MSIX_CT_MAX));
        bfa_trc(bfa, nvecs);
 
        bfa->msix.nvecs = nvecs;
@@ -125,7 +96,19 @@ bfa_hwct_msix_init(struct bfa_s *bfa, int nvecs)
 }
 
 void
-bfa_hwct_msix_install(struct bfa_s *bfa)
bfa_hwct_msix_ctrl_install(struct bfa_s *bfa)
{
	/* MSI-X not initialized yet - nothing to install */
	if (bfa->msix.nvecs == 0)
		return;

	/* Single-vector mode: one handler services all interrupt sources */
	if (bfa->msix.nvecs == 1)
		bfa->msix.handler[BFI_MSIX_LPU_ERR_CT] = bfa_msix_all;
	else
		bfa->msix.handler[BFI_MSIX_LPU_ERR_CT] = bfa_msix_lpu_err;
}
+
+void
+bfa_hwct_msix_queue_install(struct bfa_s *bfa)
 {
        int i;
 
@@ -133,19 +116,16 @@ bfa_hwct_msix_install(struct bfa_s *bfa)
                return;
 
        if (bfa->msix.nvecs == 1) {
-               for (i = 0; i < BFA_MSIX_CT_MAX; i++)
+               for (i = BFI_MSIX_CPE_QMIN_CT; i < BFI_MSIX_CT_MAX; i++)
                        bfa->msix.handler[i] = bfa_msix_all;
                return;
        }
 
-       for (i = BFA_MSIX_CPE_Q0; i <= BFA_MSIX_CPE_Q3; i++)
+       for (i = BFI_MSIX_CPE_QMIN_CT; i <= BFI_MSIX_CPE_QMAX_CT; i++)
                bfa->msix.handler[i] = bfa_msix_reqq;
 
-       for (; i <= BFA_MSIX_RME_Q3; i++)
+       for (i = BFI_MSIX_RME_QMIN_CT; i <= BFI_MSIX_RME_QMAX_CT; i++)
                bfa->msix.handler[i] = bfa_msix_rspq;
-
-       WARN_ON(i != BFA_MSIX_LPU_ERR);
-       bfa->msix.handler[BFA_MSIX_LPU_ERR] = bfa_msix_lpu_err;
 }
 
 void
@@ -153,7 +133,7 @@ bfa_hwct_msix_uninstall(struct bfa_s *bfa)
 {
        int i;
 
-       for (i = 0; i < BFA_MSIX_CT_MAX; i++)
+       for (i = 0; i < BFI_MSIX_CT_MAX; i++)
                bfa->msix.handler[i] = bfa_hwct_msix_dummy;
 }
 
@@ -164,13 +144,12 @@ void
 bfa_hwct_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix)
 {
        bfa_trc(bfa, 0);
-       bfa_hwct_msix_lpu_err_set(bfa, msix, BFA_MSIX_LPU_ERR);
        bfa_ioc_isr_mode_set(&bfa->ioc, msix);
 }
 
 void
 bfa_hwct_msix_get_rme_range(struct bfa_s *bfa, u32 *start, u32 *end)
 {
-       *start = BFA_MSIX_RME_Q0;
-       *end = BFA_MSIX_RME_Q3;
+       *start = BFI_MSIX_RME_QMIN_CT;
+       *end = BFI_MSIX_RME_QMAX_CT;
 }
index 6c7e033..d6c2bf3 100644 (file)
@@ -17,7 +17,7 @@
 
 #include "bfad_drv.h"
 #include "bfa_ioc.h"
-#include "bfi_ctreg.h"
+#include "bfi_reg.h"
 #include "bfa_defs.h"
 #include "bfa_defs_svc.h"
 
@@ -29,8 +29,8 @@ BFA_TRC_FILE(CNA, IOC);
 #define BFA_IOC_TOV            3000    /* msecs */
 #define BFA_IOC_HWSEM_TOV      500     /* msecs */
 #define BFA_IOC_HB_TOV         500     /* msecs */
-#define BFA_IOC_HWINIT_MAX     5
 #define BFA_IOC_TOV_RECOVER     BFA_IOC_HB_TOV
+#define BFA_IOC_POLL_TOV       BFA_TIMER_FREQ
 
 #define bfa_ioc_timer_start(__ioc)                                     \
        bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,        \
@@ -79,14 +79,17 @@ bfa_boolean_t bfa_auto_recover = BFA_TRUE;
 static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
 static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
 static void bfa_ioc_timeout(void *ioc);
+static void bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc);
 static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
 static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
 static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
 static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
 static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
-static void bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc);
+static void bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc);
 static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
 static void bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc);
+static void bfa_ioc_event_notify(struct bfa_ioc_s *ioc ,
+                               enum bfa_ioc_event_e event);
 static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
 static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
 static void bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc);
@@ -105,11 +108,12 @@ enum ioc_event {
        IOC_E_ENABLED           = 5,    /*  f/w enabled         */
        IOC_E_FWRSP_GETATTR     = 6,    /*  IOC get attribute response  */
        IOC_E_DISABLED          = 7,    /*  f/w disabled                */
-       IOC_E_INITFAILED        = 8,    /*  failure notice by iocpf sm  */
-       IOC_E_PFFAILED          = 9,    /*  failure notice by iocpf sm  */
-       IOC_E_HBFAIL            = 10,   /*  heartbeat failure           */
-       IOC_E_HWERROR           = 11,   /*  hardware error interrupt    */
-       IOC_E_TIMEOUT           = 12,   /*  timeout                     */
+       IOC_E_PFFAILED          = 8,    /*  failure notice by iocpf sm  */
+       IOC_E_HBFAIL            = 9,    /*  heartbeat failure           */
+       IOC_E_HWERROR           = 10,   /*  hardware error interrupt    */
+       IOC_E_TIMEOUT           = 11,   /*  timeout                     */
+       IOC_E_HWFAILED          = 12,   /*  PCI mapping failure notice  */
+       IOC_E_FWRSP_ACQ_ADDR    = 13,   /*  Acquiring address           */
 };
 
 bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event);
@@ -121,6 +125,8 @@ bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc_s, enum ioc_event);
 bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc_s, enum ioc_event);
 bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
 bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc_s, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, acq_addr, struct bfa_ioc_s, enum ioc_event);
 
 static struct bfa_sm_table_s ioc_sm_table[] = {
        {BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
@@ -132,6 +138,8 @@ static struct bfa_sm_table_s ioc_sm_table[] = {
        {BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
        {BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
        {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
+       {BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL},
+       {BFA_SM(bfa_ioc_sm_acq_addr), BFA_IOC_ACQ_ADDR},
 };
 
 /*
@@ -143,9 +151,9 @@ static struct bfa_sm_table_s ioc_sm_table[] = {
                        bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV)
 #define bfa_iocpf_timer_stop(__ioc)    bfa_timer_stop(&(__ioc)->ioc_timer)
 
-#define bfa_iocpf_recovery_timer_start(__ioc)                          \
+#define bfa_iocpf_poll_timer_start(__ioc)                              \
        bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,        \
-                       bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV_RECOVER)
+                       bfa_iocpf_poll_timeout, (__ioc), BFA_IOC_POLL_TOV)
 
 #define bfa_sem_timer_start(__ioc)                                     \
        bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->sem_timer,        \
@@ -157,6 +165,7 @@ static struct bfa_sm_table_s ioc_sm_table[] = {
  */
 static void bfa_iocpf_timeout(void *ioc_arg);
 static void bfa_iocpf_sem_timeout(void *ioc_arg);
+static void bfa_iocpf_poll_timeout(void *ioc_arg);
 
 /*
  * IOCPF state machine events
@@ -173,6 +182,7 @@ enum iocpf_event {
        IOCPF_E_GETATTRFAIL     = 9,    /*  init fail notice by ioc sm  */
        IOCPF_E_SEMLOCKED       = 10,   /*  h/w semaphore is locked     */
        IOCPF_E_TIMEOUT         = 11,   /*  f/w response timeout        */
+       IOCPF_E_SEM_ERROR       = 12,   /*  h/w sem mapping error       */
 };
 
 /*
@@ -314,11 +324,16 @@ bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
                /* !!! fall through !!! */
        case IOC_E_HWERROR:
                ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
-               bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
+               bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
                if (event != IOC_E_PFFAILED)
                        bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
                break;
 
+       case IOC_E_HWFAILED:
+               ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+               bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
+               break;
+
        case IOC_E_DISABLE:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
                break;
@@ -356,17 +371,23 @@ bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
        case IOC_E_FWRSP_GETATTR:
                bfa_ioc_timer_stop(ioc);
                bfa_ioc_check_attr_wwns(ioc);
+               bfa_ioc_hb_monitor(ioc);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
                break;
 
+       case IOC_E_FWRSP_ACQ_ADDR:
+               bfa_ioc_timer_stop(ioc);
+               bfa_ioc_hb_monitor(ioc);
+               bfa_fsm_set_state(ioc, bfa_ioc_sm_acq_addr);
                break;
+
        case IOC_E_PFFAILED:
        case IOC_E_HWERROR:
                bfa_ioc_timer_stop(ioc);
                /* !!! fall through !!! */
        case IOC_E_TIMEOUT:
                ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
-               bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
+               bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
                if (event != IOC_E_PFFAILED)
                        bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
                break;
@@ -384,6 +405,50 @@ bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
        }
 }
 
+/*
+ * Acquiring address from fabric (entry function)
+ */
+static void
+bfa_ioc_sm_acq_addr_entry(struct bfa_ioc_s *ioc)
+{
+}
+
+/*
+ *     Acquiring address from the fabric
+ */
+static void
+bfa_ioc_sm_acq_addr(struct bfa_ioc_s *ioc, enum ioc_event event)
+{
+       bfa_trc(ioc, event);
+
+       switch (event) {
+       case IOC_E_FWRSP_GETATTR:
+               bfa_ioc_check_attr_wwns(ioc);
+               bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
+               break;
+
+       case IOC_E_PFFAILED:
+       case IOC_E_HWERROR:
+               bfa_hb_timer_stop(ioc);
+       case IOC_E_HBFAIL:
+               ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+               bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
+               if (event != IOC_E_PFFAILED)
+                       bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
+               break;
+
+       case IOC_E_DISABLE:
+               bfa_hb_timer_stop(ioc);
+               bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
+               break;
+
+       case IOC_E_ENABLE:
+               break;
+
+       default:
+               bfa_sm_fault(ioc, event);
+       }
+}
 
 static void
 bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
@@ -391,7 +456,7 @@ bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
        struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
 
        ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
-       bfa_ioc_hb_monitor(ioc);
+       bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
        BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC enabled\n");
 }
 
@@ -414,13 +479,13 @@ bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
                bfa_hb_timer_stop(ioc);
                /* !!! fall through !!! */
        case IOC_E_HBFAIL:
-               bfa_ioc_fail_notify(ioc);
-
                if (ioc->iocpf.auto_recover)
                        bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
                else
                        bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
 
+               bfa_ioc_fail_notify(ioc);
+
                if (event != IOC_E_PFFAILED)
                        bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
                break;
@@ -461,6 +526,11 @@ bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
                bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
                break;
 
+       case IOC_E_HWFAILED:
+               bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
+               bfa_ioc_disable_comp(ioc);
+               break;
+
        default:
                bfa_sm_fault(ioc, event);
        }
@@ -525,12 +595,14 @@ bfa_ioc_sm_fail_retry(struct bfa_ioc_s *ioc, enum ioc_event event)
                 * Initialization retry failed.
                 */
                ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+               bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
                if (event != IOC_E_PFFAILED)
                        bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
                break;
 
-       case IOC_E_INITFAILED:
-               bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
+       case IOC_E_HWFAILED:
+               ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+               bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
                break;
 
        case IOC_E_ENABLE:
@@ -590,6 +662,35 @@ bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
        }
 }
 
+static void
+bfa_ioc_sm_hwfail_entry(struct bfa_ioc_s *ioc)
+{
+       bfa_trc(ioc, 0);
+}
+
+static void
+bfa_ioc_sm_hwfail(struct bfa_ioc_s *ioc, enum ioc_event event)
+{
+       bfa_trc(ioc, event);
+
+       switch (event) {
+       case IOC_E_ENABLE:
+               ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+               break;
+
+       case IOC_E_DISABLE:
+               ioc->cbfn->disable_cbfn(ioc->bfa);
+               break;
+
+       case IOC_E_DETACH:
+               bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
+               break;
+
+       default:
+               bfa_sm_fault(ioc, event);
+       }
+}
+
 /*
  * IOCPF State Machine
  */
@@ -600,7 +701,7 @@ bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
 static void
 bfa_iocpf_sm_reset_entry(struct bfa_iocpf_s *iocpf)
 {
-       iocpf->retry_count = 0;
+       iocpf->fw_mismatch_notified = BFA_FALSE;
        iocpf->auto_recover = bfa_auto_recover;
 }
 
@@ -633,6 +734,28 @@ bfa_iocpf_sm_reset(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 static void
 bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
 {
+       struct bfi_ioc_image_hdr_s      fwhdr;
+       u32     fwstate = readl(iocpf->ioc->ioc_regs.ioc_fwstate);
+
+       /* h/w sem init */
+       if (fwstate == BFI_IOC_UNINIT)
+               goto sem_get;
+
+       bfa_ioc_fwver_get(iocpf->ioc, &fwhdr);
+
+       if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL)
+               goto sem_get;
+
+       bfa_trc(iocpf->ioc, fwstate);
+       bfa_trc(iocpf->ioc, fwhdr.exec);
+       writel(BFI_IOC_UNINIT, iocpf->ioc->ioc_regs.ioc_fwstate);
+
+       /*
+        * Try to lock and then unlock the semaphore.
+        */
+       readl(iocpf->ioc->ioc_regs.ioc_sem_reg);
+       writel(1, iocpf->ioc->ioc_regs.ioc_sem_reg);
+sem_get:
        bfa_ioc_hw_sem_get(iocpf->ioc);
 }
 
@@ -650,7 +773,6 @@ bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
        case IOCPF_E_SEMLOCKED:
                if (bfa_ioc_firmware_lock(ioc)) {
                        if (bfa_ioc_sync_start(ioc)) {
-                               iocpf->retry_count = 0;
                                bfa_ioc_sync_join(ioc);
                                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
                        } else {
@@ -664,6 +786,11 @@ bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
                }
                break;
 
+       case IOCPF_E_SEM_ERROR:
+               bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
+               bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
+               break;
+
        case IOCPF_E_DISABLE:
                bfa_sem_timer_stop(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
@@ -689,10 +816,10 @@ bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf_s *iocpf)
        /*
         * Call only the first time sm enters fwmismatch state.
         */
-       if (iocpf->retry_count == 0)
+       if (iocpf->fw_mismatch_notified == BFA_FALSE)
                bfa_ioc_pf_fwmismatch(iocpf->ioc);
 
-       iocpf->retry_count++;
+       iocpf->fw_mismatch_notified = BFA_TRUE;
        bfa_iocpf_timer_start(iocpf->ioc);
 }
 
@@ -757,6 +884,11 @@ bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
                }
                break;
 
+       case IOCPF_E_SEM_ERROR:
+               bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
+               bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
+               break;
+
        case IOCPF_E_DISABLE:
                bfa_sem_timer_stop(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
@@ -770,7 +902,7 @@ bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 static void
 bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf)
 {
-       bfa_iocpf_timer_start(iocpf->ioc);
+       iocpf->poll_time = 0;
        bfa_ioc_hwinit(iocpf->ioc, BFA_FALSE);
 }
 
@@ -787,20 +919,12 @@ bfa_iocpf_sm_hwinit(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 
        switch (event) {
        case IOCPF_E_FWREADY:
-               bfa_iocpf_timer_stop(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
                break;
 
-       case IOCPF_E_INITFAIL:
-               bfa_iocpf_timer_stop(ioc);
-               /*
-                * !!! fall through !!!
-                */
-
        case IOCPF_E_TIMEOUT:
                writel(1, ioc->ioc_regs.ioc_sem_reg);
-               if (event == IOCPF_E_TIMEOUT)
-                       bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
+               bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
                break;
 
@@ -820,6 +944,10 @@ static void
 bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf)
 {
        bfa_iocpf_timer_start(iocpf->ioc);
+       /*
+        * Enable Interrupts before sending fw IOC ENABLE cmd.
+        */
+       iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa);
        bfa_ioc_send_enable(iocpf->ioc);
 }
 
@@ -860,10 +988,6 @@ bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
                break;
 
-       case IOCPF_E_FWREADY:
-               bfa_ioc_send_enable(ioc);
-               break;
-
        default:
                bfa_sm_fault(ioc, event);
        }
@@ -895,16 +1019,6 @@ bfa_iocpf_sm_ready(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
                break;
 
-       case IOCPF_E_FWREADY:
-               if (bfa_ioc_is_operational(ioc)) {
-                       bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
-                       bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
-               } else {
-                       bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
-                       bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
-               }
-               break;
-
        default:
                bfa_sm_fault(ioc, event);
        }
@@ -929,7 +1043,6 @@ bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 
        switch (event) {
        case IOCPF_E_FWRSP_DISABLE:
-       case IOCPF_E_FWREADY:
                bfa_iocpf_timer_stop(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
                break;
@@ -976,6 +1089,11 @@ bfa_iocpf_sm_disabling_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
                break;
 
+       case IOCPF_E_SEM_ERROR:
+               bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
+               bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
+               break;
+
        case IOCPF_E_FAIL:
                break;
 
@@ -990,6 +1108,7 @@ bfa_iocpf_sm_disabling_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 static void
 bfa_iocpf_sm_disabled_entry(struct bfa_iocpf_s *iocpf)
 {
+       bfa_ioc_mbox_flush(iocpf->ioc);
        bfa_fsm_send_event(iocpf->ioc, IOC_E_DISABLED);
 }
 
@@ -1002,7 +1121,6 @@ bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 
        switch (event) {
        case IOCPF_E_ENABLE:
-               iocpf->retry_count = 0;
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
                break;
 
@@ -1019,6 +1137,7 @@ bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 static void
 bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf_s *iocpf)
 {
+       bfa_ioc_debug_save_ftrc(iocpf->ioc);
        bfa_ioc_hw_sem_get(iocpf->ioc);
 }
 
@@ -1035,20 +1154,15 @@ bfa_iocpf_sm_initfail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
        switch (event) {
        case IOCPF_E_SEMLOCKED:
                bfa_ioc_notify_fail(ioc);
-               bfa_ioc_sync_ack(ioc);
-               iocpf->retry_count++;
-               if (iocpf->retry_count >= BFA_IOC_HWINIT_MAX) {
-                       bfa_ioc_sync_leave(ioc);
-                       writel(1, ioc->ioc_regs.ioc_sem_reg);
-                       bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
-               } else {
-                       if (bfa_ioc_sync_complete(ioc))
-                               bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
-                       else {
-                               writel(1, ioc->ioc_regs.ioc_sem_reg);
-                               bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
-                       }
-               }
+               bfa_ioc_sync_leave(ioc);
+               writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
+               writel(1, ioc->ioc_regs.ioc_sem_reg);
+               bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
+               break;
+
+       case IOCPF_E_SEM_ERROR:
+               bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
+               bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
                break;
 
        case IOCPF_E_DISABLE:
@@ -1073,7 +1187,7 @@ bfa_iocpf_sm_initfail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 static void
 bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf)
 {
-       bfa_fsm_send_event(iocpf->ioc, IOC_E_INITFAILED);
+       bfa_trc(iocpf->ioc, 0);
 }
 
 /*
@@ -1112,7 +1226,7 @@ bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf_s *iocpf)
        /*
         * Flush any queued up mailbox requests.
         */
-       bfa_ioc_mbox_hbfail(iocpf->ioc);
+       bfa_ioc_mbox_flush(iocpf->ioc);
 
        bfa_ioc_hw_sem_get(iocpf->ioc);
 }
@@ -1126,11 +1240,11 @@ bfa_iocpf_sm_fail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 
        switch (event) {
        case IOCPF_E_SEMLOCKED:
-               iocpf->retry_count = 0;
                bfa_ioc_sync_ack(ioc);
                bfa_ioc_notify_fail(ioc);
                if (!iocpf->auto_recover) {
                        bfa_ioc_sync_leave(ioc);
+                       writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
                        writel(1, ioc->ioc_regs.ioc_sem_reg);
                        bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
                } else {
@@ -1143,6 +1257,11 @@ bfa_iocpf_sm_fail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
                }
                break;
 
+       case IOCPF_E_SEM_ERROR:
+               bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
+               bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
+               break;
+
        case IOCPF_E_DISABLE:
                bfa_sem_timer_stop(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
@@ -1159,6 +1278,7 @@ bfa_iocpf_sm_fail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 static void
 bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf)
 {
+       bfa_trc(iocpf->ioc, 0);
 }
 
 /*
@@ -1185,23 +1305,28 @@ bfa_iocpf_sm_fail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
  *  BFA IOC private functions
  */
 
+/*
+ * Notify common modules registered for notification.
+ */
 static void
-bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
+bfa_ioc_event_notify(struct bfa_ioc_s *ioc, enum bfa_ioc_event_e event)
 {
-       struct list_head                        *qe;
-       struct bfa_ioc_hbfail_notify_s  *notify;
-
-       ioc->cbfn->disable_cbfn(ioc->bfa);
+       struct bfa_ioc_notify_s *notify;
+       struct list_head        *qe;
 
-       /*
-        * Notify common modules registered for notification.
-        */
-       list_for_each(qe, &ioc->hb_notify_q) {
-               notify = (struct bfa_ioc_hbfail_notify_s *) qe;
-               notify->cbfn(notify->cbarg);
+       list_for_each(qe, &ioc->notify_q) {
+               notify = (struct bfa_ioc_notify_s *)qe;
+               notify->cbfn(notify->cbarg, event);
        }
 }
 
+static void
+bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
+{
+       ioc->cbfn->disable_cbfn(ioc->bfa);
+       bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED);
+}
+
 bfa_boolean_t
 bfa_ioc_sem_get(void __iomem *sem_reg)
 {
@@ -1211,16 +1336,15 @@ bfa_ioc_sem_get(void __iomem *sem_reg)
 
        r32 = readl(sem_reg);
 
-       while (r32 && (cnt < BFA_SEM_SPINCNT)) {
+       while ((r32 & 1) && (cnt < BFA_SEM_SPINCNT)) {
                cnt++;
                udelay(2);
                r32 = readl(sem_reg);
        }
 
-       if (r32 == 0)
+       if (!(r32 & 1))
                return BFA_TRUE;
 
-       WARN_ON(cnt >= BFA_SEM_SPINCNT);
        return BFA_FALSE;
 }
 
@@ -1234,7 +1358,12 @@ bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
         * will return 1. Semaphore is released by writing 1 to the register
         */
        r32 = readl(ioc->ioc_regs.ioc_sem_reg);
-       if (r32 == 0) {
+       if (r32 == ~0) {
+               WARN_ON(r32 == ~0);
+               bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR);
+               return;
+       }
+       if (!(r32 & 1)) {
                bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
                return;
        }
@@ -1343,7 +1472,7 @@ bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
        int i;
 
        drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
-               bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);
+               bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
 
        for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
                if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i]) {
@@ -1369,7 +1498,7 @@ bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
 
        bfa_ioc_fwver_get(ioc, &fwhdr);
        drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
-               bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);
+               bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
 
        if (fwhdr.signature != drv_fwhdr->signature) {
                bfa_trc(ioc, fwhdr.signature);
@@ -1377,8 +1506,8 @@ bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
                return BFA_FALSE;
        }
 
-       if (swab32(fwhdr.param) != boot_env) {
-               bfa_trc(ioc, fwhdr.param);
+       if (swab32(fwhdr.bootenv) != boot_env) {
+               bfa_trc(ioc, fwhdr.bootenv);
                bfa_trc(ioc, boot_env);
                return BFA_FALSE;
        }
@@ -1414,8 +1543,8 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
 
        bfa_trc(ioc, ioc_fwstate);
 
-       boot_type = BFI_BOOT_TYPE_NORMAL;
-       boot_env = BFI_BOOT_LOADER_OS;
+       boot_type = BFI_FWBOOT_TYPE_NORMAL;
+       boot_env = BFI_FWBOOT_ENV_OS;
 
        /*
         * check if firmware is valid
@@ -1425,6 +1554,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
 
        if (!fwvalid) {
                bfa_ioc_boot(ioc, boot_type, boot_env);
+               bfa_ioc_poll_fwinit(ioc);
                return;
        }
 
@@ -1433,7 +1563,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
         * just wait for an initialization completion interrupt.
         */
        if (ioc_fwstate == BFI_IOC_INITING) {
-               ioc->cbfn->reset_cbfn(ioc->bfa);
+               bfa_ioc_poll_fwinit(ioc);
                return;
        }
 
@@ -1452,7 +1582,6 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
                 * be flushed. Otherwise MSI-X interrupts are not delivered.
                 */
                bfa_ioc_msgflush(ioc);
-               ioc->cbfn->reset_cbfn(ioc->bfa);
                bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
                return;
        }
@@ -1461,6 +1590,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
         * Initialize the h/w for any other states.
         */
        bfa_ioc_boot(ioc, boot_type, boot_env);
+       bfa_ioc_poll_fwinit(ioc);
 }
 
 static void
@@ -1508,7 +1638,7 @@ bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
 
        bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
                    bfa_ioc_portid(ioc));
-       enable_req.ioc_class = ioc->ioc_mc;
+       enable_req.clscode = cpu_to_be16(ioc->clscode);
        do_gettimeofday(&tv);
        enable_req.tv_sec = be32_to_cpu(tv.tv_sec);
        bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
@@ -1572,25 +1702,26 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
        u32 loff = 0;
        u32 chunkno = 0;
        u32 i;
+       u32 asicmode;
 
        /*
         * Initialize LMEM first before code download
         */
        bfa_ioc_lmem_init(ioc);
 
-       bfa_trc(ioc, bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)));
-       fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno);
+       bfa_trc(ioc, bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)));
+       fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno);
 
        pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
        pgoff = PSS_SMEM_PGOFF(loff);
 
        writel(pgnum, ioc->ioc_regs.host_page_num_fn);
 
-       for (i = 0; i < bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)); i++) {
+       for (i = 0; i < bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)); i++) {
 
                if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
                        chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
-                       fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc),
+                       fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
                                        BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
                }
 
@@ -1616,11 +1747,15 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
                        ioc->ioc_regs.host_page_num_fn);
 
        /*
-        * Set boot type and boot param at the end.
-       */
-       bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_TYPE_OFF,
+        * Set boot type and device mode at the end.
+        */
+       asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
+                               ioc->port0_mode, ioc->port1_mode);
+       bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_DEVMODE_OFF,
+                       swab32(asicmode));
+       bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_TYPE_OFF,
                        swab32(boot_type));
-       bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_LOADER_OFF,
+       bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_ENV_OFF,
                        swab32(boot_env));
 }
 
@@ -1636,6 +1771,7 @@ bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
        attr->adapter_prop  = be32_to_cpu(attr->adapter_prop);
        attr->card_type     = be32_to_cpu(attr->card_type);
        attr->maxfrsize     = be16_to_cpu(attr->maxfrsize);
+       ioc->fcmode     = (attr->port_mode == BFI_PORT_MODE_FC);
 
        bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
 }
@@ -1690,7 +1826,7 @@ bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
  * Cleanup any pending requests.
  */
 static void
-bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc)
+bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc)
 {
        struct bfa_ioc_mbox_mod_s       *mod = &ioc->mbox_mod;
        struct bfa_mbox_cmd_s           *cmd;
@@ -1752,6 +1888,7 @@ bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz)
        /*
         *  release semaphore.
         */
+       readl(ioc->ioc_regs.ioc_init_sem_reg);
        writel(1, ioc->ioc_regs.ioc_init_sem_reg);
 
        bfa_trc(ioc, pgnum);
@@ -1808,6 +1945,7 @@ bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
        /*
         *  release semaphore.
         */
+       readl(ioc->ioc_regs.ioc_init_sem_reg);
        writel(1, ioc->ioc_regs.ioc_init_sem_reg);
        bfa_trc(ioc, pgnum);
        return BFA_STATUS_OK;
@@ -1816,18 +1954,13 @@ bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
 static void
 bfa_ioc_fail_notify(struct bfa_ioc_s *ioc)
 {
-       struct list_head                *qe;
-       struct bfa_ioc_hbfail_notify_s  *notify;
        struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
 
        /*
         * Notify driver and common modules registered for notification.
         */
        ioc->cbfn->hbfail_cbfn(ioc->bfa);
-       list_for_each(qe, &ioc->hb_notify_q) {
-               notify = (struct bfa_ioc_hbfail_notify_s *) qe;
-               notify->cbfn(notify->cbarg);
-       }
+       bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);
 
        bfa_ioc_debug_save_ftrc(ioc);
 
@@ -1864,6 +1997,7 @@ bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
        /*
         *  release semaphore.
         */
+       readl(ioc->ioc_regs.ioc_init_sem_reg);
        writel(1, ioc->ioc_regs.ioc_init_sem_reg);
 
        return BFA_STATUS_OK;
@@ -1876,8 +2010,6 @@ bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
 void
 bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
 {
-       void __iomem *rb;
-
        bfa_ioc_stats(ioc, ioc_boots);
 
        if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
@@ -1886,22 +2018,16 @@ bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
        /*
         * Initialize IOC state of all functions on a chip reset.
         */
-       rb = ioc->pcidev.pci_bar_kva;
-       if (boot_type == BFI_BOOT_TYPE_MEMTEST) {
-               writel(BFI_IOC_MEMTEST, (rb + BFA_IOC0_STATE_REG));
-               writel(BFI_IOC_MEMTEST, (rb + BFA_IOC1_STATE_REG));
+       if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) {
+               writel(BFI_IOC_MEMTEST, ioc->ioc_regs.ioc_fwstate);
+               writel(BFI_IOC_MEMTEST, ioc->ioc_regs.alt_ioc_fwstate);
        } else {
-               writel(BFI_IOC_INITING, (rb + BFA_IOC0_STATE_REG));
-               writel(BFI_IOC_INITING, (rb + BFA_IOC1_STATE_REG));
+               writel(BFI_IOC_INITING, ioc->ioc_regs.ioc_fwstate);
+               writel(BFI_IOC_INITING, ioc->ioc_regs.alt_ioc_fwstate);
        }
 
        bfa_ioc_msgflush(ioc);
        bfa_ioc_download_fw(ioc, boot_type, boot_env);
-
-       /*
-        * Enable interrupts just before starting LPU
-        */
-       ioc->cbfn->reset_cbfn(ioc->bfa);
        bfa_ioc_lpu_start(ioc);
 }
 
@@ -1932,13 +2058,17 @@ bfa_ioc_is_initialized(struct bfa_ioc_s *ioc)
                (r32 != BFI_IOC_MEMTEST));
 }
 
-void
+bfa_boolean_t
 bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
 {
        __be32  *msgp = mbmsg;
        u32     r32;
        int             i;
 
+       r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
+       if ((r32 & 1) == 0)
+               return BFA_FALSE;
+
        /*
         * read the MBOX msg
         */
@@ -1954,6 +2084,8 @@ bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
         */
        writel(1, ioc->ioc_regs.lpu_mbox_cmd);
        readl(ioc->ioc_regs.lpu_mbox_cmd);
+
+       return BFA_TRUE;
 }
 
 void
@@ -1970,11 +2102,10 @@ bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
        case BFI_IOC_I2H_HBEAT:
                break;
 
-       case BFI_IOC_I2H_READY_EVENT:
-               bfa_fsm_send_event(iocpf, IOCPF_E_FWREADY);
-               break;
-
        case BFI_IOC_I2H_ENABLE_REPLY:
+               ioc->port_mode = ioc->port_mode_cfg =
+                               (enum bfa_mode_s)msg->fw_event.port_mode;
+               ioc->ad_cap_bm = msg->fw_event.cap_bm;
                bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
                break;
 
@@ -1986,6 +2117,10 @@ bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
                bfa_ioc_getattr_reply(ioc);
                break;
 
+       case BFI_IOC_I2H_ACQ_ADDR_REPLY:
+               bfa_fsm_send_event(ioc, IOC_E_FWRSP_ACQ_ADDR);
+               break;
+
        default:
                bfa_trc(ioc, msg->mh.msg_id);
                WARN_ON(1);
@@ -2011,7 +2146,7 @@ bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
        ioc->iocpf.ioc  = ioc;
 
        bfa_ioc_mbox_attach(ioc);
-       INIT_LIST_HEAD(&ioc->hb_notify_q);
+       INIT_LIST_HEAD(&ioc->notify_q);
 
        bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
        bfa_fsm_send_event(ioc, IOC_E_RESET);
@@ -2024,6 +2159,7 @@ void
 bfa_ioc_detach(struct bfa_ioc_s *ioc)
 {
        bfa_fsm_send_event(ioc, IOC_E_DETACH);
+       INIT_LIST_HEAD(&ioc->notify_q);
 }
 
 /*
@@ -2033,20 +2169,80 @@ bfa_ioc_detach(struct bfa_ioc_s *ioc)
  */
 void
 bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
-                enum bfi_mclass mc)
+               enum bfi_pcifn_class clscode)
 {
-       ioc->ioc_mc     = mc;
+       ioc->clscode    = clscode;
        ioc->pcidev     = *pcidev;
-       ioc->ctdev      = bfa_asic_id_ct(ioc->pcidev.device_id);
-       ioc->cna        = ioc->ctdev && !ioc->fcmode;
+
+       /*
+        * Initialize IOC and device personality
+        */
+       ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC;
+       ioc->asic_mode  = BFI_ASIC_MODE_FC;
+
+       switch (pcidev->device_id) {
+       case BFA_PCI_DEVICE_ID_FC_8G1P:
+       case BFA_PCI_DEVICE_ID_FC_8G2P:
+               ioc->asic_gen = BFI_ASIC_GEN_CB;
+               ioc->fcmode = BFA_TRUE;
+               ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
+               ioc->ad_cap_bm = BFA_CM_HBA;
+               break;
+
+       case BFA_PCI_DEVICE_ID_CT:
+               ioc->asic_gen = BFI_ASIC_GEN_CT;
+               ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
+               ioc->asic_mode  = BFI_ASIC_MODE_ETH;
+               ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA;
+               ioc->ad_cap_bm = BFA_CM_CNA;
+               break;
+
+       case BFA_PCI_DEVICE_ID_CT_FC:
+               ioc->asic_gen = BFI_ASIC_GEN_CT;
+               ioc->fcmode = BFA_TRUE;
+               ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
+               ioc->ad_cap_bm = BFA_CM_HBA;
+               break;
+
+       case BFA_PCI_DEVICE_ID_CT2:
+               ioc->asic_gen = BFI_ASIC_GEN_CT2;
+               if (clscode == BFI_PCIFN_CLASS_FC &&
+                   pcidev->ssid == BFA_PCI_CT2_SSID_FC) {
+                       ioc->asic_mode  = BFI_ASIC_MODE_FC16;
+                       ioc->fcmode = BFA_TRUE;
+                       ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
+                       ioc->ad_cap_bm = BFA_CM_HBA;
+               } else {
+                       ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
+                       ioc->asic_mode  = BFI_ASIC_MODE_ETH;
+                       if (pcidev->ssid == BFA_PCI_CT2_SSID_FCoE) {
+                               ioc->port_mode =
+                               ioc->port_mode_cfg = BFA_MODE_CNA;
+                               ioc->ad_cap_bm = BFA_CM_CNA;
+                       } else {
+                               ioc->port_mode =
+                               ioc->port_mode_cfg = BFA_MODE_NIC;
+                               ioc->ad_cap_bm = BFA_CM_NIC;
+                       }
+               }
+               break;
+
+       default:
+               WARN_ON(1);
+       }
 
        /*
         * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c
         */
-       if (ioc->ctdev)
-               bfa_ioc_set_ct_hwif(ioc);
-       else
+       if (ioc->asic_gen == BFI_ASIC_GEN_CB)
                bfa_ioc_set_cb_hwif(ioc);
+       else if (ioc->asic_gen == BFI_ASIC_GEN_CT)
+               bfa_ioc_set_ct_hwif(ioc);
+       else {
+               WARN_ON(ioc->asic_gen != BFI_ASIC_GEN_CT2);
+               bfa_ioc_set_ct2_hwif(ioc);
+               bfa_ioc_ct2_poweron(ioc);
+       }
 
        bfa_ioc_map_port(ioc);
        bfa_ioc_reg_init(ioc);
@@ -2172,36 +2368,38 @@ bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
        struct bfi_mbmsg_s              m;
        int                             mc;
 
-       bfa_ioc_msgget(ioc, &m);
+       if (bfa_ioc_msgget(ioc, &m)) {
+               /*
+                * Treat IOC message class as special.
+                */
+               mc = m.mh.msg_class;
+               if (mc == BFI_MC_IOC) {
+                       bfa_ioc_isr(ioc, &m);
+                       return;
+               }
+
+               if ((mc > BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
+                       return;
 
-       /*
-        * Treat IOC message class as special.
-        */
-       mc = m.mh.msg_class;
-       if (mc == BFI_MC_IOC) {
-               bfa_ioc_isr(ioc, &m);
-               return;
+               mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
        }
 
-       if ((mc > BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
-               return;
+       bfa_ioc_lpu_read_stat(ioc);
 
-       mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
+       /*
+        * Try to send pending mailbox commands
+        */
+       bfa_ioc_mbox_poll(ioc);
 }
 
/*
 * Hardware error interrupt handler.
 *
 * Counts the failure in the IOC stats, snapshots the heartbeat count at
 * the time of failure (for later stats queries), and drives the IOC
 * state machine with a hardware-error event.
 */
void
bfa_ioc_error_isr(struct bfa_ioc_s *ioc)
{
	bfa_ioc_stats(ioc, ioc_hbfails);
	ioc->stats.hb_count = ioc->hb_count;
	bfa_fsm_send_event(ioc, IOC_E_HWERROR);
}
 
-void
-bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc)
-{
-       ioc->fcmode  = BFA_TRUE;
-       ioc->port_id = bfa_ioc_pcifn(ioc);
-}
-
 /*
  * return true if IOC is disabled
  */
@@ -2212,6 +2410,15 @@ bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
                bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
 }
 
/*
 * Return TRUE if the IOC state machine is currently in the
 * address-acquisition state (bfa_ioc_sm_acq_addr), FALSE otherwise.
 */
bfa_boolean_t
bfa_ioc_is_acq_addr(struct bfa_ioc_s *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_acq_addr);
}
+
 /*
  * return true if IOC firmware is different.
  */
@@ -2239,17 +2446,16 @@ bfa_boolean_t
 bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
 {
        u32     ioc_state;
-       void __iomem *rb = ioc->pcidev.pci_bar_kva;
 
        if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
                return BFA_FALSE;
 
-       ioc_state = readl(rb + BFA_IOC0_STATE_REG);
+       ioc_state = readl(ioc->ioc_regs.ioc_fwstate);
        if (!bfa_ioc_state_disabled(ioc_state))
                return BFA_FALSE;
 
        if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) {
-               ioc_state = readl(rb + BFA_IOC1_STATE_REG);
+               ioc_state = readl(ioc->ioc_regs.alt_ioc_fwstate);
                if (!bfa_ioc_state_disabled(ioc_state))
                        return BFA_FALSE;
        }
@@ -2308,24 +2514,21 @@ bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
 
        bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
 
-       ad_attr->cna_capable = ioc->cna;
-       ad_attr->trunk_capable = (ad_attr->nports > 1) && !ioc->cna &&
-                               !ad_attr->is_mezz;
+       ad_attr->cna_capable = bfa_ioc_is_cna(ioc);
+       ad_attr->trunk_capable = (ad_attr->nports > 1) &&
+                                 !bfa_ioc_is_cna(ioc) && !ad_attr->is_mezz;
 }
 
/*
 * Derive the IOC personality from the PCI function class code:
 * Ethernet-class functions are LL; FC-class functions are FC or FCoE
 * depending on the port mode reported in the firmware IOC attributes.
 */
enum bfa_ioc_type_e
bfa_ioc_get_type(struct bfa_ioc_s *ioc)
{
	if (ioc->clscode == BFI_PCIFN_CLASS_ETH)
		return BFA_IOC_TYPE_LL;

	/* only ETH and FC class codes are expected here */
	WARN_ON(ioc->clscode != BFI_PCIFN_CLASS_FC);

	return (ioc->attr->port_mode == BFI_PORT_MODE_FC)
		? BFA_IOC_TYPE_FC : BFA_IOC_TYPE_FCoE;
}
 
 void
@@ -2384,11 +2587,8 @@ bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
 
        ioc_attr = ioc->attr;
 
-       /*
-        * model name
-        */
        snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
-               BFA_MFG_NAME, ioc_attr->card_type);
+                       BFA_MFG_NAME, ioc_attr->card_type);
 }
 
 enum bfa_ioc_state
@@ -2438,6 +2638,9 @@ bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
 
        ioc_attr->state = bfa_ioc_get_state(ioc);
        ioc_attr->port_id = ioc->port_id;
+       ioc_attr->port_mode = ioc->port_mode;
+       ioc_attr->port_mode_cfg = ioc->port_mode_cfg;
+       ioc_attr->cap_bm = ioc->ad_cap_bm;
 
        ioc_attr->ioc_type = bfa_ioc_get_type(ioc);
 
@@ -2475,12 +2678,6 @@ bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc)
        return m;
 }
 
-bfa_boolean_t
-bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc)
-{
-       return ioc->fcmode || !bfa_asic_id_ct(ioc->pcidev.device_id);
-}
-
 /*
  * Retrieve saved firmware trace from a prior IOC failure.
  */
@@ -2531,7 +2728,7 @@ bfa_ioc_send_fwsync(struct bfa_ioc_s *ioc)
 
        bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
                    bfa_ioc_portid(ioc));
-       req->ioc_class = ioc->ioc_mc;
+       req->clscode = cpu_to_be16(ioc->clscode);
        bfa_ioc_mbox_queue(ioc, &cmd);
 }
 
@@ -2673,6 +2870,7 @@ static void
 bfa_ioc_recover(struct bfa_ioc_s *ioc)
 {
        bfa_ioc_stats(ioc, ioc_hbfails);
+       ioc->stats.hb_count = ioc->hb_count;
        bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
 }
 
@@ -2703,6 +2901,34 @@ bfa_iocpf_sem_timeout(void *ioc_arg)
        bfa_ioc_hw_sem_get(ioc);
 }
 
/*
 * Poll the firmware state register for initialization progress.
 *
 * If firmware reports DISABLED, forward FWREADY to the IOCPF state
 * machine.  Otherwise keep re-arming the poll timer every
 * BFA_IOC_POLL_TOV until the accumulated poll time reaches BFA_IOC_TOV,
 * at which point the IOCPF timeout path takes over.
 */
static void
bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc)
{
	u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate);

	bfa_trc(ioc, fwstate);

	if (fwstate == BFI_IOC_DISABLED) {
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
		return;
	}

	if (ioc->iocpf.poll_time >= BFA_IOC_TOV)
		bfa_iocpf_timeout(ioc);
	else {
		ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
		bfa_iocpf_poll_timer_start(ioc);
	}
}
+
/*
 * Poll timer callback: re-check firmware initialization progress.
 */
static void
bfa_iocpf_poll_timeout(void *ioc_arg)
{
	struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;

	bfa_ioc_poll_fwinit(ioc);
}
+
 /*
  *  bfa timer function
  */
@@ -2770,3 +2996,2423 @@ bfa_timer_stop(struct bfa_timer_s *timer)
 
        list_del(&timer->qe);
 }
+
+/*
+ *     ASIC block related
+ */
+static void
+bfa_ablk_config_swap(struct bfa_ablk_cfg_s *cfg)
+{
+       struct bfa_ablk_cfg_inst_s *cfg_inst;
+       int i, j;
+       u16     be16;
+       u32     be32;
+
+       for (i = 0; i < BFA_ABLK_MAX; i++) {
+               cfg_inst = &cfg->inst[i];
+               for (j = 0; j < BFA_ABLK_MAX_PFS; j++) {
+                       be16 = cfg_inst->pf_cfg[j].pers;
+                       cfg_inst->pf_cfg[j].pers = be16_to_cpu(be16);
+                       be16 = cfg_inst->pf_cfg[j].num_qpairs;
+                       cfg_inst->pf_cfg[j].num_qpairs = be16_to_cpu(be16);
+                       be16 = cfg_inst->pf_cfg[j].num_vectors;
+                       cfg_inst->pf_cfg[j].num_vectors = be16_to_cpu(be16);
+                       be32 = cfg_inst->pf_cfg[j].bw;
+                       cfg_inst->pf_cfg[j].bw = be16_to_cpu(be32);
+               }
+       }
+}
+
/*
 * Mailbox response handler for ASIC block (BFI_MC_ABLK) messages.
 *
 * Completes the single outstanding ABLK request: for a query, copies
 * the firmware config out of the DMA buffer and byte-swaps it; for
 * adapter/port config, records the firmware-reported port mode; for PF
 * create, returns the new PCI function number.  Finally clears the busy
 * flag and invokes the caller's completion callback (if any) with the
 * firmware status.
 */
static void
bfa_ablk_isr(void *cbarg, struct bfi_mbmsg_s *msg)
{
	struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;
	struct bfi_ablk_i2h_rsp_s *rsp = (struct bfi_ablk_i2h_rsp_s *)msg;
	bfa_ablk_cbfn_t cbfn;

	WARN_ON(msg->mh.msg_class != BFI_MC_ABLK);
	bfa_trc(ablk->ioc, msg->mh.msg_id);

	switch (msg->mh.msg_id) {
	case BFI_ABLK_I2H_QUERY:
		if (rsp->status == BFA_STATUS_OK) {
			/* DMA buffer holds the big-endian config from fw */
			memcpy(ablk->cfg, ablk->dma_addr.kva,
				sizeof(struct bfa_ablk_cfg_s));
			bfa_ablk_config_swap(ablk->cfg);
			ablk->cfg = NULL;
		}
		break;

	case BFI_ABLK_I2H_ADPT_CONFIG:
	case BFI_ABLK_I2H_PORT_CONFIG:
		/* update config port mode */
		ablk->ioc->port_mode_cfg = rsp->port_mode;

		/* fall through - remaining completion handling is common */
	case BFI_ABLK_I2H_PF_DELETE:
	case BFI_ABLK_I2H_PF_UPDATE:
	case BFI_ABLK_I2H_OPTROM_ENABLE:
	case BFI_ABLK_I2H_OPTROM_DISABLE:
		/* No-op */
		break;

	case BFI_ABLK_I2H_PF_CREATE:
		*(ablk->pcifn) = rsp->pcifn;
		ablk->pcifn = NULL;
		break;

	default:
		WARN_ON(1);
	}

	ablk->busy = BFA_FALSE;
	if (ablk->cbfn) {
		/* clear cbfn first: the callback may queue a new request */
		cbfn = ablk->cbfn;
		ablk->cbfn = NULL;
		cbfn(ablk->cbarg, rsp->status);
	}
}
+
/*
 * IOC event notification for the ASIC block module.
 *
 * On IOC disable or failure, any in-flight ABLK request is failed with
 * BFA_STATUS_FAILED so the caller's completion callback is never left
 * pending.
 */
static void
bfa_ablk_notify(void *cbarg, enum bfa_ioc_event_e event)
{
	struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;

	bfa_trc(ablk->ioc, event);

	switch (event) {
	case BFA_IOC_E_ENABLED:
		/* no request can be outstanding across an enable */
		WARN_ON(ablk->busy != BFA_FALSE);
		break;

	case BFA_IOC_E_DISABLED:
	case BFA_IOC_E_FAILED:
		/* Fail any pending requests */
		ablk->pcifn = NULL;
		if (ablk->busy) {
			if (ablk->cbfn)
				ablk->cbfn(ablk->cbarg, BFA_STATUS_FAILED);
			ablk->cbfn = NULL;
			ablk->busy = BFA_FALSE;
		}
		break;

	default:
		WARN_ON(1);
		break;
	}
}
+
/*
 * Return the DMA memory size (in bytes, DMA-aligned) required by the
 * ASIC block module for the firmware config buffer.
 */
u32
bfa_ablk_meminfo(void)
{
	return BFA_ROUNDUP(sizeof(struct bfa_ablk_cfg_s), BFA_DMA_ALIGN_SZ);
}
+
/*
 * Claim the DMA memory (kernel virtual + physical address pair) carved
 * out for the ASIC block module's config buffer.
 */
void
bfa_ablk_memclaim(struct bfa_ablk_s *ablk, u8 *dma_kva, u64 dma_pa)
{
	ablk->dma_addr.kva = dma_kva;
	ablk->dma_addr.pa  = dma_pa;
}
+
/*
 * Attach the ASIC block module to an IOC: register the mailbox ISR for
 * the BFI_MC_ABLK message class and sign up for IOC event
 * notifications (see bfa_ablk_notify).
 */
void
bfa_ablk_attach(struct bfa_ablk_s *ablk, struct bfa_ioc_s *ioc)
{
	ablk->ioc = ioc;

	bfa_ioc_mbox_regisr(ablk->ioc, BFI_MC_ABLK, bfa_ablk_isr, ablk);
	bfa_q_qe_init(&ablk->ioc_notify);
	bfa_ioc_notify_init(&ablk->ioc_notify, bfa_ablk_notify, ablk);
	list_add_tail(&ablk->ioc_notify.qe, &ablk->ioc->notify_q);
}
+
/*
 * Query the ASIC block configuration from firmware into *ablk_cfg.
 *
 * The result arrives via DMA and is completed through cbfn/cbarg (see
 * bfa_ablk_isr).  Returns BFA_STATUS_IOC_FAILURE if the IOC is not
 * operational, BFA_STATUS_DEVBUSY if another ABLK request is already in
 * flight (only one outstanding request is supported), BFA_STATUS_OK if
 * the mailbox command was queued.
 */
bfa_status_t
bfa_ablk_query(struct bfa_ablk_s *ablk, struct bfa_ablk_cfg_s *ablk_cfg,
		bfa_ablk_cbfn_t cbfn, void *cbarg)
{
	struct bfi_ablk_h2i_query_s *m;

	WARN_ON(!ablk_cfg);

	if (!bfa_ioc_is_operational(ablk->ioc)) {
		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
		return BFA_STATUS_IOC_FAILURE;
	}

	if (ablk->busy) {
		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
		return  BFA_STATUS_DEVBUSY;
	}

	ablk->cfg = ablk_cfg;
	ablk->cbfn  = cbfn;
	ablk->cbarg = cbarg;
	ablk->busy  = BFA_TRUE;

	m = (struct bfi_ablk_h2i_query_s *)ablk->mb.msg;
	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_QUERY,
		    bfa_ioc_portid(ablk->ioc));
	bfa_dma_be_addr_set(m->addr, ablk->dma_addr.pa);
	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);

	return BFA_STATUS_OK;
}
+
/*
 * Create a PCI function on the given port with the requested
 * personality (class code) and bandwidth.  The new PCI function number
 * is written to *pcifn on completion (see bfa_ablk_isr).
 *
 * Returns BFA_STATUS_IOC_FAILURE if the IOC is not operational,
 * BFA_STATUS_DEVBUSY if another ABLK request is in flight,
 * BFA_STATUS_OK if the mailbox command was queued.
 */
bfa_status_t
bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn,
		u8 port, enum bfi_pcifn_class personality, int bw,
		bfa_ablk_cbfn_t cbfn, void *cbarg)
{
	struct bfi_ablk_h2i_pf_req_s *m;

	if (!bfa_ioc_is_operational(ablk->ioc)) {
		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
		return BFA_STATUS_IOC_FAILURE;
	}

	if (ablk->busy) {
		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
		return  BFA_STATUS_DEVBUSY;
	}

	ablk->pcifn = pcifn;
	ablk->cbfn = cbfn;
	ablk->cbarg = cbarg;
	ablk->busy  = BFA_TRUE;

	m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_CREATE,
		    bfa_ioc_portid(ablk->ioc));
	/* multi-byte fields go out big-endian */
	m->pers = cpu_to_be16((u16)personality);
	m->bw = cpu_to_be32(bw);
	m->port = port;
	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);

	return BFA_STATUS_OK;
}
+
/*
 * Delete the given PCI function.  Completion is reported through
 * cbfn/cbarg.
 *
 * Returns BFA_STATUS_IOC_FAILURE if the IOC is not operational,
 * BFA_STATUS_DEVBUSY if another ABLK request is in flight,
 * BFA_STATUS_OK if the mailbox command was queued.
 */
bfa_status_t
bfa_ablk_pf_delete(struct bfa_ablk_s *ablk, int pcifn,
		bfa_ablk_cbfn_t cbfn, void *cbarg)
{
	struct bfi_ablk_h2i_pf_req_s *m;

	if (!bfa_ioc_is_operational(ablk->ioc)) {
		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
		return BFA_STATUS_IOC_FAILURE;
	}

	if (ablk->busy) {
		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
		return  BFA_STATUS_DEVBUSY;
	}

	ablk->cbfn  = cbfn;
	ablk->cbarg = cbarg;
	ablk->busy  = BFA_TRUE;

	m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_DELETE,
		    bfa_ioc_portid(ablk->ioc));
	m->pcifn = (u8)pcifn;
	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);

	return BFA_STATUS_OK;
}
+
/*
 * Configure adapter-wide operating mode and the maximum number of
 * physical/virtual functions.  Completion is reported through
 * cbfn/cbarg; the firmware-confirmed port mode is recorded in
 * bfa_ablk_isr.
 *
 * Returns BFA_STATUS_IOC_FAILURE if the IOC is not operational,
 * BFA_STATUS_DEVBUSY if another ABLK request is in flight,
 * BFA_STATUS_OK if the mailbox command was queued.
 */
bfa_status_t
bfa_ablk_adapter_config(struct bfa_ablk_s *ablk, enum bfa_mode_s mode,
		int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg)
{
	struct bfi_ablk_h2i_cfg_req_s *m;

	if (!bfa_ioc_is_operational(ablk->ioc)) {
		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
		return BFA_STATUS_IOC_FAILURE;
	}

	if (ablk->busy) {
		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
		return  BFA_STATUS_DEVBUSY;
	}

	ablk->cbfn  = cbfn;
	ablk->cbarg = cbarg;
	ablk->busy  = BFA_TRUE;

	m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_ADPT_CONFIG,
		    bfa_ioc_portid(ablk->ioc));
	m->mode = (u8)mode;
	m->max_pf = (u8)max_pf;
	m->max_vf = (u8)max_vf;
	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);

	return BFA_STATUS_OK;
}
+
/*
 * Configure a single port's operating mode and maximum number of
 * physical/virtual functions.  Completion is reported through
 * cbfn/cbarg.
 *
 * Returns BFA_STATUS_IOC_FAILURE if the IOC is not operational,
 * BFA_STATUS_DEVBUSY if another ABLK request is in flight,
 * BFA_STATUS_OK if the mailbox command was queued.
 */
bfa_status_t
bfa_ablk_port_config(struct bfa_ablk_s *ablk, int port, enum bfa_mode_s mode,
		int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg)
{
	struct bfi_ablk_h2i_cfg_req_s *m;

	if (!bfa_ioc_is_operational(ablk->ioc)) {
		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
		return BFA_STATUS_IOC_FAILURE;
	}

	if (ablk->busy) {
		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
		return  BFA_STATUS_DEVBUSY;
	}

	ablk->cbfn  = cbfn;
	ablk->cbarg = cbarg;
	ablk->busy  = BFA_TRUE;

	m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PORT_CONFIG,
		bfa_ioc_portid(ablk->ioc));
	m->port = (u8)port;
	m->mode = (u8)mode;
	m->max_pf = (u8)max_pf;
	m->max_vf = (u8)max_vf;
	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);

	return BFA_STATUS_OK;
}
+
/*
 * Update the bandwidth allocation of an existing PCI function.
 * Completion is reported through cbfn/cbarg.
 *
 * Returns BFA_STATUS_IOC_FAILURE if the IOC is not operational,
 * BFA_STATUS_DEVBUSY if another ABLK request is in flight,
 * BFA_STATUS_OK if the mailbox command was queued.
 */
bfa_status_t
bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn, int bw,
		bfa_ablk_cbfn_t cbfn, void *cbarg)
{
	struct bfi_ablk_h2i_pf_req_s *m;

	if (!bfa_ioc_is_operational(ablk->ioc)) {
		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
		return BFA_STATUS_IOC_FAILURE;
	}

	if (ablk->busy) {
		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
		return  BFA_STATUS_DEVBUSY;
	}

	ablk->cbfn  = cbfn;
	ablk->cbarg = cbarg;
	ablk->busy  = BFA_TRUE;

	m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_UPDATE,
		bfa_ioc_portid(ablk->ioc));
	m->pcifn = (u8)pcifn;
	m->bw = cpu_to_be32(bw);	/* wire format is big-endian */
	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);

	return BFA_STATUS_OK;
}
+
/*
 * Enable the adapter option ROM.  Completion is reported through
 * cbfn/cbarg.
 *
 * Returns BFA_STATUS_IOC_FAILURE if the IOC is not operational,
 * BFA_STATUS_DEVBUSY if another ABLK request is in flight,
 * BFA_STATUS_OK if the mailbox command was queued.
 */
bfa_status_t
bfa_ablk_optrom_en(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
{
	struct bfi_ablk_h2i_optrom_s *m;

	if (!bfa_ioc_is_operational(ablk->ioc)) {
		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
		return BFA_STATUS_IOC_FAILURE;
	}

	if (ablk->busy) {
		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
		return  BFA_STATUS_DEVBUSY;
	}

	ablk->cbfn  = cbfn;
	ablk->cbarg = cbarg;
	ablk->busy  = BFA_TRUE;

	m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_ENABLE,
		bfa_ioc_portid(ablk->ioc));
	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);

	return BFA_STATUS_OK;
}
+
/*
 * Disable the adapter option ROM.  Completion is reported through
 * cbfn/cbarg.
 *
 * Returns BFA_STATUS_IOC_FAILURE if the IOC is not operational,
 * BFA_STATUS_DEVBUSY if another ABLK request is in flight,
 * BFA_STATUS_OK if the mailbox command was queued.
 */
bfa_status_t
bfa_ablk_optrom_dis(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
{
	struct bfi_ablk_h2i_optrom_s *m;

	if (!bfa_ioc_is_operational(ablk->ioc)) {
		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
		return BFA_STATUS_IOC_FAILURE;
	}

	if (ablk->busy) {
		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
		return  BFA_STATUS_DEVBUSY;
	}

	ablk->cbfn  = cbfn;
	ablk->cbarg = cbarg;
	ablk->busy  = BFA_TRUE;

	m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_DISABLE,
		bfa_ioc_portid(ablk->ioc));
	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);

	return BFA_STATUS_OK;
}
+
+/*
+ *     SFP module specific
+ */
+
+/* forward declarations */
+static void bfa_sfp_getdata_send(struct bfa_sfp_s *sfp);
+static void bfa_sfp_media_get(struct bfa_sfp_s *sfp);
+static bfa_status_t bfa_sfp_speed_valid(struct bfa_sfp_s *sfp,
+                               enum bfa_port_speed portspeed);
+
/*
 * Complete a pending SFP show request: deliver the saved status to the
 * caller's callback (if registered) and release the show lock.
 */
static void
bfa_cb_sfp_show(struct bfa_sfp_s *sfp)
{
	bfa_trc(sfp, sfp->lock);
	if (sfp->cbfn)
		sfp->cbfn(sfp->cbarg, sfp->status);
	sfp->lock = 0;
	sfp->cbfn = NULL;
}
+
+static void
+bfa_cb_sfp_state_query(struct bfa_sfp_s *sfp)
+{
+       bfa_trc(sfp, sfp->portspeed);
+       if (sfp->media) {
+               bfa_sfp_media_get(sfp);
+               if (sfp->state_query_cbfn)
+                       sfp->state_query_cbfn(sfp->state_query_cbarg,
+                                       sfp->status);
+                       sfp->media = NULL;
+               }
+
+               if (sfp->portspeed) {
+                       sfp->status = bfa_sfp_speed_valid(sfp, sfp->portspeed);
+                       if (sfp->state_query_cbfn)
+                               sfp->state_query_cbfn(sfp->state_query_cbarg,
+                                               sfp->status);
+                               sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
+               }
+
+               sfp->state_query_lock = 0;
+               sfp->state_query_cbfn = NULL;
+}
+
+/*
+ *     IOC event handler.
+ */
/*
 * IOC event handler for the SFP module.
 *
 * On IOC disable or failure, complete any in-flight show or state-query
 * request with BFA_STATUS_IOC_FAILURE so callers are not left waiting.
 */
static void
bfa_sfp_notify(void *sfp_arg, enum bfa_ioc_event_e event)
{
	struct bfa_sfp_s *sfp = sfp_arg;

	bfa_trc(sfp, event);
	bfa_trc(sfp, sfp->lock);
	bfa_trc(sfp, sfp->state_query_lock);

	switch (event) {
	case BFA_IOC_E_DISABLED:
	case BFA_IOC_E_FAILED:
		if (sfp->lock) {
			sfp->status = BFA_STATUS_IOC_FAILURE;
			bfa_cb_sfp_show(sfp);
		}

		if (sfp->state_query_lock) {
			sfp->status = BFA_STATUS_IOC_FAILURE;
			bfa_cb_sfp_state_query(sfp);
		}
		break;

	default:
		break;
	}
}
+
+/*
+ *     SFP get data send
+ */
/*
 * Queue the SFP show mailbox command to firmware.  The request message
 * (memtype and DMA scatter list) is assumed to be set up already by
 * bfa_sfp_getdata().
 */
static void
bfa_sfp_getdata_send(struct bfa_sfp_s *sfp)
{
	struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;

	bfa_trc(sfp, req->memtype);

	/* build host command */
	bfi_h2i_set(req->mh, BFI_MC_SFP, BFI_SFP_H2I_SHOW,
			bfa_ioc_portid(sfp->ioc));

	/* send mbox cmd */
	bfa_ioc_mbox_queue(sfp->ioc, &sfp->mbcmd);
}
+
+/*
+ *     SFP is valid, read sfp data
+ */
/*
 * Start reading SFP data of the given memory type from firmware into
 * the module's DMA buffer.  Takes the show lock; the response is
 * handled by bfa_sfp_show_comp().
 */
static void
bfa_sfp_getdata(struct bfa_sfp_s *sfp, enum bfi_sfp_mem_e memtype)
{
	struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;

	/* only one show request may be outstanding */
	WARN_ON(sfp->lock != 0);
	bfa_trc(sfp, sfp->state);

	sfp->lock = 1;
	sfp->memtype = memtype;
	req->memtype = memtype;

	/* Setup SG list */
	bfa_alen_set(&req->alen, sizeof(struct sfp_mem_s), sfp->dbuf_pa);

	bfa_sfp_getdata_send(sfp);
}
+
+/*
+ * SFP show complete
+ */
/*
 * SFP show completion handler (mailbox response).
 *
 * Records whether the DMA'd SFP data is valid, maps the SFP state to a
 * status for the caller, copies the data out for DIAGEXT requests, and
 * completes any pending show and/or state-query callbacks.
 */
static void
bfa_sfp_show_comp(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
{
	struct bfi_sfp_rsp_s *rsp = (struct bfi_sfp_rsp_s *) msg;

	if (!sfp->lock) {
		/*
		 * receiving response after ioc failure
		 */
		bfa_trc(sfp, sfp->lock);
		return;
	}

	bfa_trc(sfp, rsp->status);
	if (rsp->status == BFA_STATUS_OK) {
		sfp->data_valid = 1;
		if (sfp->state == BFA_SFP_STATE_VALID)
			sfp->status = BFA_STATUS_OK;
		else if (sfp->state == BFA_SFP_STATE_UNSUPPORT)
			sfp->status = BFA_STATUS_SFP_UNSUPP;
		else
			bfa_trc(sfp, sfp->state);
	} else {
		sfp->data_valid = 0;
		sfp->status = rsp->status;
		/* sfpshow shouldn't change sfp state */
	}

	bfa_trc(sfp, sfp->memtype);
	if (sfp->memtype == BFI_SFP_MEM_DIAGEXT) {
		bfa_trc(sfp, sfp->data_valid);
		if (sfp->data_valid) {
			/* copy DMA'd serial-ID data to the caller's buffer */
			u32	size = sizeof(struct sfp_mem_s);
			u8 *des = (u8 *) &(sfp->sfpmem->srlid_base);
			memcpy(des, sfp->dbuf_kva, size);
		}
		/*
		 * Queue completion callback.
		 */
		bfa_cb_sfp_show(sfp);
	} else
		sfp->lock = 0;

	bfa_trc(sfp, sfp->state_query_lock);
	if (sfp->state_query_lock) {
		sfp->state = rsp->state;
		/* Complete callback */
		bfa_cb_sfp_state_query(sfp);
	}
}
+
+/*
+ *     SFP query fw sfp state
+ */
/*
 * Query the firmware SFP state.  Takes the state-query lock and, if no
 * show request is in flight, kicks off a full SFP data read whose
 * completion (bfa_sfp_show_comp) delivers the state.
 */
static void
bfa_sfp_state_query(struct bfa_sfp_s *sfp)
{
	struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;

	/* Should not be doing query if not in _INIT state */
	WARN_ON(sfp->state != BFA_SFP_STATE_INIT);
	WARN_ON(sfp->state_query_lock != 0);
	bfa_trc(sfp, sfp->state);

	sfp->state_query_lock = 1;
	req->memtype = 0;

	if (!sfp->lock)
		bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
}
+
/*
 * Classify the SFP media type from the serial-ID transceiver code bytes
 * in the DMA buffer and store the result in *sfp->media.
 *
 * xmtr_tech is a 9-bit field assembled from xcvr bytes 4-5; the FC
 * transmitter technology bits are checked first, then the 10G Ethernet
 * compliance codes.  Unknown combinations leave BFA_SFP_MEDIA_UNKNOWN.
 */
static void
bfa_sfp_media_get(struct bfa_sfp_s *sfp)
{
	enum bfa_defs_sfp_media_e *media = sfp->media;

	*media = BFA_SFP_MEDIA_UNKNOWN;

	if (sfp->state == BFA_SFP_STATE_UNSUPPORT)
		*media = BFA_SFP_MEDIA_UNSUPPORT;
	else if (sfp->state == BFA_SFP_STATE_VALID) {
		union sfp_xcvr_e10g_code_u e10g;
		struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva;
		u16 xmtr_tech = (sfpmem->srlid_base.xcvr[4] & 0x3) << 7 |
				(sfpmem->srlid_base.xcvr[5] >> 1);

		e10g.b = sfpmem->srlid_base.xcvr[0];
		bfa_trc(sfp, e10g.b);
		bfa_trc(sfp, xmtr_tech);
		/* check fc transmitter tech */
		if ((xmtr_tech & SFP_XMTR_TECH_CU) ||
		    (xmtr_tech & SFP_XMTR_TECH_CP) ||
		    (xmtr_tech & SFP_XMTR_TECH_CA))
			*media = BFA_SFP_MEDIA_CU;
		else if ((xmtr_tech & SFP_XMTR_TECH_EL_INTRA) ||
			 (xmtr_tech & SFP_XMTR_TECH_EL_INTER))
			*media = BFA_SFP_MEDIA_EL;
		else if ((xmtr_tech & SFP_XMTR_TECH_LL) ||
			 (xmtr_tech & SFP_XMTR_TECH_LC))
			*media = BFA_SFP_MEDIA_LW;
		else if ((xmtr_tech & SFP_XMTR_TECH_SL) ||
			 (xmtr_tech & SFP_XMTR_TECH_SN) ||
			 (xmtr_tech & SFP_XMTR_TECH_SA))
			*media = BFA_SFP_MEDIA_SW;
		/* Check 10G Ethernet Compliance code */
		else if (e10g.b & 0x10)
			*media = BFA_SFP_MEDIA_SW;
		else if (e10g.b & 0x60)
			*media = BFA_SFP_MEDIA_LW;
		else if (e10g.r.e10g_unall & 0x80)
			*media = BFA_SFP_MEDIA_UNKNOWN;
		else
			bfa_trc(sfp, 0);
	} else
		bfa_trc(sfp, sfp->state);
}
+
+static bfa_status_t
+bfa_sfp_speed_valid(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed)
+{
+       struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva;
+       struct sfp_xcvr_s *xcvr = (struct sfp_xcvr_s *) sfpmem->srlid_base.xcvr;
+       union sfp_xcvr_fc3_code_u fc3 = xcvr->fc3;
+       union sfp_xcvr_e10g_code_u e10g = xcvr->e10g;
+
+       if (portspeed == BFA_PORT_SPEED_10GBPS) {
+               if (e10g.r.e10g_sr || e10g.r.e10g_lr)
+                       return BFA_STATUS_OK;
+               else {
+                       bfa_trc(sfp, e10g.b);
+                       return BFA_STATUS_UNSUPP_SPEED;
+               }
+       }
+       if (((portspeed & BFA_PORT_SPEED_16GBPS) && fc3.r.mb1600) ||
+           ((portspeed & BFA_PORT_SPEED_8GBPS) && fc3.r.mb800) ||
+           ((portspeed & BFA_PORT_SPEED_4GBPS) && fc3.r.mb400) ||
+           ((portspeed & BFA_PORT_SPEED_2GBPS) && fc3.r.mb200) ||
+           ((portspeed & BFA_PORT_SPEED_1GBPS) && fc3.r.mb100))
+               return BFA_STATUS_OK;
+       else {
+               bfa_trc(sfp, portspeed);
+               bfa_trc(sfp, fc3.b);
+               bfa_trc(sfp, e10g.b);
+               return BFA_STATUS_UNSUPP_SPEED;
+       }
+}
+
/*
 *	SFP hmbox handler
 *
 * Dispatches firmware-to-host SFP mailbox messages: SHOW responses go to
 * the show-completion path; SCN (state-change notifications) are only
 * traced here.  Unknown message ids indicate a firmware/driver mismatch.
 */
void
bfa_sfp_intr(void *sfparg, struct bfi_mbmsg_s *msg)
{
	struct bfa_sfp_s *sfp = sfparg;

	switch (msg->mh.msg_id) {
	case BFI_SFP_I2H_SHOW:
		bfa_sfp_show_comp(sfp, msg);
		break;

	case BFI_SFP_I2H_SCN:
		bfa_trc(sfp, msg->mh.msg_id);
		break;

	default:
		/* Unexpected message id from firmware. */
		bfa_trc(sfp, msg->mh.msg_id);
		WARN_ON(1);
	}
}
+
/*
 *	Return DMA memory needed by sfp module.
 *
 * One sfp_mem_s EEPROM image, rounded up to the DMA alignment size.
 */
u32
bfa_sfp_meminfo(void)
{
	return BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
}
+
/*
 *	Attach virtual and physical memory for SFP.
 *
 * @param[in] sfp    - sfp module
 * @param[in] ioc    - owning ioc
 * @param[in] dev    - driver/device context stored for callbacks
 * @param[in] trcmod - trace module
 *
 * Resets all module state, registers the SFP mailbox handler and hooks
 * into the IOC event-notification list.
 */
void
bfa_sfp_attach(struct bfa_sfp_s *sfp, struct bfa_ioc_s *ioc, void *dev,
		struct bfa_trc_mod_s *trcmod)
{
	sfp->dev = dev;
	sfp->ioc = ioc;
	sfp->trcmod = trcmod;

	/* No request, no state query in flight; no cached data yet. */
	sfp->cbfn = NULL;
	sfp->cbarg = NULL;
	sfp->sfpmem = NULL;
	sfp->lock = 0;
	sfp->data_valid = 0;
	sfp->state = BFA_SFP_STATE_INIT;
	sfp->state_query_lock = 0;
	sfp->state_query_cbfn = NULL;
	sfp->state_query_cbarg = NULL;
	sfp->media = NULL;
	sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
	sfp->is_elb = BFA_FALSE;

	bfa_ioc_mbox_regisr(sfp->ioc, BFI_MC_SFP, bfa_sfp_intr, sfp);
	bfa_q_qe_init(&sfp->ioc_notify);
	bfa_ioc_notify_init(&sfp->ioc_notify, bfa_sfp_notify, sfp);
	list_add_tail(&sfp->ioc_notify.qe, &sfp->ioc->notify_q);
}
+
+/*
+ *     Claim Memory for SFP
+ */
+void
+bfa_sfp_memclaim(struct bfa_sfp_s *sfp, u8 *dm_kva, u64 dm_pa)
+{
+       sfp->dbuf_kva   = dm_kva;
+       sfp->dbuf_pa    = dm_pa;
+       memset(sfp->dbuf_kva, 0, sizeof(struct sfp_mem_s));
+
+       dm_kva += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
+       dm_pa += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
+}
+
/*
 * Show SFP eeprom content
 *
 * @param[in] sfp   - bfa sfp module
 *
 * @param[out] sfpmem - sfp eeprom data
 *
 * Starts an asynchronous fetch of the diagnostic/extended EEPROM pages;
 * cbfn(cbarg, status) runs on completion.  Returns BFA_STATUS_IOC_NON_OP
 * if the IOC is down, BFA_STATUS_DEVBUSY if a fetch is already running.
 */
bfa_status_t
bfa_sfp_show(struct bfa_sfp_s *sfp, struct sfp_mem_s *sfpmem,
		bfa_cb_sfp_t cbfn, void *cbarg)
{

	if (!bfa_ioc_is_operational(sfp->ioc)) {
		bfa_trc(sfp, 0);
		return BFA_STATUS_IOC_NON_OP;
	}

	if (sfp->lock) {
		bfa_trc(sfp, 0);
		return BFA_STATUS_DEVBUSY;
	}

	sfp->cbfn = cbfn;
	sfp->cbarg = cbarg;
	sfp->sfpmem = sfpmem;

	bfa_sfp_getdata(sfp, BFI_SFP_MEM_DIAGEXT);
	return BFA_STATUS_OK;
}
+
/*
 * Return SFP Media type
 *
 * @param[in] sfp   - bfa sfp module
 *
 * @param[out] media - detected SFP media type
 *
 * If the module state is still being determined (_INIT), kicks off a
 * state query and returns BFA_STATUS_SFP_NOT_READY; the caller gets the
 * result via cbfn.  Otherwise fills *media synchronously.
 */
bfa_status_t
bfa_sfp_media(struct bfa_sfp_s *sfp, enum bfa_defs_sfp_media_e *media,
		bfa_cb_sfp_t cbfn, void *cbarg)
{
	if (!bfa_ioc_is_operational(sfp->ioc)) {
		bfa_trc(sfp, 0);
		return BFA_STATUS_IOC_NON_OP;
	}

	sfp->media = media;
	if (sfp->state == BFA_SFP_STATE_INIT) {
		if (sfp->state_query_lock) {
			bfa_trc(sfp, 0);
			return BFA_STATUS_DEVBUSY;
		} else {
			sfp->state_query_cbfn = cbfn;
			sfp->state_query_cbarg = cbarg;
			bfa_sfp_state_query(sfp);
			return BFA_STATUS_SFP_NOT_READY;
		}
	}

	bfa_sfp_media_get(sfp);
	return BFA_STATUS_OK;
}
+
/*
 * Check if user set port speed is allowed by the SFP
 *
 * @param[in] sfp   - bfa sfp module
 * @param[in] portspeed - port speed from user
 *
 * Mezzanine cards and external-loopback modules allow any speed.  If the
 * SFP state is not settled yet (_INIT), starts a state query and returns
 * BFA_STATUS_SFP_NOT_READY; the result arrives via cbfn.
 */
bfa_status_t
bfa_sfp_speed(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed,
		bfa_cb_sfp_t cbfn, void *cbarg)
{
	WARN_ON(portspeed == BFA_PORT_SPEED_UNKNOWN);

	if (!bfa_ioc_is_operational(sfp->ioc))
		return BFA_STATUS_IOC_NON_OP;

	/* For Mezz card, all speed is allowed */
	if (bfa_mfg_is_mezz(sfp->ioc->attr->card_type))
		return BFA_STATUS_OK;

	/* Check SFP state */
	sfp->portspeed = portspeed;
	if (sfp->state == BFA_SFP_STATE_INIT) {
		if (sfp->state_query_lock) {
			bfa_trc(sfp, 0);
			return BFA_STATUS_DEVBUSY;
		} else {
			sfp->state_query_cbfn = cbfn;
			sfp->state_query_cbarg = cbarg;
			bfa_sfp_state_query(sfp);
			return BFA_STATUS_SFP_NOT_READY;
		}
	}

	if (sfp->state == BFA_SFP_STATE_REMOVED ||
	    sfp->state == BFA_SFP_STATE_FAILED) {
		bfa_trc(sfp, sfp->state);
		return BFA_STATUS_NO_SFP_DEV;
	}

	if (sfp->state == BFA_SFP_STATE_INSERTED) {
		bfa_trc(sfp, sfp->state);
		return BFA_STATUS_DEVBUSY;  /* sfp is reading data */
	}

	/* For eloopback, all speed is allowed */
	if (sfp->is_elb)
		return BFA_STATUS_OK;

	return bfa_sfp_speed_valid(sfp, portspeed);
}
+
+/*
+ *     Flash module specific
+ */
+
/*
 * FLASH DMA buffer should be big enough to hold both MFG block and
 * asic block(64k) at the same time and also should be 2k aligned to
 * avoid write segment to cross sector boundary.
 */
+#define BFA_FLASH_SEG_SZ       2048
+#define BFA_FLASH_DMA_BUF_SZ   \
+       BFA_ROUNDUP(0x010000 + sizeof(struct bfa_mfg_block_s), BFA_FLASH_SEG_SZ)
+
+static void
+bfa_flash_cb(struct bfa_flash_s *flash)
+{
+       flash->op_busy = 0;
+       if (flash->cbfn)
+               flash->cbfn(flash->cbarg, flash->status);
+}
+
+static void
+bfa_flash_notify(void *cbarg, enum bfa_ioc_event_e event)
+{
+       struct bfa_flash_s      *flash = cbarg;
+
+       bfa_trc(flash, event);
+       switch (event) {
+       case BFA_IOC_E_DISABLED:
+       case BFA_IOC_E_FAILED:
+               if (flash->op_busy) {
+                       flash->status = BFA_STATUS_IOC_FAILURE;
+                       flash->cbfn(flash->cbarg, flash->status);
+                       flash->op_busy = 0;
+               }
+               break;
+
+       default:
+               break;
+       }
+}
+
/*
 * Send flash attribute query request.
 *
 * @param[in] cbarg - callback argument
 *
 * The firmware DMAs a bfa_flash_attr_s into flash->dbuf_pa; the
 * response is handled in bfa_flash_intr (BFI_FLASH_I2H_QUERY_RSP).
 */
static void
bfa_flash_query_send(void *cbarg)
{
	struct bfa_flash_s *flash = cbarg;
	struct bfi_flash_query_req_s *msg =
			(struct bfi_flash_query_req_s *) flash->mb.msg;

	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ,
		bfa_ioc_portid(flash->ioc));
	bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr_s),
		flash->dbuf_pa);
	bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
}
+
+/*
+ * Send flash write request.
+ *
+ * @param[in] cbarg - callback argument
+ */
+static void
+bfa_flash_write_send(struct bfa_flash_s *flash)
+{
+       struct bfi_flash_write_req_s *msg =
+                       (struct bfi_flash_write_req_s *) flash->mb.msg;
+       u32     len;
+
+       msg->type = be32_to_cpu(flash->type);
+       msg->instance = flash->instance;
+       msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
+       len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
+               flash->residue : BFA_FLASH_DMA_BUF_SZ;
+       msg->length = be32_to_cpu(len);
+
+       /* indicate if it's the last msg of the whole write operation */
+       msg->last = (len == flash->residue) ? 1 : 0;
+
+       bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ,
+                       bfa_ioc_portid(flash->ioc));
+       bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
+       memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len);
+       bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
+
+       flash->residue -= len;
+       flash->offset += len;
+}
+
+/*
+ * Send flash read request.
+ *
+ * @param[in] cbarg - callback argument
+ */
+static void
+bfa_flash_read_send(void *cbarg)
+{
+       struct bfa_flash_s *flash = cbarg;
+       struct bfi_flash_read_req_s *msg =
+                       (struct bfi_flash_read_req_s *) flash->mb.msg;
+       u32     len;
+
+       msg->type = be32_to_cpu(flash->type);
+       msg->instance = flash->instance;
+       msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
+       len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
+                       flash->residue : BFA_FLASH_DMA_BUF_SZ;
+       msg->length = be32_to_cpu(len);
+       bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ,
+               bfa_ioc_portid(flash->ioc));
+       bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
+       bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
+}
+
+/*
+ * Send flash erase request.
+ *
+ * @param[in] cbarg - callback argument
+ */
+static void
+bfa_flash_erase_send(void *cbarg)
+{
+       struct bfa_flash_s *flash = cbarg;
+       struct bfi_flash_erase_req_s *msg =
+                       (struct bfi_flash_erase_req_s *) flash->mb.msg;
+
+       msg->type = be32_to_cpu(flash->type);
+       msg->instance = flash->instance;
+       bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_ERASE_REQ,
+                       bfa_ioc_portid(flash->ioc));
+       bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
+}
+
/*
 * Process flash response messages upon receiving interrupts.
 *
 * @param[in] flasharg - flash structure
 * @param[in] msg - message structure
 *
 * Query/erase responses complete the operation immediately; write/read
 * responses either chain the next chunk (while residue remains) or
 * complete via bfa_flash_cb().
 */
static void
bfa_flash_intr(void *flasharg, struct bfi_mbmsg_s *msg)
{
	struct bfa_flash_s *flash = flasharg;
	u32	status;

	/* View of the generic mailbox message as each response type. */
	union {
		struct bfi_flash_query_rsp_s *query;
		struct bfi_flash_erase_rsp_s *erase;
		struct bfi_flash_write_rsp_s *write;
		struct bfi_flash_read_rsp_s *read;
		struct bfi_mbmsg_s   *msg;
	} m;

	m.msg = msg;
	bfa_trc(flash, msg->mh.msg_id);

	if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT) {
		/* receiving response after ioc failure */
		bfa_trc(flash, 0x9999);
		return;
	}

	switch (msg->mh.msg_id) {
	case BFI_FLASH_I2H_QUERY_RSP:
		status = be32_to_cpu(m.query->status);
		bfa_trc(flash, status);
		if (status == BFA_STATUS_OK) {
			u32	i;
			struct bfa_flash_attr_s *attr, *f;

			/* Byte-swap the DMA'd attributes into the user buffer. */
			attr = (struct bfa_flash_attr_s *) flash->ubuf;
			f = (struct bfa_flash_attr_s *) flash->dbuf_kva;
			attr->status = be32_to_cpu(f->status);
			attr->npart = be32_to_cpu(f->npart);
			bfa_trc(flash, attr->status);
			bfa_trc(flash, attr->npart);
			for (i = 0; i < attr->npart; i++) {
				attr->part[i].part_type =
					be32_to_cpu(f->part[i].part_type);
				attr->part[i].part_instance =
					be32_to_cpu(f->part[i].part_instance);
				attr->part[i].part_off =
					be32_to_cpu(f->part[i].part_off);
				attr->part[i].part_size =
					be32_to_cpu(f->part[i].part_size);
				attr->part[i].part_len =
					be32_to_cpu(f->part[i].part_len);
				attr->part[i].part_status =
					be32_to_cpu(f->part[i].part_status);
			}
		}
		flash->status = status;
		bfa_flash_cb(flash);
		break;
	case BFI_FLASH_I2H_ERASE_RSP:
		status = be32_to_cpu(m.erase->status);
		bfa_trc(flash, status);
		flash->status = status;
		bfa_flash_cb(flash);
		break;
	case BFI_FLASH_I2H_WRITE_RSP:
		status = be32_to_cpu(m.write->status);
		bfa_trc(flash, status);
		if (status != BFA_STATUS_OK || flash->residue == 0) {
			flash->status = status;
			bfa_flash_cb(flash);
		} else {
			/* More data to write: send the next chunk. */
			bfa_trc(flash, flash->offset);
			bfa_flash_write_send(flash);
		}
		break;
	case BFI_FLASH_I2H_READ_RSP:
		status = be32_to_cpu(m.read->status);
		bfa_trc(flash, status);
		if (status != BFA_STATUS_OK) {
			flash->status = status;
			bfa_flash_cb(flash);
		} else {
			/* Copy this chunk out, then read more or complete. */
			u32 len = be32_to_cpu(m.read->length);
			bfa_trc(flash, flash->offset);
			bfa_trc(flash, len);
			memcpy(flash->ubuf + flash->offset,
				flash->dbuf_kva, len);
			flash->residue -= len;
			flash->offset += len;
			if (flash->residue == 0) {
				flash->status = status;
				bfa_flash_cb(flash);
			} else
				bfa_flash_read_send(flash);
		}
		break;
	case BFI_FLASH_I2H_BOOT_VER_RSP:
	case BFI_FLASH_I2H_EVENT:
		bfa_trc(flash, msg->mh.msg_id);
		break;

	default:
		WARN_ON(1);
	}
}
+
+/*
+ * Flash memory info API.
+ *
+ * @param[in] mincfg - minimal cfg variable
+ */
+u32
+bfa_flash_meminfo(bfa_boolean_t mincfg)
+{
+       /* min driver doesn't need flash */
+       if (mincfg)
+               return 0;
+       return BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
+}
+
/*
 * Flash attach API.
 *
 * @param[in] flash - flash structure
 * @param[in] ioc  - ioc structure
 * @param[in] dev  - device structure
 * @param[in] trcmod - trace module
 * @param[in] mincfg - minimal cfg variable
 *
 * Registers the flash mailbox handler and IOC event notification;
 * minimal-config drivers get no DMA buffer.
 */
void
bfa_flash_attach(struct bfa_flash_s *flash, struct bfa_ioc_s *ioc, void *dev,
		struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
{
	flash->ioc = ioc;
	flash->trcmod = trcmod;
	flash->cbfn = NULL;
	flash->cbarg = NULL;
	flash->op_busy = 0;

	bfa_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash);
	bfa_q_qe_init(&flash->ioc_notify);
	bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash);
	list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q);

	/* min driver doesn't need flash */
	if (mincfg) {
		flash->dbuf_kva = NULL;
		flash->dbuf_pa = 0;
	}
}
+
+/*
+ * Claim memory for flash
+ *
+ * @param[in] flash - flash structure
+ * @param[in] dm_kva - pointer to virtual memory address
+ * @param[in] dm_pa - physical memory address
+ * @param[in] mincfg - minimal cfg variable
+ */
+void
+bfa_flash_memclaim(struct bfa_flash_s *flash, u8 *dm_kva, u64 dm_pa,
+               bfa_boolean_t mincfg)
+{
+       if (mincfg)
+               return;
+
+       flash->dbuf_kva = dm_kva;
+       flash->dbuf_pa = dm_pa;
+       memset(flash->dbuf_kva, 0, BFA_FLASH_DMA_BUF_SZ);
+       dm_kva += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
+       dm_pa += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
+}
+
/*
 * Get flash attribute.
 *
 * @param[in] flash - flash structure
 * @param[in] attr - flash attribute structure
 * @param[in] cbfn - callback function
 * @param[in] cbarg - callback argument
 *
 * Return status.  Asynchronous: cbfn(cbarg, status) fires when the
 * query response arrives; *attr is filled in bfa_flash_intr.
 */
bfa_status_t
bfa_flash_get_attr(struct bfa_flash_s *flash, struct bfa_flash_attr_s *attr,
		bfa_cb_flash_t cbfn, void *cbarg)
{
	bfa_trc(flash, BFI_FLASH_H2I_QUERY_REQ);

	if (!bfa_ioc_is_operational(flash->ioc))
		return BFA_STATUS_IOC_NON_OP;

	/* Only one flash operation may be in flight. */
	if (flash->op_busy) {
		bfa_trc(flash, flash->op_busy);
		return BFA_STATUS_DEVBUSY;
	}

	flash->op_busy = 1;
	flash->cbfn = cbfn;
	flash->cbarg = cbarg;
	flash->ubuf = (u8 *) attr;
	bfa_flash_query_send(flash);

	return BFA_STATUS_OK;
}
+
/*
 * Erase flash partition.
 *
 * @param[in] flash - flash structure
 * @param[in] type - flash partition type
 * @param[in] instance - flash partition instance
 * @param[in] cbfn - callback function
 * @param[in] cbarg - callback argument
 *
 * Return status.  Asynchronous: cbfn(cbarg, status) fires when the
 * erase response arrives.
 */
bfa_status_t
bfa_flash_erase_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
		u8 instance, bfa_cb_flash_t cbfn, void *cbarg)
{
	bfa_trc(flash, BFI_FLASH_H2I_ERASE_REQ);
	bfa_trc(flash, type);
	bfa_trc(flash, instance);

	if (!bfa_ioc_is_operational(flash->ioc))
		return BFA_STATUS_IOC_NON_OP;

	/* Only one flash operation may be in flight. */
	if (flash->op_busy) {
		bfa_trc(flash, flash->op_busy);
		return BFA_STATUS_DEVBUSY;
	}

	flash->op_busy = 1;
	flash->cbfn = cbfn;
	flash->cbarg = cbarg;
	flash->type = type;
	flash->instance = instance;

	bfa_flash_erase_send(flash);
	return BFA_STATUS_OK;
}
+
/*
 * Update flash partition.
 *
 * @param[in] flash - flash structure
 * @param[in] type - flash partition type
 * @param[in] instance - flash partition instance
 * @param[in] buf - update data buffer
 * @param[in] len - data buffer length
 * @param[in] offset - offset relative to the partition starting address
 * @param[in] cbfn - callback function
 * @param[in] cbarg - callback argument
 *
 * Return status.  Asynchronous: the write proceeds in DMA-buffer-sized
 * chunks driven by bfa_flash_intr; cbfn fires when all data is written
 * or on the first error.  The MFG partition may not be updated here.
 */
bfa_status_t
bfa_flash_update_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
		u8 instance, void *buf, u32 len, u32 offset,
		bfa_cb_flash_t cbfn, void *cbarg)
{
	bfa_trc(flash, BFI_FLASH_H2I_WRITE_REQ);
	bfa_trc(flash, type);
	bfa_trc(flash, instance);
	bfa_trc(flash, len);
	bfa_trc(flash, offset);

	if (!bfa_ioc_is_operational(flash->ioc))
		return BFA_STATUS_IOC_NON_OP;

	/*
	 * 'len' must be in word (4-byte) boundary
	 * 'offset' must be in sector (16kb) boundary
	 */
	if (!len || (len & 0x03) || (offset & 0x00003FFF))
		return BFA_STATUS_FLASH_BAD_LEN;

	if (type == BFA_FLASH_PART_MFG)
		return BFA_STATUS_EINVAL;

	/* Only one flash operation may be in flight. */
	if (flash->op_busy) {
		bfa_trc(flash, flash->op_busy);
		return BFA_STATUS_DEVBUSY;
	}

	flash->op_busy = 1;
	flash->cbfn = cbfn;
	flash->cbarg = cbarg;
	flash->type = type;
	flash->instance = instance;
	flash->residue = len;
	flash->offset = 0;
	flash->addr_off = offset;
	flash->ubuf = buf;

	bfa_flash_write_send(flash);
	return BFA_STATUS_OK;
}
+
+/*
+ * Read flash partition.
+ *
+ * @param[in] flash - flash structure
+ * @param[in] type - flash partition type
+ * @param[in] instance - flash partition instance
+ * @param[in] buf - read data buffer
+ * @param[in] len - data buffer length
+ * @param[in] offset - offset relative to the partition starting address
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status.
+ */
+bfa_status_t
+bfa_flash_read_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
+               u8 instance, void *buf, u32 len, u32 offset,
+               bfa_cb_flash_t cbfn, void *cbarg)
+{
+       bfa_trc(flash, BFI_FLASH_H2I_READ_REQ);
+       bfa_trc(flash, type);
+       bfa_trc(flash, instance);
+       bfa_trc(flash, len);
+       bfa_trc(flash, offset);
+
+       if (!bfa_ioc_is_operational(flash->ioc))
+               return BFA_STATUS_IOC_NON_OP;
+
+       /*
+        * 'len' must be in word (4-byte) boundary
+        * 'offset' must be in sector (16kb) boundary
+        */
+       if (!len || (len & 0x03) || (offset & 0x00003FFF))
+               return BFA_STATUS_FLASH_BAD_LEN;
+
+       if (flash->op_busy) {
+               bfa_trc(flash, flash->op_busy);
+               return BFA_STATUS_DEVBUSY;
+       }
+
+       flash->op_busy = 1;
+       flash->cbfn = cbfn;
+       flash->cbarg = cbarg;
+       flash->type = type;
+       flash->instance = instance;
+       flash->residue = len;
+       flash->offset = 0;
+       flash->addr_off = offset;
+       flash->ubuf = buf;
+       bfa_flash_read_send(flash);
+
+       return BFA_STATUS_OK;
+}
+
+/*
+ *     DIAG module specific
+ */
+
+#define BFA_DIAG_MEMTEST_TOV   50000   /* memtest timeout in msec */
+#define BFA_DIAG_FWPING_TOV    1000    /* msec */
+
/*
 * IOC event handler
 *
 * On IOC disable/failure, fail every outstanding diag operation
 * (fwping, temperature sensor, blocking diag command) with
 * BFA_STATUS_IOC_FAILURE and release the corresponding locks.
 */
static void
bfa_diag_notify(void *diag_arg, enum bfa_ioc_event_e event)
{
	struct bfa_diag_s *diag = diag_arg;

	bfa_trc(diag, event);
	bfa_trc(diag, diag->block);
	bfa_trc(diag, diag->fwping.lock);
	bfa_trc(diag, diag->tsensor.lock);

	switch (event) {
	case BFA_IOC_E_DISABLED:
	case BFA_IOC_E_FAILED:
		if (diag->fwping.lock) {
			diag->fwping.status = BFA_STATUS_IOC_FAILURE;
			diag->fwping.cbfn(diag->fwping.cbarg,
					diag->fwping.status);
			diag->fwping.lock = 0;
		}

		if (diag->tsensor.lock) {
			diag->tsensor.status = BFA_STATUS_IOC_FAILURE;
			diag->tsensor.cbfn(diag->tsensor.cbarg,
					   diag->tsensor.status);
			diag->tsensor.lock = 0;
		}

		if (diag->block) {
			/* Stop the pending timeout timer before completing. */
			if (diag->timer_active) {
				bfa_timer_stop(&diag->timer);
				diag->timer_active = 0;
			}

			diag->status = BFA_STATUS_IOC_FAILURE;
			diag->cbfn(diag->cbarg, diag->status);
			diag->block = 0;
		}
		break;

	default:
		break;
	}
}
+
/*
 * Memtest timer callback: collect the memtest result from shared
 * memory, reset the IOC firmware state and complete the blocking
 * diag operation.
 */
static void
bfa_diag_memtest_done(void *cbarg)
{
	struct bfa_diag_s *diag = cbarg;
	struct bfa_ioc_s  *ioc = diag->ioc;
	struct bfa_diag_memtest_result *res = diag->result;
	u32	loff = BFI_BOOT_MEMTEST_RES_ADDR;
	u32	pgnum, pgoff, i;

	/* Map the smem page that holds the memtest result. */
	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
	pgoff = PSS_SMEM_PGOFF(loff);

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < (sizeof(struct bfa_diag_memtest_result) /
			 sizeof(u32)); i++) {
		/* read test result from smem */
		*((u32 *) res + i) =
			bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
		loff += sizeof(u32);
	}

	/* Reset IOC fwstates to BFI_IOC_UNINIT */
	bfa_ioc_reset_fwstate(ioc);

	/* Result words are stored byte-swapped in smem. */
	res->status = swab32(res->status);
	bfa_trc(diag, res->status);

	if (res->status == BFI_BOOT_MEMTEST_RES_SIG)
		diag->status = BFA_STATUS_OK;
	else {
		diag->status = BFA_STATUS_MEMTEST_FAILED;
		res->addr = swab32(res->addr);
		res->exp = swab32(res->exp);
		res->act = swab32(res->act);
		res->err_status = swab32(res->err_status);
		res->err_status1 = swab32(res->err_status1);
		res->err_addr = swab32(res->err_addr);
		bfa_trc(diag, res->addr);
		bfa_trc(diag, res->exp);
		bfa_trc(diag, res->act);
		bfa_trc(diag, res->err_status);
		bfa_trc(diag, res->err_status1);
		bfa_trc(diag, res->err_addr);
	}
	diag->timer_active = 0;
	diag->cbfn(diag->cbarg, diag->status);
	diag->block = 0;
}
+
/*
 * Firmware ping
 */

/*
 * Perform DMA test directly
 *
 * Fills the DMA buffer with the test pattern and queues a fwping
 * mailbox request; the firmware transforms the buffer and the result
 * is verified in diag_fwping_comp().
 */
static void
diag_fwping_send(struct bfa_diag_s *diag)
{
	struct bfi_diag_fwping_req_s *fwping_req;
	u32	i;

	bfa_trc(diag, diag->fwping.dbuf_pa);

	/* fill DMA area with pattern */
	for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++)
		*((u32 *)diag->fwping.dbuf_kva + i) = diag->fwping.data;

	/* Fill mbox msg */
	fwping_req = (struct bfi_diag_fwping_req_s *)diag->fwping.mbcmd.msg;

	/* Setup SG list */
	bfa_alen_set(&fwping_req->alen, BFI_DIAG_DMA_BUF_SZ,
			diag->fwping.dbuf_pa);
	/* Set up dma count */
	fwping_req->count = cpu_to_be32(diag->fwping.count);
	/* Set up data pattern */
	fwping_req->data = diag->fwping.data;

	/* build host command */
	bfi_h2i_set(fwping_req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_FWPING,
		bfa_ioc_portid(diag->ioc));

	/* send mbox cmd */
	bfa_ioc_mbox_queue(diag->ioc, &diag->fwping.mbcmd);
}
+
+static void
+diag_fwping_comp(struct bfa_diag_s *diag,
+                struct bfi_diag_fwping_rsp_s *diag_rsp)
+{
+       u32     rsp_data = diag_rsp->data;
+       u8      rsp_dma_status = diag_rsp->dma_status;
+
+       bfa_trc(diag, rsp_data);
+       bfa_trc(diag, rsp_dma_status);
+
+       if (rsp_dma_status == BFA_STATUS_OK) {
+               u32     i, pat;
+               pat = (diag->fwping.count & 0x1) ? ~(diag->fwping.data) :
+                       diag->fwping.data;
+               /* Check mbox data */
+               if (diag->fwping.data != rsp_data) {
+                       bfa_trc(diag, rsp_data);
+                       diag->fwping.result->dmastatus =
+                                       BFA_STATUS_DATACORRUPTED;
+                       diag->fwping.status = BFA_STATUS_DATACORRUPTED;
+                       diag->fwping.cbfn(diag->fwping.cbarg,
+                                       diag->fwping.status);
+                       diag->fwping.lock = 0;
+                       return;
+               }
+               /* Check dma pattern */
+               for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++) {
+                       if (*((u32 *)diag->fwping.dbuf_kva + i) != pat) {
+                               bfa_trc(diag, i);
+                               bfa_trc(diag, pat);
+                               bfa_trc(diag,
+                                       *((u32 *)diag->fwping.dbuf_kva + i));
+                               diag->fwping.result->dmastatus =
+                                               BFA_STATUS_DATACORRUPTED;
+                               diag->fwping.status = BFA_STATUS_DATACORRUPTED;
+                               diag->fwping.cbfn(diag->fwping.cbarg,
+                                               diag->fwping.status);
+                               diag->fwping.lock = 0;
+                               return;
+                       }
+               }
+               diag->fwping.result->dmastatus = BFA_STATUS_OK;
+               diag->fwping.status = BFA_STATUS_OK;
+               diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status);
+               diag->fwping.lock = 0;
+       } else {
+               diag->fwping.status = BFA_STATUS_HDMA_FAILED;
+               diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status);
+               diag->fwping.lock = 0;
+       }
+}
+
+/*
+ * Temperature Sensor
+ */
+
/*
 * Queue a temperature-sensor read request to the firmware; the
 * response is handled in diag_tempsensor_comp().
 */
static void
diag_tempsensor_send(struct bfa_diag_s *diag)
{
	struct bfi_diag_ts_req_s *msg;

	msg = (struct bfi_diag_ts_req_s *)diag->tsensor.mbcmd.msg;
	bfa_trc(diag, msg->temp);
	/* build host command */
	bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_TEMPSENSOR,
		bfa_ioc_portid(diag->ioc));
	/* send mbox cmd */
	bfa_ioc_mbox_queue(diag->ioc, &diag->tsensor.mbcmd);
}
+
/*
 * Temperature-sensor completion: copy the firmware response into the
 * caller's result structure and fire the callback.
 *
 * NOTE(review): the callback is passed diag->tsensor.status, while this
 * function only updates diag->tsensor.temp->status — presumably
 * tsensor.status was set when the request was issued; confirm against
 * the request path.
 */
static void
diag_tempsensor_comp(struct bfa_diag_s *diag, bfi_diag_ts_rsp_t *rsp)
{
	if (!diag->tsensor.lock) {
		/* receiving response after ioc failure */
		bfa_trc(diag, diag->tsensor.lock);
		return;
	}

	/*
	 * ASIC junction tempsensor is a reg read operation
	 * it will always return OK
	 */
	diag->tsensor.temp->temp = be16_to_cpu(rsp->temp);
	diag->tsensor.temp->ts_junc = rsp->ts_junc;
	diag->tsensor.temp->ts_brd = rsp->ts_brd;
	diag->tsensor.temp->status = BFA_STATUS_OK;

	if (rsp->ts_brd) {
		/* Board sensor present: its read can fail independently. */
		if (rsp->status == BFA_STATUS_OK) {
			diag->tsensor.temp->brd_temp =
				be16_to_cpu(rsp->brd_temp);
		} else {
			bfa_trc(diag, rsp->status);
			diag->tsensor.temp->brd_temp = 0;
			diag->tsensor.temp->status = BFA_STATUS_DEVBUSY;
		}
	}
	bfa_trc(diag, rsp->ts_junc);
	bfa_trc(diag, rsp->temp);
	bfa_trc(diag, rsp->ts_brd);
	bfa_trc(diag, rsp->brd_temp);
	diag->tsensor.cbfn(diag->tsensor.cbarg, diag->tsensor.status);
	diag->tsensor.lock = 0;
}
+
/*
 *	LED Test command
 *
 * Converts the caller's blink frequency into the hardware on-time
 * value and queues the LED-test mailbox request.  Note: mutates
 * ledtest->freq in place as part of the conversion.
 */
static void
diag_ledtest_send(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest)
{
	struct bfi_diag_ledtest_req_s  *msg;

	msg = (struct bfi_diag_ledtest_req_s *)diag->ledtest.mbcmd.msg;
	/* build host command */
	bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_LEDTEST,
			bfa_ioc_portid(diag->ioc));

	/*
	 * convert the freq from N blinks per 10 sec to
	 * crossbow ontime value. We do it here because division is need
	 */
	if (ledtest->freq)
		ledtest->freq = 500 / ledtest->freq;

	/* Clamp: freq of 0 (input 0 or > 500) becomes the minimum, 1. */
	if (ledtest->freq == 0)
		ledtest->freq = 1;

	bfa_trc(diag, ledtest->freq);
	/* mcpy(&ledtest_req->req, ledtest, sizeof(bfa_diag_ledtest_t)); */
	msg->cmd = (u8) ledtest->cmd;
	msg->color = (u8) ledtest->color;
	msg->portid = bfa_ioc_portid(diag->ioc);
	msg->led = ledtest->led;
	msg->freq = cpu_to_be16(ledtest->freq);

	/* send mbox cmd */
	bfa_ioc_mbox_queue(diag->ioc, &diag->ledtest.mbcmd);
}
+
+/*
+ * Completion handler for an LED test mailbox response.
+ * The response payload itself is unused; only the lock is released.
+ */
+static void
+diag_ledtest_comp(struct bfa_diag_s *diag, struct bfi_diag_ledtest_rsp_s * msg)
+{
+       bfa_trc(diag, diag->ledtest.lock);
+       diag->ledtest.lock = BFA_FALSE;
+       /* no bfa_cb_queue is needed because driver is not waiting */
+}
+
+/*
+ * Port beaconing
+ *
+ * Build a BFI_DIAG_H2I_PORTBEACON request (beacon on/off plus duration
+ * in seconds) and queue it to the IOC mailbox.
+ */
+static void
+diag_portbeacon_send(struct bfa_diag_s *diag, bfa_boolean_t beacon, u32 sec)
+{
+       struct bfi_diag_portbeacon_req_s *msg;
+
+       msg = (struct bfi_diag_portbeacon_req_s *)diag->beacon.mbcmd.msg;
+       /* build host command */
+       bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_PORTBEACON,
+               bfa_ioc_portid(diag->ioc));
+       msg->beacon = beacon;
+       msg->period = cpu_to_be32(sec);
+       /* send mbox cmd */
+       bfa_ioc_mbox_queue(diag->ioc, &diag->beacon.mbcmd);
+}
+
+/*
+ * Completion handler for port beaconing: firmware signals that the
+ * beacon period expired, so clear the state and notify the driver
+ * callback (if registered) that beaconing is off.
+ */
+static void
+diag_portbeacon_comp(struct bfa_diag_s *diag)
+{
+       bfa_trc(diag, diag->beacon.state);
+       diag->beacon.state = BFA_FALSE;
+       if (diag->cbfn_beacon)
+               diag->cbfn_beacon(diag->dev, BFA_FALSE, diag->beacon.link_e2e);
+}
+
+/*
+ *     Diag hmbox handler
+ *
+ * Mailbox interrupt dispatch for the diag module: routes each
+ * firmware-to-host message to its completion handler by msg_id.
+ */
+void
+bfa_diag_intr(void *diagarg, struct bfi_mbmsg_s *msg)
+{
+       struct bfa_diag_s *diag = diagarg;
+
+       switch (msg->mh.msg_id) {
+       case BFI_DIAG_I2H_PORTBEACON:
+               diag_portbeacon_comp(diag);
+               break;
+       case BFI_DIAG_I2H_FWPING:
+               diag_fwping_comp(diag, (struct bfi_diag_fwping_rsp_s *) msg);
+               break;
+       case BFI_DIAG_I2H_TEMPSENSOR:
+               diag_tempsensor_comp(diag, (bfi_diag_ts_rsp_t *) msg);
+               break;
+       case BFI_DIAG_I2H_LEDTEST:
+               diag_ledtest_comp(diag, (struct bfi_diag_ledtest_rsp_s *) msg);
+               break;
+       default:
+               /* unexpected msg_id from firmware */
+               bfa_trc(diag, msg->mh.msg_id);
+               WARN_ON(1);
+       }
+}
+
+/*
+ * Gen RAM Test
+ *
+ *   @param[in] *diag           - diag data struct
+ *   @param[in] *memtest        - mem test params input from upper layer,
+ *   @param[in] pattern         - mem test pattern
+ *   @param[in] *result         - mem test result
+ *   @param[in] cbfn            - mem test callback function
+ *   @param[in] cbarg           - callback function arg
+ *
+ *   @param[out]
+ *
+ * Returns BFA_STATUS_ADAPTER_ENABLED unless the adapter is disabled,
+ * BFA_STATUS_DEVBUSY if another destructive diag command is running,
+ * else BFA_STATUS_OK after kicking off the memtest boot.
+ */
+bfa_status_t
+bfa_diag_memtest(struct bfa_diag_s *diag, struct bfa_diag_memtest_s *memtest,
+               u32 pattern, struct bfa_diag_memtest_result *result,
+               bfa_cb_diag_t cbfn, void *cbarg)
+{
+       bfa_trc(diag, pattern);
+
+       /* memtest is destructive: the adapter must be disabled first */
+       if (!bfa_ioc_adapter_is_disabled(diag->ioc))
+               return BFA_STATUS_ADAPTER_ENABLED;
+
+       /* check to see if there is another destructive diag cmd running */
+       if (diag->block) {
+               bfa_trc(diag, diag->block);
+               return BFA_STATUS_DEVBUSY;
+       } else
+               diag->block = 1;
+
+       diag->result = result;
+       diag->cbfn = cbfn;
+       diag->cbarg = cbarg;
+
+       /* download memtest code and take LPU0 out of reset */
+       bfa_ioc_boot(diag->ioc, BFI_FWBOOT_TYPE_MEMTEST, BFI_FWBOOT_ENV_OS);
+
+       /* arm a timeout in case firmware never reports completion */
+       bfa_timer_begin(diag->ioc->timer_mod, &diag->timer,
+                       bfa_diag_memtest_done, diag, BFA_DIAG_MEMTEST_TOV);
+       diag->timer_active = 1;
+       return BFA_STATUS_OK;
+}
+
+/*
+ * DIAG firmware ping command
+ *
+ *   @param[in] *diag           - diag data struct
+ *   @param[in] cnt             - dma loop count for testing PCIE
+ *   @param[in] data            - data pattern to pass in fw
+ *   @param[in] *result         - pt to bfa_diag_fwping_result_t data struct
+ *   @param[in] cbfn            - callback function
+ *   @param[in] *cbarg          - callback function arg
+ *
+ *   @param[out]
+ *
+ * Returns BFA_STATUS_OK after queueing the first ping; the callback
+ * fires when the whole loop completes.
+ */
+bfa_status_t
+bfa_diag_fwping(struct bfa_diag_s *diag, u32 cnt, u32 data,
+               struct bfa_diag_results_fwping *result, bfa_cb_diag_t cbfn,
+               void *cbarg)
+{
+       bfa_trc(diag, cnt);
+       bfa_trc(diag, data);
+
+       if (!bfa_ioc_is_operational(diag->ioc))
+               return BFA_STATUS_IOC_NON_OP;
+
+       /* fwping is not supported on CT2 Ethernet functions */
+       if (bfa_asic_id_ct2(bfa_ioc_devid((diag->ioc))) &&
+           ((diag->ioc)->clscode == BFI_PCIFN_CLASS_ETH))
+               return BFA_STATUS_CMD_NOTSUPP;
+
+       /* check to see if there is another destructive diag cmd running */
+       if (diag->block || diag->fwping.lock) {
+               bfa_trc(diag, diag->block);
+               bfa_trc(diag, diag->fwping.lock);
+               return BFA_STATUS_DEVBUSY;
+       }
+
+       /* Initialization */
+       diag->fwping.lock = 1;
+       diag->fwping.cbfn = cbfn;
+       diag->fwping.cbarg = cbarg;
+       diag->fwping.result = result;
+       diag->fwping.data = data;
+       diag->fwping.count = cnt;
+
+       /* Init test results */
+       diag->fwping.result->data = 0;
+       diag->fwping.result->status = BFA_STATUS_OK;
+
+       /* kick off the first ping */
+       diag_fwping_send(diag);
+       return BFA_STATUS_OK;
+}
+
+/*
+ * Read Temperature Sensor
+ *
+ *   @param[in] *diag           - diag data struct
+ *   @param[in] *result         - pt to bfa_diag_temp_t data struct
+ *   @param[in] cbfn            - callback function
+ *   @param[in] *cbarg          - callback function arg
+ *
+ *   @param[out]
+ *
+ * Asynchronous: the result buffer is filled and cbfn invoked from
+ * diag_tempsensor_comp() when the firmware responds.
+ */
+bfa_status_t
+bfa_diag_tsensor_query(struct bfa_diag_s *diag,
+               struct bfa_diag_results_tempsensor_s *result,
+               bfa_cb_diag_t cbfn, void *cbarg)
+{
+       /* check to see if there is a destructive diag cmd running */
+       if (diag->block || diag->tsensor.lock) {
+               bfa_trc(diag, diag->block);
+               bfa_trc(diag, diag->tsensor.lock);
+               return BFA_STATUS_DEVBUSY;
+       }
+
+       if (!bfa_ioc_is_operational(diag->ioc))
+               return BFA_STATUS_IOC_NON_OP;
+
+       /* Init diag mod params */
+       diag->tsensor.lock = 1;
+       diag->tsensor.temp = result;
+       diag->tsensor.cbfn = cbfn;
+       diag->tsensor.cbarg = cbarg;
+
+       /* Send msg to fw */
+       diag_tempsensor_send(diag);
+
+       return BFA_STATUS_OK;
+}
+
+/*
+ * LED Test command
+ *
+ *   @param[in] *diag           - diag data struct
+ *   @param[in] *ledtest        - pt to ledtest data structure
+ *
+ *   @param[out]
+ *
+ * LED test is mutually exclusive with beaconing and with a ledtest
+ * already in flight.
+ */
+bfa_status_t
+bfa_diag_ledtest(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest)
+{
+       bfa_trc(diag, ledtest->cmd);
+
+       if (!bfa_ioc_is_operational(diag->ioc))
+               return BFA_STATUS_IOC_NON_OP;
+
+       if (diag->beacon.state)
+               return BFA_STATUS_BEACON_ON;
+
+       if (diag->ledtest.lock)
+               return BFA_STATUS_LEDTEST_OP;
+
+       /* Send msg to fw */
+       diag->ledtest.lock = BFA_TRUE;
+       diag_ledtest_send(diag, ledtest);
+
+       return BFA_STATUS_OK;
+}
+
+/*
+ * Port beaconing command
+ *
+ *   @param[in] *diag           - diag data struct
+ *   @param[in] beacon          - port beaconing 1:ON   0:OFF
+ *   @param[in] link_e2e_beacon - link beaconing 1:ON   0:OFF
+ *   @param[in] sec             - beaconing duration in seconds
+ *
+ *   @param[out]
+ *
+ * Beaconing is mutually exclusive with a running LED test; turning the
+ * beacon on while it is already on is rejected.
+ */
+bfa_status_t
+bfa_diag_beacon_port(struct bfa_diag_s *diag, bfa_boolean_t beacon,
+               bfa_boolean_t link_e2e_beacon, uint32_t sec)
+{
+       bfa_trc(diag, beacon);
+       bfa_trc(diag, link_e2e_beacon);
+       bfa_trc(diag, sec);
+
+       if (!bfa_ioc_is_operational(diag->ioc))
+               return BFA_STATUS_IOC_NON_OP;
+
+       if (diag->ledtest.lock)
+               return BFA_STATUS_LEDTEST_OP;
+
+       if (diag->beacon.state && beacon)       /* beacon alread on */
+               return BFA_STATUS_BEACON_ON;
+
+       diag->beacon.state      = beacon;
+       diag->beacon.link_e2e   = link_e2e_beacon;
+       /* notify the driver layer before the mbox command goes out */
+       if (diag->cbfn_beacon)
+               diag->cbfn_beacon(diag->dev, beacon, link_e2e_beacon);
+
+       /* Send msg to fw */
+       diag_portbeacon_send(diag, beacon, sec);
+
+       return BFA_STATUS_OK;
+}
+
+/*
+ * Return DMA memory needed by diag module (fwping buffer, rounded up
+ * to the DMA alignment size).
+ */
+u32
+bfa_diag_meminfo(void)
+{
+       return BFA_ROUNDUP(BFI_DIAG_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
+}
+
+/*
+ *     Attach virtual and physical memory for Diag.
+ *
+ * Initializes the diag module state, registers the diag mailbox
+ * handler and hooks the module into the IOC event notification queue.
+ */
+void
+bfa_diag_attach(struct bfa_diag_s *diag, struct bfa_ioc_s *ioc, void *dev,
+       bfa_cb_diag_beacon_t cbfn_beacon, struct bfa_trc_mod_s *trcmod)
+{
+       diag->dev = dev;
+       diag->ioc = ioc;
+       diag->trcmod = trcmod;
+
+       diag->block = 0;
+       diag->cbfn = NULL;
+       diag->cbarg = NULL;
+       diag->result = NULL;
+       diag->cbfn_beacon = cbfn_beacon;
+
+       /* route BFI_MC_DIAG mailbox messages to bfa_diag_intr() */
+       bfa_ioc_mbox_regisr(diag->ioc, BFI_MC_DIAG, bfa_diag_intr, diag);
+       bfa_q_qe_init(&diag->ioc_notify);
+       bfa_ioc_notify_init(&diag->ioc_notify, bfa_diag_notify, diag);
+       list_add_tail(&diag->ioc_notify.qe, &diag->ioc->notify_q);
+}
+
+/*
+ * Claim the DMA buffer (kva + physical address) used by the fwping
+ * test and zero it.
+ */
+void
+bfa_diag_memclaim(struct bfa_diag_s *diag, u8 *dm_kva, u64 dm_pa)
+{
+       diag->fwping.dbuf_kva = dm_kva;
+       diag->fwping.dbuf_pa = dm_pa;
+       memset(diag->fwping.dbuf_kva, 0, BFI_DIAG_DMA_BUF_SZ);
+}
+
+/*
+ *     PHY module specific
+ */
+#define BFA_PHY_DMA_BUF_SZ     0x02000         /* 8k dma buffer */
+#define BFA_PHY_LOCK_STATUS    0x018878        /* phy semaphore status reg */
+
+/*
+ * Convert sz bytes (sz/4 32-bit words) from big-endian ibuf into
+ * host-endian obuf.
+ */
+static void
+bfa_phy_ntoh32(u32 *obuf, u32 *ibuf, int sz)
+{
+       int i, m = sz >> 2;     /* word count */
+
+       for (i = 0; i < m; i++)
+               obuf[i] = be32_to_cpu(ibuf[i]);
+}
+
+/*
+ * A phy is only present on BFA_MFG_TYPE_LIGHTNING cards.
+ */
+static bfa_boolean_t
+bfa_phy_present(struct bfa_phy_s *phy)
+{
+       return (phy->ioc->attr->card_type == BFA_MFG_TYPE_LIGHTNING);
+}
+
+/*
+ * IOC event notification handler for the phy module: on IOC disable
+ * or failure, fail any outstanding phy operation back to its caller.
+ */
+static void
+bfa_phy_notify(void *cbarg, enum bfa_ioc_event_e event)
+{
+       struct bfa_phy_s *phy = cbarg;
+
+       bfa_trc(phy, event);
+
+       switch (event) {
+       case BFA_IOC_E_DISABLED:
+       case BFA_IOC_E_FAILED:
+               if (phy->op_busy) {
+                       phy->status = BFA_STATUS_IOC_FAILURE;
+                       phy->cbfn(phy->cbarg, phy->status);
+                       phy->op_busy = 0;
+               }
+               break;
+
+       default:
+               break;
+       }
+}
+
+/*
+ * Send phy attribute query request.
+ *
+ * @param[in] cbarg - callback argument
+ *
+ * The firmware DMAs a bfa_phy_attr_s into phy->dbuf_pa; the response
+ * is handled in bfa_phy_intr() (BFI_PHY_I2H_QUERY_RSP).
+ */
+static void
+bfa_phy_query_send(void *cbarg)
+{
+       struct bfa_phy_s *phy = cbarg;
+       struct bfi_phy_query_req_s *msg =
+                       (struct bfi_phy_query_req_s *) phy->mb.msg;
+
+       msg->instance = phy->instance;
+       bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_QUERY_REQ,
+               bfa_ioc_portid(phy->ioc));
+       bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_attr_s), phy->dbuf_pa);
+       bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
+}
+
+/*
+ * Send phy write request.
+ *
+ * @param[in] cbarg - callback argument
+ *
+ * Sends at most BFA_PHY_DMA_BUF_SZ bytes per request; bfa_phy_intr()
+ * re-invokes this function until phy->residue reaches zero.
+ */
+static void
+bfa_phy_write_send(void *cbarg)
+{
+       struct bfa_phy_s *phy = cbarg;
+       struct bfi_phy_write_req_s *msg =
+                       (struct bfi_phy_write_req_s *) phy->mb.msg;
+       u32     len;
+       u16     *buf, *dbuf;
+       int     i, sz;
+
+       msg->instance = phy->instance;
+       msg->offset = cpu_to_be32(phy->addr_off + phy->offset);
+       len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ?
+                       phy->residue : BFA_PHY_DMA_BUF_SZ;
+       msg->length = cpu_to_be32(len);
+
+       /* indicate if it's the last msg of the whole write operation */
+       msg->last = (len == phy->residue) ? 1 : 0;
+
+       bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_WRITE_REQ,
+               bfa_ioc_portid(phy->ioc));
+       bfa_alen_set(&msg->alen, len, phy->dbuf_pa);
+
+       buf = (u16 *) (phy->ubuf + phy->offset);
+       dbuf = (u16 *)phy->dbuf_kva;
+       sz = len >> 1;
+       /*
+        * NOTE(review): this copies DMA buffer -> user buffer
+        * (buf[i] = cpu_to_be16(dbuf[i])). For a write request one would
+        * expect user buffer -> DMA buffer; confirm intended direction.
+        */
+       for (i = 0; i < sz; i++)
+               buf[i] = cpu_to_be16(dbuf[i]);
+
+       bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
+
+       phy->residue -= len;
+       phy->offset += len;
+}
+
+/*
+ * Send phy read request.
+ *
+ * @param[in] cbarg - callback argument
+ *
+ * Requests at most BFA_PHY_DMA_BUF_SZ bytes; bfa_phy_intr() copies
+ * the data out and re-invokes this function until done.
+ */
+static void
+bfa_phy_read_send(void *cbarg)
+{
+       struct bfa_phy_s *phy = cbarg;
+       struct bfi_phy_read_req_s *msg =
+                       (struct bfi_phy_read_req_s *) phy->mb.msg;
+       u32     len;
+
+       msg->instance = phy->instance;
+       msg->offset = cpu_to_be32(phy->addr_off + phy->offset);
+       len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ?
+                       phy->residue : BFA_PHY_DMA_BUF_SZ;
+       msg->length = cpu_to_be32(len);
+       bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_READ_REQ,
+               bfa_ioc_portid(phy->ioc));
+       bfa_alen_set(&msg->alen, len, phy->dbuf_pa);
+       bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
+}
+
+/*
+ * Send phy stats request.
+ *
+ * @param[in] cbarg - callback argument
+ *
+ * The firmware DMAs a bfa_phy_stats_s into phy->dbuf_pa; the response
+ * is handled in bfa_phy_intr() (BFI_PHY_I2H_STATS_RSP).
+ */
+static void
+bfa_phy_stats_send(void *cbarg)
+{
+       struct bfa_phy_s *phy = cbarg;
+       struct bfi_phy_stats_req_s *msg =
+                       (struct bfi_phy_stats_req_s *) phy->mb.msg;
+
+       msg->instance = phy->instance;
+       bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_STATS_REQ,
+               bfa_ioc_portid(phy->ioc));
+       bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_stats_s), phy->dbuf_pa);
+       bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
+}
+
+/*
+ * Phy memory info API.
+ *
+ * @param[in] mincfg - minimal cfg variable
+ *
+ * Returns the DMA buffer size needed by the phy module, or 0 in
+ * minimal-configuration mode.
+ */
+u32
+bfa_phy_meminfo(bfa_boolean_t mincfg)
+{
+       /* min driver doesn't need phy */
+       if (mincfg)
+               return 0;
+
+       return BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
+}
+
+/*
+ * Phy attach API.
+ *
+ * @param[in] phy - phy structure
+ * @param[in] ioc  - ioc structure
+ * @param[in] dev  - device structure
+ * @param[in] trcmod - trace module
+ * @param[in] mincfg - minimal cfg variable
+ *
+ * Registers the phy mailbox handler and IOC event notification; DMA
+ * buffers are supplied later via bfa_phy_memclaim() (unless mincfg).
+ */
+void
+bfa_phy_attach(struct bfa_phy_s *phy, struct bfa_ioc_s *ioc, void *dev,
+               struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
+{
+       phy->ioc = ioc;
+       phy->trcmod = trcmod;
+       phy->cbfn = NULL;
+       phy->cbarg = NULL;
+       phy->op_busy = 0;
+
+       bfa_ioc_mbox_regisr(phy->ioc, BFI_MC_PHY, bfa_phy_intr, phy);
+       bfa_q_qe_init(&phy->ioc_notify);
+       bfa_ioc_notify_init(&phy->ioc_notify, bfa_phy_notify, phy);
+       list_add_tail(&phy->ioc_notify.qe, &phy->ioc->notify_q);
+
+       /* min driver doesn't need phy */
+       if (mincfg) {
+               phy->dbuf_kva = NULL;
+               phy->dbuf_pa = 0;
+       }
+}
+
+/*
+ * Claim memory for phy
+ *
+ * @param[in] phy - phy structure
+ * @param[in] dm_kva - pointer to virtual memory address
+ * @param[in] dm_pa - physical memory address
+ * @param[in] mincfg - minimal cfg variable
+ */
+void
+bfa_phy_memclaim(struct bfa_phy_s *phy, u8 *dm_kva, u64 dm_pa,
+               bfa_boolean_t mincfg)
+{
+       if (mincfg)
+               return;
+
+       phy->dbuf_kva = dm_kva;
+       phy->dbuf_pa = dm_pa;
+       memset(phy->dbuf_kva, 0, BFA_PHY_DMA_BUF_SZ);
+       /*
+        * NOTE(review): these increments only change the by-value
+        * parameters and have no effect outside this function.
+        */
+       dm_kva += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
+       dm_pa += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
+}
+
+/*
+ * Returns non-zero while the phy semaphore status register indicates
+ * the phy hardware is busy.
+ */
+bfa_boolean_t
+bfa_phy_busy(struct bfa_ioc_s *ioc)
+{
+       void __iomem    *rb;
+
+       rb = bfa_ioc_bar0(ioc);
+       return readl(rb + BFA_PHY_LOCK_STATUS);
+}
+
+/*
+ * Get phy attribute.
+ *
+ * @param[in] phy - phy structure
+ * @param[in] instance - phy image instance
+ * @param[in] attr - phy attribute structure
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status. Asynchronous: attr is filled and cbfn invoked from
+ * bfa_phy_intr() on BFI_PHY_I2H_QUERY_RSP.
+ */
+bfa_status_t
+bfa_phy_get_attr(struct bfa_phy_s *phy, u8 instance,
+               struct bfa_phy_attr_s *attr, bfa_cb_phy_t cbfn, void *cbarg)
+{
+       bfa_trc(phy, BFI_PHY_H2I_QUERY_REQ);
+       bfa_trc(phy, instance);
+
+       if (!bfa_phy_present(phy))
+               return BFA_STATUS_PHY_NOT_PRESENT;
+
+       if (!bfa_ioc_is_operational(phy->ioc))
+               return BFA_STATUS_IOC_NON_OP;
+
+       /* reject if a phy op is pending or the hardware is busy */
+       if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
+               bfa_trc(phy, phy->op_busy);
+               return BFA_STATUS_DEVBUSY;
+       }
+
+       phy->op_busy = 1;
+       phy->cbfn = cbfn;
+       phy->cbarg = cbarg;
+       phy->instance = instance;
+       phy->ubuf = (uint8_t *) attr;
+       bfa_phy_query_send(phy);
+
+       return BFA_STATUS_OK;
+}
+
+/*
+ * Get phy stats.
+ *
+ * @param[in] phy - phy structure
+ * @param[in] instance - phy image instance
+ * @param[in] stats - pointer to phy stats
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status. Asynchronous: stats is filled and cbfn invoked from
+ * bfa_phy_intr() on BFI_PHY_I2H_STATS_RSP.
+ */
+bfa_status_t
+bfa_phy_get_stats(struct bfa_phy_s *phy, u8 instance,
+               struct bfa_phy_stats_s *stats,
+               bfa_cb_phy_t cbfn, void *cbarg)
+{
+       bfa_trc(phy, BFI_PHY_H2I_STATS_REQ);
+       bfa_trc(phy, instance);
+
+       if (!bfa_phy_present(phy))
+               return BFA_STATUS_PHY_NOT_PRESENT;
+
+       if (!bfa_ioc_is_operational(phy->ioc))
+               return BFA_STATUS_IOC_NON_OP;
+
+       /* reject if a phy op is pending or the hardware is busy */
+       if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
+               bfa_trc(phy, phy->op_busy);
+               return BFA_STATUS_DEVBUSY;
+       }
+
+       phy->op_busy = 1;
+       phy->cbfn = cbfn;
+       phy->cbarg = cbarg;
+       phy->instance = instance;
+       phy->ubuf = (u8 *) stats;
+       bfa_phy_stats_send(phy);
+
+       return BFA_STATUS_OK;
+}
+
+/*
+ * Update phy image.
+ *
+ * @param[in] phy - phy structure
+ * @param[in] instance - phy image instance
+ * @param[in] buf - update data buffer
+ * @param[in] len - data buffer length
+ * @param[in] offset - offset relative to starting address
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status. The write proceeds in BFA_PHY_DMA_BUF_SZ chunks;
+ * cbfn is invoked when the whole buffer has been sent (or on error).
+ */
+bfa_status_t
+bfa_phy_update(struct bfa_phy_s *phy, u8 instance,
+               void *buf, u32 len, u32 offset,
+               bfa_cb_phy_t cbfn, void *cbarg)
+{
+       bfa_trc(phy, BFI_PHY_H2I_WRITE_REQ);
+       bfa_trc(phy, instance);
+       bfa_trc(phy, len);
+       bfa_trc(phy, offset);
+
+       if (!bfa_phy_present(phy))
+               return BFA_STATUS_PHY_NOT_PRESENT;
+
+       if (!bfa_ioc_is_operational(phy->ioc))
+               return BFA_STATUS_IOC_NON_OP;
+
+       /* 'len' must be in word (4-byte) boundary */
+       if (!len || (len & 0x03))
+               return BFA_STATUS_FAILED;
+
+       if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
+               bfa_trc(phy, phy->op_busy);
+               return BFA_STATUS_DEVBUSY;
+       }
+
+       phy->op_busy = 1;
+       phy->cbfn = cbfn;
+       phy->cbarg = cbarg;
+       phy->instance = instance;
+       phy->residue = len;     /* bytes remaining to transfer */
+       phy->offset = 0;        /* progress within buf */
+       phy->addr_off = offset;
+       phy->ubuf = buf;
+
+       bfa_phy_write_send(phy);
+       return BFA_STATUS_OK;
+}
+
+/*
+ * Read phy image.
+ *
+ * @param[in] phy - phy structure
+ * @param[in] instance - phy image instance
+ * @param[in] buf - read data buffer
+ * @param[in] len - data buffer length
+ * @param[in] offset - offset relative to starting address
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status. The read proceeds in BFA_PHY_DMA_BUF_SZ chunks;
+ * cbfn is invoked when the whole buffer is filled (or on error).
+ */
+bfa_status_t
+bfa_phy_read(struct bfa_phy_s *phy, u8 instance,
+               void *buf, u32 len, u32 offset,
+               bfa_cb_phy_t cbfn, void *cbarg)
+{
+       bfa_trc(phy, BFI_PHY_H2I_READ_REQ);
+       bfa_trc(phy, instance);
+       bfa_trc(phy, len);
+       bfa_trc(phy, offset);
+
+       if (!bfa_phy_present(phy))
+               return BFA_STATUS_PHY_NOT_PRESENT;
+
+       if (!bfa_ioc_is_operational(phy->ioc))
+               return BFA_STATUS_IOC_NON_OP;
+
+       /* 'len' must be in word (4-byte) boundary */
+       if (!len || (len & 0x03))
+               return BFA_STATUS_FAILED;
+
+       if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
+               bfa_trc(phy, phy->op_busy);
+               return BFA_STATUS_DEVBUSY;
+       }
+
+       phy->op_busy = 1;
+       phy->cbfn = cbfn;
+       phy->cbarg = cbarg;
+       phy->instance = instance;
+       phy->residue = len;     /* bytes remaining to transfer */
+       phy->offset = 0;        /* progress within buf */
+       phy->addr_off = offset;
+       phy->ubuf = buf;
+       bfa_phy_read_send(phy);
+
+       return BFA_STATUS_OK;
+}
+
+/*
+ * Process phy response messages upon receiving interrupts.
+ *
+ * @param[in] phyarg - phy structure
+ * @param[in] msg - message structure
+ *
+ * Dispatches firmware responses by msg_id. Query/stats responses
+ * complete in one shot; write/read responses continue chunked
+ * transfers until phy->residue reaches zero, then run the callback.
+ */
+void
+bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg)
+{
+       struct bfa_phy_s *phy = phyarg;
+       u32     status;
+
+       /* view the generic mbox message as the specific response type */
+       union {
+               struct bfi_phy_query_rsp_s *query;
+               struct bfi_phy_stats_rsp_s *stats;
+               struct bfi_phy_write_rsp_s *write;
+               struct bfi_phy_read_rsp_s *read;
+               struct bfi_mbmsg_s   *msg;
+       } m;
+
+       m.msg = msg;
+       bfa_trc(phy, msg->mh.msg_id);
+
+       if (!phy->op_busy) {
+               /* receiving response after ioc failure */
+               bfa_trc(phy, 0x9999);
+               return;
+       }
+
+       switch (msg->mh.msg_id) {
+       case BFI_PHY_I2H_QUERY_RSP:
+               status = be32_to_cpu(m.query->status);
+               bfa_trc(phy, status);
+
+               if (status == BFA_STATUS_OK) {
+                       /* attributes were DMAed into dbuf; endian-swap out */
+                       struct bfa_phy_attr_s *attr =
+                               (struct bfa_phy_attr_s *) phy->ubuf;
+                       bfa_phy_ntoh32((u32 *)attr, (u32 *)phy->dbuf_kva,
+                                       sizeof(struct bfa_phy_attr_s));
+                       bfa_trc(phy, attr->status);
+                       bfa_trc(phy, attr->length);
+               }
+
+               phy->status = status;
+               phy->op_busy = 0;
+               if (phy->cbfn)
+                       phy->cbfn(phy->cbarg, phy->status);
+               break;
+       case BFI_PHY_I2H_STATS_RSP:
+               status = be32_to_cpu(m.stats->status);
+               bfa_trc(phy, status);
+
+               if (status == BFA_STATUS_OK) {
+                       struct bfa_phy_stats_s *stats =
+                               (struct bfa_phy_stats_s *) phy->ubuf;
+                       bfa_phy_ntoh32((u32 *)stats, (u32 *)phy->dbuf_kva,
+                               sizeof(struct bfa_phy_stats_s));
+                               /* NOTE(review): stray extra indent below */
+                               bfa_trc(phy, stats->status);
+               }
+
+               phy->status = status;
+               phy->op_busy = 0;
+               if (phy->cbfn)
+                       phy->cbfn(phy->cbarg, phy->status);
+               break;
+       case BFI_PHY_I2H_WRITE_RSP:
+               status = be32_to_cpu(m.write->status);
+               bfa_trc(phy, status);
+
+               /* done (or failed): complete; otherwise send next chunk */
+               if (status != BFA_STATUS_OK || phy->residue == 0) {
+                       phy->status = status;
+                       phy->op_busy = 0;
+                       if (phy->cbfn)
+                               phy->cbfn(phy->cbarg, phy->status);
+               } else {
+                       bfa_trc(phy, phy->offset);
+                       bfa_phy_write_send(phy);
+               }
+               break;
+       case BFI_PHY_I2H_READ_RSP:
+               status = be32_to_cpu(m.read->status);
+               bfa_trc(phy, status);
+
+               if (status != BFA_STATUS_OK) {
+                       phy->status = status;
+                       phy->op_busy = 0;
+                       if (phy->cbfn)
+                               phy->cbfn(phy->cbarg, phy->status);
+               } else {
+                       /* copy this chunk out of dbuf, 16 bits at a time */
+                       u32 len = be32_to_cpu(m.read->length);
+                       u16 *buf = (u16 *)(phy->ubuf + phy->offset);
+                       u16 *dbuf = (u16 *)phy->dbuf_kva;
+                       int i, sz = len >> 1;
+
+                       bfa_trc(phy, phy->offset);
+                       bfa_trc(phy, len);
+
+                       for (i = 0; i < sz; i++)
+                               buf[i] = be16_to_cpu(dbuf[i]);
+
+                       phy->residue -= len;
+                       phy->offset += len;
+
+                       if (phy->residue == 0) {
+                               phy->status = status;
+                               phy->op_busy = 0;
+                               if (phy->cbfn)
+                                       phy->cbfn(phy->cbarg, phy->status);
+                       } else
+                               bfa_phy_read_send(phy);
+               }
+               break;
+       default:
+               WARN_ON(1);
+       }
+}
index c85182a..c5ecd2e 100644 (file)
@@ -84,6 +84,68 @@ struct bfa_sge_s {
 #define bfa_sgaddr_le(_x)      (_x)
 #endif
 
+/*
+ * BFA memory resources
+ *
+ * One DMA-coherent memory segment; segments are chained on
+ * bfa_meminfo_s.dma_info during setup.
+ */
+struct bfa_mem_dma_s {
+       struct list_head qe;            /* Queue of DMA elements */
+       u32             mem_len;        /* Total Length in Bytes */
+       u8              *kva;           /* kernel virtual address */
+       u64             dma;            /* dma address if DMA memory */
+       u8              *kva_curp;      /* kva allocation cursor */
+       u64             dma_curp;       /* dma allocation cursor */
+};
+#define bfa_mem_dma_t struct bfa_mem_dma_s
+
+/* One kernel-virtual memory segment; chained on bfa_meminfo_s.kva_info */
+struct bfa_mem_kva_s {
+       struct list_head qe;            /* Queue of KVA elements */
+       u32             mem_len;        /* Total Length in Bytes */
+       u8              *kva;           /* kernel virtual address */
+       u8              *kva_curp;      /* kva allocation cursor */
+};
+#define bfa_mem_kva_t struct bfa_mem_kva_s
+
+/* Aggregate of all DMA and KVA memory requirements for one instance */
+struct bfa_meminfo_s {
+       struct bfa_mem_dma_s dma_info;
+       struct bfa_mem_kva_s kva_info;
+};
+
+/*
+ * BFA memory segment setup macros
+ *
+ * Record a segment's length and, only when non-zero, add it to the
+ * meminfo DMA/KVA queue for later allocation.
+ */
+#define bfa_mem_dma_setup(_meminfo, _dm_ptr, _seg_sz) do {     \
+       ((bfa_mem_dma_t *)(_dm_ptr))->mem_len = (_seg_sz);      \
+       if (_seg_sz)                                            \
+               list_add_tail(&((bfa_mem_dma_t *)_dm_ptr)->qe,  \
+                             &(_meminfo)->dma_info.qe);        \
+} while (0)
+
+#define bfa_mem_kva_setup(_meminfo, _kva_ptr, _seg_sz) do {    \
+       ((bfa_mem_kva_t *)(_kva_ptr))->mem_len = (_seg_sz);     \
+       if (_seg_sz)                                            \
+               list_add_tail(&((bfa_mem_kva_t *)_kva_ptr)->qe, \
+                             &(_meminfo)->kva_info.qe);        \
+} while (0)
+
+/* BFA dma memory segments iterator */
+#define bfa_mem_dma_sptr(_mod, _i)     (&(_mod)->dma_seg[(_i)])
+#define bfa_mem_dma_seg_iter(_mod, _sptr, _nr, _i)                     \
+       for (_i = 0, _sptr = bfa_mem_dma_sptr(_mod, _i); _i < (_nr);    \
+            _i++, _sptr = bfa_mem_dma_sptr(_mod, _i))
+
+/* Accessors for a module's segment cursors and length */
+#define bfa_mem_kva_curp(_mod) ((_mod)->kva_seg.kva_curp)
+#define bfa_mem_dma_virt(_sptr)        ((_sptr)->kva_curp)
+#define bfa_mem_dma_phys(_sptr)        ((_sptr)->dma_curp)
+#define bfa_mem_dma_len(_sptr) ((_sptr)->mem_len)
+
+/* Get the corresponding dma buf kva for a req - from the tag */
+#define bfa_mem_get_dmabuf_kva(_mod, _tag, _rqsz)                            \
+       (((u8 *)(_mod)->dma_seg[BFI_MEM_SEG_FROM_TAG(_tag, _rqsz)].kva_curp) +\
+        BFI_MEM_SEG_REQ_OFFSET(_tag, _rqsz) * (_rqsz))
+
+/* Get the corresponding dma buf pa for a req - from the tag */
+#define bfa_mem_get_dmabuf_pa(_mod, _tag, _rqsz)                       \
+       ((_mod)->dma_seg[BFI_MEM_SEG_FROM_TAG(_tag, _rqsz)].dma_curp +  \
+        BFI_MEM_SEG_REQ_OFFSET(_tag, _rqsz) * (_rqsz))
+
 /*
  * PCI device information required by IOC
  */
@@ -91,6 +153,7 @@ struct bfa_pcidev_s {
        int             pci_slot;
        u8              pci_func;
        u16             device_id;
+       u16             ssid;
        void __iomem    *pci_bar_kva;
 };
 
@@ -112,18 +175,6 @@ struct bfa_dma_s {
 #define BFI_SMEM_CB_SIZE       0x200000U       /* ! 2MB for crossbow   */
 #define BFI_SMEM_CT_SIZE       0x280000U       /* ! 2.5MB for catapult */
 
-
-#define bfa_dma_addr_set(dma_addr, pa) \
-               __bfa_dma_addr_set(&dma_addr, (u64)pa)
-
-static inline void
-__bfa_dma_addr_set(union bfi_addr_u *dma_addr, u64 pa)
-{
-       dma_addr->a32.addr_lo = (__be32) pa;
-       dma_addr->a32.addr_hi = (__be32) (pa >> 32);
-}
-
-
 #define bfa_dma_be_addr_set(dma_addr, pa)      \
                __bfa_dma_be_addr_set(&dma_addr, (u64)pa)
 static inline void
@@ -133,11 +184,22 @@ __bfa_dma_be_addr_set(union bfi_addr_u *dma_addr, u64 pa)
        dma_addr->a32.addr_hi = cpu_to_be32(pa >> 32);
 }
 
+/*
+ * Initialize an address/length pair for firmware: both the length and
+ * the DMA address are stored big-endian.
+ */
+#define bfa_alen_set(__alen, __len, __pa)      \
+       __bfa_alen_set(__alen, __len, (u64)__pa)
+
+static inline void
+__bfa_alen_set(struct bfi_alen_s *alen, u32 len, u64 pa)
+{
+       alen->al_len = cpu_to_be32(len);
+       bfa_dma_be_addr_set(alen->al_addr, pa);
+}
+
 struct bfa_ioc_regs_s {
        void __iomem *hfn_mbox_cmd;
        void __iomem *hfn_mbox;
        void __iomem *lpu_mbox_cmd;
        void __iomem *lpu_mbox;
+       void __iomem *lpu_read_stat;
        void __iomem *pss_ctl_reg;
        void __iomem *pss_err_status_reg;
        void __iomem *app_pll_fast_ctl_reg;
@@ -199,18 +261,26 @@ struct bfa_ioc_cbfn_s {
 };
 
 /*
- * Heartbeat failure notification queue element.
+ * IOC event notification mechanism.
  */
-struct bfa_ioc_hbfail_notify_s {
+enum bfa_ioc_event_e {
+       BFA_IOC_E_ENABLED       = 1,
+       BFA_IOC_E_DISABLED      = 2,
+       BFA_IOC_E_FAILED        = 3,
+};
+
+typedef void (*bfa_ioc_notify_cbfn_t)(void *, enum bfa_ioc_event_e);
+
+struct bfa_ioc_notify_s {
        struct list_head                qe;
-       bfa_ioc_hbfail_cbfn_t   cbfn;
+       bfa_ioc_notify_cbfn_t   cbfn;
        void                    *cbarg;
 };
 
 /*
- * Initialize a heartbeat failure notification structure
+ * Initialize a IOC event notification structure
  */
-#define bfa_ioc_hbfail_init(__notify, __cbfn, __cbarg) do {    \
+#define bfa_ioc_notify_init(__notify, __cbfn, __cbarg) do {    \
        (__notify)->cbfn = (__cbfn);      \
        (__notify)->cbarg = (__cbarg);      \
 } while (0)
@@ -218,8 +288,9 @@ struct bfa_ioc_hbfail_notify_s {
 struct bfa_iocpf_s {
        bfa_fsm_t               fsm;
        struct bfa_ioc_s        *ioc;
-       u32             retry_count;
+       bfa_boolean_t           fw_mismatch_notified;
        bfa_boolean_t           auto_recover;
+       u32                     poll_time;
 };
 
 struct bfa_ioc_s {
@@ -231,17 +302,15 @@ struct bfa_ioc_s {
        struct bfa_timer_s      sem_timer;
        struct bfa_timer_s      hb_timer;
        u32             hb_count;
-       struct list_head                hb_notify_q;
+       struct list_head        notify_q;
        void                    *dbg_fwsave;
        int                     dbg_fwsave_len;
        bfa_boolean_t           dbg_fwsave_once;
-       enum bfi_mclass         ioc_mc;
+       enum bfi_pcifn_class    clscode;
        struct bfa_ioc_regs_s   ioc_regs;
        struct bfa_trc_mod_s    *trcmod;
        struct bfa_ioc_drv_stats_s      stats;
        bfa_boolean_t           fcmode;
-       bfa_boolean_t           ctdev;
-       bfa_boolean_t           cna;
        bfa_boolean_t           pllinit;
        bfa_boolean_t           stats_busy;     /*  outstanding stats */
        u8                      port_id;
@@ -251,10 +320,17 @@ struct bfa_ioc_s {
        struct bfa_ioc_mbox_mod_s mbox_mod;
        struct bfa_ioc_hwif_s   *ioc_hwif;
        struct bfa_iocpf_s      iocpf;
+       enum bfi_asic_gen       asic_gen;
+       enum bfi_asic_mode      asic_mode;
+       enum bfi_port_mode      port0_mode;
+       enum bfi_port_mode      port1_mode;
+       enum bfa_mode_s         port_mode;
+       u8                      ad_cap_bm;      /* adapter cap bit mask */
+       u8                      port_mode_cfg;  /* config port mode */
 };
 
 struct bfa_ioc_hwif_s {
-       bfa_status_t (*ioc_pll_init) (void __iomem *rb, bfa_boolean_t fcmode);
+       bfa_status_t (*ioc_pll_init) (void __iomem *rb, enum bfi_asic_mode m);
        bfa_boolean_t   (*ioc_firmware_lock)    (struct bfa_ioc_s *ioc);
        void            (*ioc_firmware_unlock)  (struct bfa_ioc_s *ioc);
        void            (*ioc_reg_init) (struct bfa_ioc_s *ioc);
@@ -268,12 +344,356 @@ struct bfa_ioc_hwif_s {
        void            (*ioc_sync_leave)       (struct bfa_ioc_s *ioc);
        void            (*ioc_sync_ack)         (struct bfa_ioc_s *ioc);
        bfa_boolean_t   (*ioc_sync_complete)    (struct bfa_ioc_s *ioc);
+       bfa_boolean_t   (*ioc_lpu_read_stat)    (struct bfa_ioc_s *ioc);
+};
+
+/*
+ * Queue element to wait for room in request queue. FIFO order is
+ * maintained when fulfilling requests.
+ */
+struct bfa_reqq_wait_s {
+       struct list_head        qe;
+       void    (*qresume) (void *cbarg);
+       void    *cbarg;
+};
+
+typedef void   (*bfa_cb_cbfn_t) (void *cbarg, bfa_boolean_t complete);
+
+/*
+ * Generic BFA callback element.
+ */
+struct bfa_cb_qe_s {
+       struct list_head        qe;
+       bfa_cb_cbfn_t   cbfn;
+       bfa_boolean_t   once;
+       void            *cbarg;
+};
+
+/*
+ * ASIC block configuration related
+ */
+
+typedef void (*bfa_ablk_cbfn_t)(void *, enum bfa_status);
+
+struct bfa_ablk_s {
+       struct bfa_ioc_s        *ioc;
+       struct bfa_ablk_cfg_s   *cfg;
+       u16                     *pcifn;
+       struct bfa_dma_s        dma_addr;
+       bfa_boolean_t           busy;
+       struct bfa_mbox_cmd_s   mb;
+       bfa_ablk_cbfn_t         cbfn;
+       void                    *cbarg;
+       struct bfa_ioc_notify_s ioc_notify;
+       struct bfa_mem_dma_s    ablk_dma;
+};
+#define BFA_MEM_ABLK_DMA(__bfa)                (&((__bfa)->modules.ablk.ablk_dma))
+
+/*
+ *     SFP module specific
+ */
+typedef void   (*bfa_cb_sfp_t) (void *cbarg, bfa_status_t status);
+
+struct bfa_sfp_s {
+       void    *dev;
+       struct bfa_ioc_s        *ioc;
+       struct bfa_trc_mod_s    *trcmod;
+       struct sfp_mem_s        *sfpmem;
+       bfa_cb_sfp_t            cbfn;
+       void                    *cbarg;
+       enum bfi_sfp_mem_e      memtype; /* mem access type   */
+       u32                     status;
+       struct bfa_mbox_cmd_s   mbcmd;
+       u8                      *dbuf_kva; /* dma buf virtual address */
+       u64                     dbuf_pa;   /* dma buf physical address */
+       struct bfa_ioc_notify_s ioc_notify;
+       enum bfa_defs_sfp_media_e *media;
+       enum bfa_port_speed     portspeed;
+       bfa_cb_sfp_t            state_query_cbfn;
+       void                    *state_query_cbarg;
+       u8                      lock;
+       u8                      data_valid; /* data in dbuf is valid */
+       u8                      state;      /* sfp state  */
+       u8                      state_query_lock;
+       struct bfa_mem_dma_s    sfp_dma;
+       u8                      is_elb;     /* eloopback  */
+};
+
+#define BFA_SFP_MOD(__bfa)     (&(__bfa)->modules.sfp)
+#define BFA_MEM_SFP_DMA(__bfa) (&(BFA_SFP_MOD(__bfa)->sfp_dma))
+
+u32    bfa_sfp_meminfo(void);
+
+void   bfa_sfp_attach(struct bfa_sfp_s *sfp, struct bfa_ioc_s *ioc,
+                       void *dev, struct bfa_trc_mod_s *trcmod);
+
+void   bfa_sfp_memclaim(struct bfa_sfp_s *diag, u8 *dm_kva, u64 dm_pa);
+void   bfa_sfp_intr(void *bfaarg, struct bfi_mbmsg_s *msg);
+
+bfa_status_t   bfa_sfp_show(struct bfa_sfp_s *sfp, struct sfp_mem_s *sfpmem,
+                            bfa_cb_sfp_t cbfn, void *cbarg);
+
+bfa_status_t   bfa_sfp_media(struct bfa_sfp_s *sfp,
+                       enum bfa_defs_sfp_media_e *media,
+                       bfa_cb_sfp_t cbfn, void *cbarg);
+
+bfa_status_t   bfa_sfp_speed(struct bfa_sfp_s *sfp,
+                       enum bfa_port_speed portspeed,
+                       bfa_cb_sfp_t cbfn, void *cbarg);
+
+/*
+ *     Flash module specific
+ */
+typedef void   (*bfa_cb_flash_t) (void *cbarg, bfa_status_t status);
+
+struct bfa_flash_s {
+       struct bfa_ioc_s *ioc;          /* back pointer to ioc */
+       struct bfa_trc_mod_s *trcmod;
+       u32             type;           /* partition type */
+       u8              instance;       /* partition instance */
+       u8              rsv[3];
+       u32             op_busy;        /*  operation busy flag */
+       u32             residue;        /*  residual length */
+       u32             offset;         /*  offset */
+       bfa_status_t    status;         /*  status */
+       u8              *dbuf_kva;      /*  dma buf virtual address */
+       u64             dbuf_pa;        /*  dma buf physical address */
+       struct bfa_reqq_wait_s  reqq_wait; /*  to wait for room in reqq */
+       bfa_cb_flash_t  cbfn;           /*  user callback function */
+       void            *cbarg;         /*  user callback arg */
+       u8              *ubuf;          /*  user supplied buffer */
+       struct bfa_cb_qe_s      hcb_qe; /*  comp: BFA callback qelem */
+       u32             addr_off;       /*  partition address offset */
+       struct bfa_mbox_cmd_s   mb;       /*  mailbox */
+       struct bfa_ioc_notify_s ioc_notify; /*  ioc event notify */
+       struct bfa_mem_dma_s    flash_dma;
+};
+
+#define BFA_FLASH(__bfa)               (&(__bfa)->modules.flash)
+#define BFA_MEM_FLASH_DMA(__bfa)       (&(BFA_FLASH(__bfa)->flash_dma))
+
+bfa_status_t bfa_flash_get_attr(struct bfa_flash_s *flash,
+                       struct bfa_flash_attr_s *attr,
+                       bfa_cb_flash_t cbfn, void *cbarg);
+bfa_status_t bfa_flash_erase_part(struct bfa_flash_s *flash,
+                       enum bfa_flash_part_type type, u8 instance,
+                       bfa_cb_flash_t cbfn, void *cbarg);
+bfa_status_t bfa_flash_update_part(struct bfa_flash_s *flash,
+                       enum bfa_flash_part_type type, u8 instance,
+                       void *buf, u32 len, u32 offset,
+                       bfa_cb_flash_t cbfn, void *cbarg);
+bfa_status_t bfa_flash_read_part(struct bfa_flash_s *flash,
+                       enum bfa_flash_part_type type, u8 instance, void *buf,
+                       u32 len, u32 offset, bfa_cb_flash_t cbfn, void *cbarg);
+u32    bfa_flash_meminfo(bfa_boolean_t mincfg);
+void bfa_flash_attach(struct bfa_flash_s *flash, struct bfa_ioc_s *ioc,
+               void *dev, struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg);
+void bfa_flash_memclaim(struct bfa_flash_s *flash,
+               u8 *dm_kva, u64 dm_pa, bfa_boolean_t mincfg);
+
+/*
+ *     DIAG module specific
+ */
+
+typedef void (*bfa_cb_diag_t) (void *cbarg, bfa_status_t status);
+typedef void (*bfa_cb_diag_beacon_t) (void *dev, bfa_boolean_t beacon,
+                       bfa_boolean_t link_e2e_beacon);
+
+/*
+ *      Firmware ping test results
+ */
+struct bfa_diag_results_fwping {
+       u32     data;   /* store the corrupted data */
+       u32     status;
+       u32     dmastatus;
+       u8      rsvd[4];
+};
+
+struct bfa_diag_qtest_result_s {
+       u32     status;
+       u16     count;  /* successful queue test count */
+       u8      queue;
+       u8      rsvd;   /* 64-bit align */
+};
+
+/*
+ * Firmware ping test results
+ */
+struct bfa_diag_fwping_s {
+       struct bfa_diag_results_fwping *result;
+       bfa_cb_diag_t  cbfn;
+       void            *cbarg;
+       u32             data;
+       u8              lock;
+       u8              rsv[3];
+       u32             status;
+       u32             count;
+       struct bfa_mbox_cmd_s   mbcmd;
+       u8              *dbuf_kva;      /* dma buf virtual address */
+       u64             dbuf_pa;        /* dma buf physical address */
+};
+
+/*
+ *      Temperature sensor query results
+ */
+struct bfa_diag_results_tempsensor_s {
+       u32     status;
+       u16     temp;           /* 10-bit A/D value */
+       u16     brd_temp;       /* 9-bit board temp */
+       u8      ts_junc;        /* show junction tempsensor   */
+       u8      ts_brd;         /* show board tempsensor      */
+       u8      rsvd[6];        /* keep 8 bytes alignment     */
+};
+
+struct bfa_diag_tsensor_s {
+       bfa_cb_diag_t   cbfn;
+       void            *cbarg;
+       struct bfa_diag_results_tempsensor_s *temp;
+       u8              lock;
+       u8              rsv[3];
+       u32             status;
+       struct bfa_mbox_cmd_s   mbcmd;
 };
 
+struct bfa_diag_sfpshow_s {
+       struct sfp_mem_s        *sfpmem;
+       bfa_cb_diag_t           cbfn;
+       void                    *cbarg;
+       u8      lock;
+       u8      static_data;
+       u8      rsv[2];
+       u32     status;
+       struct bfa_mbox_cmd_s    mbcmd;
+       u8      *dbuf_kva;      /* dma buf virtual address */
+       u64     dbuf_pa;        /* dma buf physical address */
+};
+
+struct bfa_diag_led_s {
+       struct bfa_mbox_cmd_s   mbcmd;
+       bfa_boolean_t   lock;   /* 1: ledtest is operating */
+};
+
+struct bfa_diag_beacon_s {
+       struct bfa_mbox_cmd_s   mbcmd;
+       bfa_boolean_t   state;          /* port beacon state */
+       bfa_boolean_t   link_e2e;       /* link beacon state */
+};
+
+struct bfa_diag_s {
+       void    *dev;
+       struct bfa_ioc_s                *ioc;
+       struct bfa_trc_mod_s            *trcmod;
+       struct bfa_diag_fwping_s        fwping;
+       struct bfa_diag_tsensor_s       tsensor;
+       struct bfa_diag_sfpshow_s       sfpshow;
+       struct bfa_diag_led_s           ledtest;
+       struct bfa_diag_beacon_s        beacon;
+       void    *result;
+       struct bfa_timer_s timer;
+       bfa_cb_diag_beacon_t  cbfn_beacon;
+       bfa_cb_diag_t  cbfn;
+       void            *cbarg;
+       u8              block;
+       u8              timer_active;
+       u8              rsvd[2];
+       u32             status;
+       struct bfa_ioc_notify_s ioc_notify;
+       struct bfa_mem_dma_s    diag_dma;
+};
+
+#define BFA_DIAG_MOD(__bfa)     (&(__bfa)->modules.diag_mod)
+#define BFA_MEM_DIAG_DMA(__bfa) (&(BFA_DIAG_MOD(__bfa)->diag_dma))
+
+u32    bfa_diag_meminfo(void);
+void bfa_diag_memclaim(struct bfa_diag_s *diag, u8 *dm_kva, u64 dm_pa);
+void bfa_diag_attach(struct bfa_diag_s *diag, struct bfa_ioc_s *ioc, void *dev,
+                    bfa_cb_diag_beacon_t cbfn_beacon,
+                    struct bfa_trc_mod_s *trcmod);
+bfa_status_t   bfa_diag_reg_read(struct bfa_diag_s *diag, u32 offset,
+                       u32 len, u32 *buf, u32 force);
+bfa_status_t   bfa_diag_reg_write(struct bfa_diag_s *diag, u32 offset,
+                       u32 len, u32 value, u32 force);
+bfa_status_t   bfa_diag_tsensor_query(struct bfa_diag_s *diag,
+                       struct bfa_diag_results_tempsensor_s *result,
+                       bfa_cb_diag_t cbfn, void *cbarg);
+bfa_status_t   bfa_diag_fwping(struct bfa_diag_s *diag, u32 cnt,
+                       u32 pattern, struct bfa_diag_results_fwping *result,
+                       bfa_cb_diag_t cbfn, void *cbarg);
+bfa_status_t   bfa_diag_sfpshow(struct bfa_diag_s *diag,
+                       struct sfp_mem_s *sfpmem, u8 static_data,
+                       bfa_cb_diag_t cbfn, void *cbarg);
+bfa_status_t   bfa_diag_memtest(struct bfa_diag_s *diag,
+                       struct bfa_diag_memtest_s *memtest, u32 pattern,
+                       struct bfa_diag_memtest_result *result,
+                       bfa_cb_diag_t cbfn, void *cbarg);
+bfa_status_t   bfa_diag_ledtest(struct bfa_diag_s *diag,
+                       struct bfa_diag_ledtest_s *ledtest);
+bfa_status_t   bfa_diag_beacon_port(struct bfa_diag_s *diag,
+                       bfa_boolean_t beacon, bfa_boolean_t link_e2e_beacon,
+                       u32 sec);
+
+/*
+ *     PHY module specific
+ */
+typedef void (*bfa_cb_phy_t) (void *cbarg, bfa_status_t status);
+
+struct bfa_phy_s {
+       struct bfa_ioc_s *ioc;          /* back pointer to ioc */
+       struct bfa_trc_mod_s *trcmod;   /* trace module */
+       u8      instance;       /* port instance */
+       u8      op_busy;        /* operation busy flag */
+       u8      rsv[2];
+       u32     residue;        /* residual length */
+       u32     offset;         /* offset */
+       bfa_status_t    status;         /* status */
+       u8      *dbuf_kva;      /* dma buf virtual address */
+       u64     dbuf_pa;        /* dma buf physical address */
+       struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */
+       bfa_cb_phy_t    cbfn;           /* user callback function */
+       void            *cbarg;         /* user callback arg */
+       u8              *ubuf;          /* user supplied buffer */
+       struct bfa_cb_qe_s      hcb_qe; /* comp: BFA callback qelem */
+       u32     addr_off;       /* phy address offset */
+       struct bfa_mbox_cmd_s   mb;       /* mailbox */
+       struct bfa_ioc_notify_s ioc_notify; /* ioc event notify */
+       struct bfa_mem_dma_s    phy_dma;
+};
+
+#define BFA_PHY(__bfa) (&(__bfa)->modules.phy)
+#define BFA_MEM_PHY_DMA(__bfa) (&(BFA_PHY(__bfa)->phy_dma))
+
+bfa_boolean_t bfa_phy_busy(struct bfa_ioc_s *ioc);
+bfa_status_t bfa_phy_get_attr(struct bfa_phy_s *phy, u8 instance,
+                       struct bfa_phy_attr_s *attr,
+                       bfa_cb_phy_t cbfn, void *cbarg);
+bfa_status_t bfa_phy_get_stats(struct bfa_phy_s *phy, u8 instance,
+                       struct bfa_phy_stats_s *stats,
+                       bfa_cb_phy_t cbfn, void *cbarg);
+bfa_status_t bfa_phy_update(struct bfa_phy_s *phy, u8 instance,
+                       void *buf, u32 len, u32 offset,
+                       bfa_cb_phy_t cbfn, void *cbarg);
+bfa_status_t bfa_phy_read(struct bfa_phy_s *phy, u8 instance,
+                       void *buf, u32 len, u32 offset,
+                       bfa_cb_phy_t cbfn, void *cbarg);
+
+u32    bfa_phy_meminfo(bfa_boolean_t mincfg);
+void bfa_phy_attach(struct bfa_phy_s *phy, struct bfa_ioc_s *ioc,
+               void *dev, struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg);
+void bfa_phy_memclaim(struct bfa_phy_s *phy,
+               u8 *dm_kva, u64 dm_pa, bfa_boolean_t mincfg);
+void bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg);
+
+/*
+ *     IOC specific macros
+ */
 #define bfa_ioc_pcifn(__ioc)           ((__ioc)->pcidev.pci_func)
 #define bfa_ioc_devid(__ioc)           ((__ioc)->pcidev.device_id)
 #define bfa_ioc_bar0(__ioc)            ((__ioc)->pcidev.pci_bar_kva)
 #define bfa_ioc_portid(__ioc)          ((__ioc)->port_id)
+#define bfa_ioc_asic_gen(__ioc)                ((__ioc)->asic_gen)
+#define bfa_ioc_is_cna(__ioc)  \
+       ((bfa_ioc_get_type(__ioc) == BFA_IOC_TYPE_FCoE) ||      \
+        (bfa_ioc_get_type(__ioc) == BFA_IOC_TYPE_LL))
 #define bfa_ioc_fetch_stats(__ioc, __stats) \
                (((__stats)->drv_stats) = (__ioc)->stats)
 #define bfa_ioc_clr_stats(__ioc)       \
@@ -287,12 +707,9 @@ struct bfa_ioc_hwif_s {
 
 #define bfa_ioc_stats(_ioc, _stats)    ((_ioc)->stats._stats++)
 #define BFA_IOC_FWIMG_MINSZ    (16 * 1024)
-#define BFA_IOC_FWIMG_TYPE(__ioc)                                      \
-       (((__ioc)->ctdev) ?                                             \
-        (((__ioc)->fcmode) ? BFI_IMAGE_CT_FC : BFI_IMAGE_CT_CNA) :     \
-        BFI_IMAGE_CB_FC)
-#define BFA_IOC_FW_SMEM_SIZE(__ioc)                                    \
-       (((__ioc)->ctdev) ? BFI_SMEM_CT_SIZE : BFI_SMEM_CB_SIZE)
+#define BFA_IOC_FW_SMEM_SIZE(__ioc)                    \
+       ((bfa_ioc_asic_gen(__ioc) == BFI_ASIC_GEN_CB)   \
+        ? BFI_SMEM_CB_SIZE : BFI_SMEM_CT_SIZE)
 #define BFA_IOC_FLASH_CHUNK_NO(off)            (off / BFI_FLASH_CHUNK_SZ_WORDS)
 #define BFA_IOC_FLASH_OFFSET_IN_CHUNK(off)     (off % BFI_FLASH_CHUNK_SZ_WORDS)
 #define BFA_IOC_FLASH_CHUNK_ADDR(chunkno)  (chunkno * BFI_FLASH_CHUNK_SZ_WORDS)
@@ -305,7 +722,7 @@ void bfa_ioc_mbox_register(struct bfa_ioc_s *ioc,
                bfa_ioc_mbox_mcfunc_t *mcfuncs);
 void bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc);
 void bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len);
-void bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg);
+bfa_boolean_t bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg);
 void bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
                bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg);
 
@@ -315,40 +732,49 @@ void bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
 
 #define bfa_ioc_pll_init_asic(__ioc) \
        ((__ioc)->ioc_hwif->ioc_pll_init((__ioc)->pcidev.pci_bar_kva, \
-                          (__ioc)->fcmode))
+                          (__ioc)->asic_mode))
 
 bfa_status_t bfa_ioc_pll_init(struct bfa_ioc_s *ioc);
-bfa_status_t bfa_ioc_cb_pll_init(void __iomem *rb, bfa_boolean_t fcmode);
-bfa_boolean_t bfa_ioc_ct_pll_init_complete(void __iomem *rb);
-bfa_status_t bfa_ioc_ct_pll_init(void __iomem *rb, bfa_boolean_t fcmode);
+bfa_status_t bfa_ioc_cb_pll_init(void __iomem *rb, enum bfi_asic_mode mode);
+bfa_status_t bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode mode);
+bfa_status_t bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode mode);
 
-#define        bfa_ioc_isr_mode_set(__ioc, __msix)                     \
-                       ((__ioc)->ioc_hwif->ioc_isr_mode_set(__ioc, __msix))
+#define bfa_ioc_isr_mode_set(__ioc, __msix) do {                       \
+       if ((__ioc)->ioc_hwif->ioc_isr_mode_set)                        \
+               ((__ioc)->ioc_hwif->ioc_isr_mode_set(__ioc, __msix));   \
+} while (0)
 #define        bfa_ioc_ownership_reset(__ioc)                          \
                        ((__ioc)->ioc_hwif->ioc_ownership_reset(__ioc))
+#define bfa_ioc_get_fcmode(__ioc)      ((__ioc)->fcmode)
+#define bfa_ioc_lpu_read_stat(__ioc) do {                      \
+       if ((__ioc)->ioc_hwif->ioc_lpu_read_stat)               \
+               ((__ioc)->ioc_hwif->ioc_lpu_read_stat(__ioc));  \
+} while (0)
 
-
-void bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc);
 void bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc);
+void bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc);
+void bfa_ioc_set_ct2_hwif(struct bfa_ioc_s *ioc);
+void bfa_ioc_ct2_poweron(struct bfa_ioc_s *ioc);
 
 void bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa,
                struct bfa_ioc_cbfn_s *cbfn, struct bfa_timer_mod_s *timer_mod);
 void bfa_ioc_auto_recover(bfa_boolean_t auto_recover);
 void bfa_ioc_detach(struct bfa_ioc_s *ioc);
 void bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
-               enum bfi_mclass mc);
+               enum bfi_pcifn_class clscode);
 void bfa_ioc_mem_claim(struct bfa_ioc_s *ioc,  u8 *dm_kva, u64 dm_pa);
 void bfa_ioc_enable(struct bfa_ioc_s *ioc);
 void bfa_ioc_disable(struct bfa_ioc_s *ioc);
 bfa_boolean_t bfa_ioc_intx_claim(struct bfa_ioc_s *ioc);
 
 void bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type,
-               u32 boot_param);
+               u32 boot_env);
 void bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *msg);
 void bfa_ioc_error_isr(struct bfa_ioc_s *ioc);
 bfa_boolean_t bfa_ioc_is_operational(struct bfa_ioc_s *ioc);
 bfa_boolean_t bfa_ioc_is_initialized(struct bfa_ioc_s *ioc);
 bfa_boolean_t bfa_ioc_is_disabled(struct bfa_ioc_s *ioc);
+bfa_boolean_t bfa_ioc_is_acq_addr(struct bfa_ioc_s *ioc);
 bfa_boolean_t bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc);
 bfa_boolean_t bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc);
 void bfa_ioc_reset_fwstate(struct bfa_ioc_s *ioc);
@@ -372,8 +798,6 @@ bfa_status_t bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata,
                                 int *trclen);
 bfa_status_t bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
        u32 *offset, int *buflen);
-void bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc);
-bfa_boolean_t bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc);
 bfa_boolean_t bfa_ioc_sem_get(void __iomem *sem_reg);
 void bfa_ioc_fwver_get(struct bfa_ioc_s *ioc,
                        struct bfi_ioc_image_hdr_s *fwhdr);
@@ -382,6 +806,33 @@ bfa_boolean_t bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc,
 bfa_status_t bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats);
 bfa_status_t bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc);
 
+/*
+ * asic block configuration related APIs
+ */
+u32    bfa_ablk_meminfo(void);
+void bfa_ablk_memclaim(struct bfa_ablk_s *ablk, u8 *dma_kva, u64 dma_pa);
+void bfa_ablk_attach(struct bfa_ablk_s *ablk, struct bfa_ioc_s *ioc);
+bfa_status_t bfa_ablk_query(struct bfa_ablk_s *ablk,
+               struct bfa_ablk_cfg_s *ablk_cfg,
+               bfa_ablk_cbfn_t cbfn, void *cbarg);
+bfa_status_t bfa_ablk_adapter_config(struct bfa_ablk_s *ablk,
+               enum bfa_mode_s mode, int max_pf, int max_vf,
+               bfa_ablk_cbfn_t cbfn, void *cbarg);
+bfa_status_t bfa_ablk_port_config(struct bfa_ablk_s *ablk, int port,
+               enum bfa_mode_s mode, int max_pf, int max_vf,
+               bfa_ablk_cbfn_t cbfn, void *cbarg);
+bfa_status_t bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn,
+               u8 port, enum bfi_pcifn_class personality, int bw,
+               bfa_ablk_cbfn_t cbfn, void *cbarg);
+bfa_status_t bfa_ablk_pf_delete(struct bfa_ablk_s *ablk, int pcifn,
+               bfa_ablk_cbfn_t cbfn, void *cbarg);
+bfa_status_t bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn, int bw,
+               bfa_ablk_cbfn_t cbfn, void *cbarg);
+bfa_status_t bfa_ablk_optrom_en(struct bfa_ablk_s *ablk,
+               bfa_ablk_cbfn_t cbfn, void *cbarg);
+bfa_status_t bfa_ablk_optrom_dis(struct bfa_ablk_s *ablk,
+               bfa_ablk_cbfn_t cbfn, void *cbarg);
+
 /*
  * bfa mfg wwn API functions
  */
@@ -391,50 +842,64 @@ mac_t bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc);
 /*
  * F/W Image Size & Chunk
  */
-extern u32 bfi_image_ct_fc_size;
-extern u32 bfi_image_ct_cna_size;
-extern u32 bfi_image_cb_fc_size;
-extern u32 *bfi_image_ct_fc;
-extern u32 *bfi_image_ct_cna;
-extern u32 *bfi_image_cb_fc;
+extern u32 bfi_image_cb_size;
+extern u32 bfi_image_ct_size;
+extern u32 bfi_image_ct2_size;
+extern u32 *bfi_image_cb;
+extern u32 *bfi_image_ct;
+extern u32 *bfi_image_ct2;
 
 static inline u32 *
-bfi_image_ct_fc_get_chunk(u32 off)
-{      return (u32 *)(bfi_image_ct_fc + off); }
+bfi_image_cb_get_chunk(u32 off)
+{
+       return (u32 *)(bfi_image_cb + off);
+}
 
 static inline u32 *
-bfi_image_ct_cna_get_chunk(u32 off)
-{      return (u32 *)(bfi_image_ct_cna + off); }
+bfi_image_ct_get_chunk(u32 off)
+{
+       return (u32 *)(bfi_image_ct + off);
+}
 
 static inline u32 *
-bfi_image_cb_fc_get_chunk(u32 off)
-{      return (u32 *)(bfi_image_cb_fc + off); }
+bfi_image_ct2_get_chunk(u32 off)
+{
+       return (u32 *)(bfi_image_ct2 + off);
+}
 
 static inline u32*
-bfa_cb_image_get_chunk(int type, u32 off)
+bfa_cb_image_get_chunk(enum bfi_asic_gen asic_gen, u32 off)
 {
-       switch (type) {
-       case BFI_IMAGE_CT_FC:
-               return bfi_image_ct_fc_get_chunk(off);  break;
-       case BFI_IMAGE_CT_CNA:
-               return bfi_image_ct_cna_get_chunk(off); break;
-       case BFI_IMAGE_CB_FC:
-               return bfi_image_cb_fc_get_chunk(off);  break;
-       default: return NULL;
+       switch (asic_gen) {
+       case BFI_ASIC_GEN_CB:
+               return bfi_image_cb_get_chunk(off);
+               break;
+       case BFI_ASIC_GEN_CT:
+               return bfi_image_ct_get_chunk(off);
+               break;
+       case BFI_ASIC_GEN_CT2:
+               return bfi_image_ct2_get_chunk(off);
+               break;
+       default:
+               return NULL;
        }
 }
 
 static inline u32
-bfa_cb_image_get_size(int type)
+bfa_cb_image_get_size(enum bfi_asic_gen asic_gen)
 {
-       switch (type) {
-       case BFI_IMAGE_CT_FC:
-               return bfi_image_ct_fc_size;    break;
-       case BFI_IMAGE_CT_CNA:
-               return bfi_image_ct_cna_size;   break;
-       case BFI_IMAGE_CB_FC:
-               return bfi_image_cb_fc_size;    break;
-       default: return 0;
+       switch (asic_gen) {
+       case BFI_ASIC_GEN_CB:
+               return bfi_image_cb_size;
+               break;
+       case BFI_ASIC_GEN_CT:
+               return bfi_image_ct_size;
+               break;
+       case BFI_ASIC_GEN_CT2:
+               return bfi_image_ct2_size;
+               break;
+       default:
+               return 0;
        }
 }
 
index 89ae4c8..30df8a2 100644 (file)
@@ -17,7 +17,7 @@
 
 #include "bfad_drv.h"
 #include "bfa_ioc.h"
-#include "bfi_cbreg.h"
+#include "bfi_reg.h"
 #include "bfa_defs.h"
 
 BFA_TRC_FILE(CNA, IOC_CB);
@@ -69,21 +69,6 @@ bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc)
 static bfa_boolean_t
 bfa_ioc_cb_firmware_lock(struct bfa_ioc_s *ioc)
 {
-       struct bfi_ioc_image_hdr_s fwhdr;
-       uint32_t fwstate = readl(ioc->ioc_regs.ioc_fwstate);
-
-       if (fwstate == BFI_IOC_UNINIT)
-               return BFA_TRUE;
-
-       bfa_ioc_fwver_get(ioc, &fwhdr);
-
-       if (swab32(fwhdr.exec) == BFI_BOOT_TYPE_NORMAL)
-               return BFA_TRUE;
-
-       bfa_trc(ioc, fwstate);
-       bfa_trc(ioc, fwhdr.exec);
-       writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
-
        return BFA_TRUE;
 }
 
@@ -98,7 +83,7 @@ bfa_ioc_cb_firmware_unlock(struct bfa_ioc_s *ioc)
 static void
 bfa_ioc_cb_notify_fail(struct bfa_ioc_s *ioc)
 {
-       writel(__PSS_ERR_STATUS_SET, ioc->ioc_regs.err_set);
+       writel(~0U, ioc->ioc_regs.err_set);
        readl(ioc->ioc_regs.err_set);
 }
 
@@ -152,8 +137,8 @@ bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc)
         */
        ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
        ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
-       ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_400_CTL_REG);
-       ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_212_CTL_REG);
+       ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_LCLK_CTL_REG);
+       ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_SCLK_CTL_REG);
 
        /*
         * IOC semaphore registers and serialization
@@ -285,18 +270,18 @@ bfa_ioc_cb_sync_complete(struct bfa_ioc_s *ioc)
 }
 
 bfa_status_t
-bfa_ioc_cb_pll_init(void __iomem *rb, bfa_boolean_t fcmode)
+bfa_ioc_cb_pll_init(void __iomem *rb, enum bfi_asic_mode fcmode)
 {
        u32     pll_sclk, pll_fclk;
 
-       pll_sclk = __APP_PLL_212_ENABLE | __APP_PLL_212_LRESETN |
-               __APP_PLL_212_P0_1(3U) |
-               __APP_PLL_212_JITLMT0_1(3U) |
-               __APP_PLL_212_CNTLMT0_1(3U);
-       pll_fclk = __APP_PLL_400_ENABLE | __APP_PLL_400_LRESETN |
-               __APP_PLL_400_RSEL200500 | __APP_PLL_400_P0_1(3U) |
-               __APP_PLL_400_JITLMT0_1(3U) |
-               __APP_PLL_400_CNTLMT0_1(3U);
+       pll_sclk = __APP_PLL_SCLK_ENABLE | __APP_PLL_SCLK_LRESETN |
+               __APP_PLL_SCLK_P0_1(3U) |
+               __APP_PLL_SCLK_JITLMT0_1(3U) |
+               __APP_PLL_SCLK_CNTLMT0_1(3U);
+       pll_fclk = __APP_PLL_LCLK_ENABLE | __APP_PLL_LCLK_LRESETN |
+               __APP_PLL_LCLK_RSEL200500 | __APP_PLL_LCLK_P0_1(3U) |
+               __APP_PLL_LCLK_JITLMT0_1(3U) |
+               __APP_PLL_LCLK_CNTLMT0_1(3U);
        writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
        writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));
        writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
@@ -305,24 +290,24 @@ bfa_ioc_cb_pll_init(void __iomem *rb, bfa_boolean_t fcmode)
        writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
        writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
        writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
-       writel(__APP_PLL_212_LOGIC_SOFT_RESET, rb + APP_PLL_212_CTL_REG);
-       writel(__APP_PLL_212_BYPASS | __APP_PLL_212_LOGIC_SOFT_RESET,
-                       rb + APP_PLL_212_CTL_REG);
-       writel(__APP_PLL_400_LOGIC_SOFT_RESET, rb + APP_PLL_400_CTL_REG);
-       writel(__APP_PLL_400_BYPASS | __APP_PLL_400_LOGIC_SOFT_RESET,
-                       rb + APP_PLL_400_CTL_REG);
+       writel(__APP_PLL_SCLK_LOGIC_SOFT_RESET, rb + APP_PLL_SCLK_CTL_REG);
+       writel(__APP_PLL_SCLK_BYPASS | __APP_PLL_SCLK_LOGIC_SOFT_RESET,
+                       rb + APP_PLL_SCLK_CTL_REG);
+       writel(__APP_PLL_LCLK_LOGIC_SOFT_RESET, rb + APP_PLL_LCLK_CTL_REG);
+       writel(__APP_PLL_LCLK_BYPASS | __APP_PLL_LCLK_LOGIC_SOFT_RESET,
+                       rb + APP_PLL_LCLK_CTL_REG);
        udelay(2);
-       writel(__APP_PLL_212_LOGIC_SOFT_RESET, rb + APP_PLL_212_CTL_REG);
-       writel(__APP_PLL_400_LOGIC_SOFT_RESET, rb + APP_PLL_400_CTL_REG);
-       writel(pll_sclk | __APP_PLL_212_LOGIC_SOFT_RESET,
-                       rb + APP_PLL_212_CTL_REG);
-       writel(pll_fclk | __APP_PLL_400_LOGIC_SOFT_RESET,
-                       rb + APP_PLL_400_CTL_REG);
+       writel(__APP_PLL_SCLK_LOGIC_SOFT_RESET, rb + APP_PLL_SCLK_CTL_REG);
+       writel(__APP_PLL_LCLK_LOGIC_SOFT_RESET, rb + APP_PLL_LCLK_CTL_REG);
+       writel(pll_sclk | __APP_PLL_SCLK_LOGIC_SOFT_RESET,
+                       rb + APP_PLL_SCLK_CTL_REG);
+       writel(pll_fclk | __APP_PLL_LCLK_LOGIC_SOFT_RESET,
+                       rb + APP_PLL_LCLK_CTL_REG);
        udelay(2000);
        writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
        writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
-       writel(pll_sclk, (rb + APP_PLL_212_CTL_REG));
-       writel(pll_fclk, (rb + APP_PLL_400_CTL_REG));
+       writel(pll_sclk, (rb + APP_PLL_SCLK_CTL_REG));
+       writel(pll_fclk, (rb + APP_PLL_LCLK_CTL_REG));
 
        return BFA_STATUS_OK;
 }
index 9361252..d1b8f0c 100644 (file)
@@ -17,7 +17,7 @@
 
 #include "bfad_drv.h"
 #include "bfa_ioc.h"
-#include "bfi_ctreg.h"
+#include "bfi_reg.h"
 #include "bfa_defs.h"
 
 BFA_TRC_FILE(CNA, IOC_CT);
@@ -36,9 +36,6 @@ BFA_TRC_FILE(CNA, IOC_CT);
  */
 static bfa_boolean_t bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc);
 static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc);
-static void bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc);
-static void bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc);
-static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix);
 static void bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc);
 static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc);
 static bfa_boolean_t bfa_ioc_ct_sync_start(struct bfa_ioc_s *ioc);
@@ -48,29 +45,7 @@ static void bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc);
 static bfa_boolean_t bfa_ioc_ct_sync_complete(struct bfa_ioc_s *ioc);
 
 static struct bfa_ioc_hwif_s hwif_ct;
-
-/*
- * Called from bfa_ioc_attach() to map asic specific calls.
- */
-void
-bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc)
-{
-       hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
-       hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
-       hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
-       hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
-       hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
-       hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
-       hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
-       hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
-       hwif_ct.ioc_sync_start = bfa_ioc_ct_sync_start;
-       hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
-       hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
-       hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
-       hwif_ct.ioc_sync_complete = bfa_ioc_ct_sync_complete;
-
-       ioc->ioc_hwif = &hwif_ct;
-}
+static struct bfa_ioc_hwif_s hwif_ct2;
 
 /*
  * Return true if firmware of current driver matches the running firmware.
@@ -82,16 +57,10 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
        u32 usecnt;
        struct bfi_ioc_image_hdr_s fwhdr;
 
-       /*
-        * Firmware match check is relevant only for CNA.
-        */
-       if (!ioc->cna)
-               return BFA_TRUE;
-
        /*
         * If bios boot (flash based) -- do not increment usage count
         */
-       if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
+       if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) <
                                                BFA_IOC_FWIMG_MINSZ)
                return BFA_TRUE;
 
@@ -103,6 +72,7 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
         */
        if (usecnt == 0) {
                writel(1, ioc->ioc_regs.ioc_usage_reg);
+               readl(ioc->ioc_regs.ioc_usage_sem_reg);
                writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
                writel(0, ioc->ioc_regs.ioc_fail_sync);
                bfa_trc(ioc, usecnt);
@@ -122,6 +92,7 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
         */
        bfa_ioc_fwver_get(ioc, &fwhdr);
        if (!bfa_ioc_fwver_cmp(ioc, &fwhdr)) {
+               readl(ioc->ioc_regs.ioc_usage_sem_reg);
                writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
                bfa_trc(ioc, usecnt);
                return BFA_FALSE;
@@ -132,6 +103,7 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
         */
        usecnt++;
        writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
+       readl(ioc->ioc_regs.ioc_usage_sem_reg);
        writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
        bfa_trc(ioc, usecnt);
        return BFA_TRUE;
@@ -142,16 +114,10 @@ bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc)
 {
        u32 usecnt;
 
-       /*
-        * Firmware lock is relevant only for CNA.
-        */
-       if (!ioc->cna)
-               return;
-
        /*
         * If bios boot (flash based) -- do not decrement usage count
         */
-       if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
+       if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) <
                                                BFA_IOC_FWIMG_MINSZ)
                return;
 
@@ -166,6 +132,7 @@ bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc)
        writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
        bfa_trc(ioc, usecnt);
 
+       readl(ioc->ioc_regs.ioc_usage_sem_reg);
        writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
 }
 
@@ -175,14 +142,14 @@ bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc)
 static void
 bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc)
 {
-       if (ioc->cna) {
+       if (bfa_ioc_is_cna(ioc)) {
                writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
                writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt);
                /* Wait for halt to take effect */
                readl(ioc->ioc_regs.ll_halt);
                readl(ioc->ioc_regs.alt_ll_halt);
        } else {
-               writel(__PSS_ERR_STATUS_SET, ioc->ioc_regs.err_set);
+               writel(~0U, ioc->ioc_regs.err_set);
                readl(ioc->ioc_regs.err_set);
        }
 }
@@ -190,7 +157,7 @@ bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc)
 /*
  * Host to LPU mailbox message addresses
  */
-static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
+static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } ct_fnreg[] = {
        { HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
        { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
        { HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 },
@@ -200,21 +167,31 @@ static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
 /*
  * Host <-> LPU mailbox command/status registers - port 0
  */
-static struct { u32 hfn, lpu; } iocreg_mbcmd_p0[] = {
-       { HOSTFN0_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN0_MBOX0_CMD_STAT },
-       { HOSTFN1_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN1_MBOX0_CMD_STAT },
-       { HOSTFN2_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN2_MBOX0_CMD_STAT },
-       { HOSTFN3_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN3_MBOX0_CMD_STAT }
+static struct { u32 hfn, lpu; } ct_p0reg[] = {
+       { HOSTFN0_LPU0_CMD_STAT, LPU0_HOSTFN0_CMD_STAT },
+       { HOSTFN1_LPU0_CMD_STAT, LPU0_HOSTFN1_CMD_STAT },
+       { HOSTFN2_LPU0_CMD_STAT, LPU0_HOSTFN2_CMD_STAT },
+       { HOSTFN3_LPU0_CMD_STAT, LPU0_HOSTFN3_CMD_STAT }
 };
 
 /*
  * Host <-> LPU mailbox command/status registers - port 1
  */
-static struct { u32 hfn, lpu; } iocreg_mbcmd_p1[] = {
-       { HOSTFN0_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN0_MBOX0_CMD_STAT },
-       { HOSTFN1_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN1_MBOX0_CMD_STAT },
-       { HOSTFN2_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN2_MBOX0_CMD_STAT },
-       { HOSTFN3_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN3_MBOX0_CMD_STAT }
+static struct { u32 hfn, lpu; } ct_p1reg[] = {
+       { HOSTFN0_LPU1_CMD_STAT, LPU1_HOSTFN0_CMD_STAT },
+       { HOSTFN1_LPU1_CMD_STAT, LPU1_HOSTFN1_CMD_STAT },
+       { HOSTFN2_LPU1_CMD_STAT, LPU1_HOSTFN2_CMD_STAT },
+       { HOSTFN3_LPU1_CMD_STAT, LPU1_HOSTFN3_CMD_STAT }
+};
+
+static struct { uint32_t hfn_mbox, lpu_mbox, hfn_pgn, hfn, lpu, lpu_read; }
+       ct2_reg[] = {
+       { CT2_HOSTFN_LPU0_MBOX0, CT2_LPU0_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
+         CT2_HOSTFN_LPU0_CMD_STAT, CT2_LPU0_HOSTFN_CMD_STAT,
+         CT2_HOSTFN_LPU0_READ_STAT},
+       { CT2_HOSTFN_LPU1_MBOX0, CT2_LPU1_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
+         CT2_HOSTFN_LPU1_CMD_STAT, CT2_LPU1_HOSTFN_CMD_STAT,
+         CT2_HOSTFN_LPU1_READ_STAT},
 };
 
 static void
@@ -225,24 +202,24 @@ bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc)
 
        rb = bfa_ioc_bar0(ioc);
 
-       ioc->ioc_regs.hfn_mbox = rb + iocreg_fnreg[pcifn].hfn_mbox;
-       ioc->ioc_regs.lpu_mbox = rb + iocreg_fnreg[pcifn].lpu_mbox;
-       ioc->ioc_regs.host_page_num_fn = rb + iocreg_fnreg[pcifn].hfn_pgn;
+       ioc->ioc_regs.hfn_mbox = rb + ct_fnreg[pcifn].hfn_mbox;
+       ioc->ioc_regs.lpu_mbox = rb + ct_fnreg[pcifn].lpu_mbox;
+       ioc->ioc_regs.host_page_num_fn = rb + ct_fnreg[pcifn].hfn_pgn;
 
        if (ioc->port_id == 0) {
                ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
                ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
                ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
-               ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].hfn;
-               ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].lpu;
+               ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p0reg[pcifn].hfn;
+               ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p0reg[pcifn].lpu;
                ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
                ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
        } else {
                ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
                ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
                ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG;
-               ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].hfn;
-               ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].lpu;
+               ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p1reg[pcifn].hfn;
+               ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p1reg[pcifn].lpu;
                ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
                ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
        }
@@ -252,8 +229,8 @@ bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc)
         */
        ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
        ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
-       ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_425_CTL_REG);
-       ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_312_CTL_REG);
+       ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_LCLK_CTL_REG);
+       ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_SCLK_CTL_REG);
 
        /*
         * IOC semaphore registers and serialization
@@ -276,6 +253,64 @@ bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc)
        ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
 }
 
+static void
+bfa_ioc_ct2_reg_init(struct bfa_ioc_s *ioc)
+{
+       void __iomem *rb;
+       int     port = bfa_ioc_portid(ioc);
+
+       rb = bfa_ioc_bar0(ioc);
+
+       ioc->ioc_regs.hfn_mbox = rb + ct2_reg[port].hfn_mbox;
+       ioc->ioc_regs.lpu_mbox = rb + ct2_reg[port].lpu_mbox;
+       ioc->ioc_regs.host_page_num_fn = rb + ct2_reg[port].hfn_pgn;
+       ioc->ioc_regs.hfn_mbox_cmd = rb + ct2_reg[port].hfn;
+       ioc->ioc_regs.lpu_mbox_cmd = rb + ct2_reg[port].lpu;
+       ioc->ioc_regs.lpu_read_stat = rb + ct2_reg[port].lpu_read;
+
+       if (port == 0) {
+               ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC0_HBEAT_REG;
+               ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
+               ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG;
+               ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
+               ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
+       } else {
+               ioc->ioc_regs.heartbeat = (rb + CT2_BFA_IOC1_HBEAT_REG);
+               ioc->ioc_regs.ioc_fwstate = (rb + CT2_BFA_IOC1_STATE_REG);
+               ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
+               ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
+               ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
+       }
+
+       /*
+        * PSS control registers
+        */
+       ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
+       ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
+       ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + CT2_APP_PLL_LCLK_CTL_REG);
+       ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + CT2_APP_PLL_SCLK_CTL_REG);
+
+       /*
+        * IOC semaphore registers and serialization
+        */
+       ioc->ioc_regs.ioc_sem_reg = (rb + CT2_HOST_SEM0_REG);
+       ioc->ioc_regs.ioc_usage_sem_reg = (rb + CT2_HOST_SEM1_REG);
+       ioc->ioc_regs.ioc_init_sem_reg = (rb + CT2_HOST_SEM2_REG);
+       ioc->ioc_regs.ioc_usage_reg = (rb + CT2_BFA_FW_USE_COUNT);
+       ioc->ioc_regs.ioc_fail_sync = (rb + CT2_BFA_IOC_FAIL_SYNC);
+
+       /*
+        * sram memory access
+        */
+       ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
+       ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;
+
+       /*
+        * err set reg : for notification of hb failure in fcmode
+        */
+       ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
+}
+
 /*
  * Initialize IOC to port mapping.
  */
@@ -298,6 +333,19 @@ bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc)
        bfa_trc(ioc, ioc->port_id);
 }
 
+static void
+bfa_ioc_ct2_map_port(struct bfa_ioc_s *ioc)
+{
+       void __iomem    *rb = ioc->pcidev.pci_bar_kva;
+       u32     r32;
+
+       r32 = readl(rb + CT2_HOSTFN_PERSONALITY0);
+       ioc->port_id = ((r32 & __FC_LL_PORT_MAP__MK) >> __FC_LL_PORT_MAP__SH);
+
+       bfa_trc(ioc, bfa_ioc_pcifn(ioc));
+       bfa_trc(ioc, ioc->port_id);
+}
+
 /*
  * Set interrupt mode for a function: INTX or MSIX
  */
@@ -316,7 +364,7 @@ bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
        /*
         * If already in desired mode, do not change anything
         */
-       if (!msix && mode)
+       if ((!msix && mode) || (msix && !mode))
                return;
 
        if (msix)
@@ -331,6 +379,20 @@ bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
        writel(r32, rb + FNC_PERS_REG);
 }
 
+bfa_boolean_t
+bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc_s *ioc)
+{
+       u32     r32;
+
+       r32 = readl(ioc->ioc_regs.lpu_read_stat);
+       if (r32) {
+               writel(1, ioc->ioc_regs.lpu_read_stat);
+               return BFA_TRUE;
+       }
+
+       return BFA_FALSE;
+}
+
 /*
  * Cleanup hw semaphore and usecnt registers
  */
@@ -338,9 +400,10 @@ static void
 bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc)
 {
 
-       if (ioc->cna) {
+       if (bfa_ioc_is_cna(ioc)) {
                bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
                writel(0, ioc->ioc_regs.ioc_usage_reg);
+               readl(ioc->ioc_regs.ioc_usage_sem_reg);
                writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
        }
 
@@ -449,32 +512,99 @@ bfa_ioc_ct_sync_complete(struct bfa_ioc_s *ioc)
        return BFA_FALSE;
 }
 
+/**
+ * Called from bfa_ioc_attach() to map asic specific calls.
+ */
+static void
+bfa_ioc_set_ctx_hwif(struct bfa_ioc_s *ioc, struct bfa_ioc_hwif_s *hwif)
+{
+       hwif->ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
+       hwif->ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
+       hwif->ioc_notify_fail = bfa_ioc_ct_notify_fail;
+       hwif->ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
+       hwif->ioc_sync_start = bfa_ioc_ct_sync_start;
+       hwif->ioc_sync_join = bfa_ioc_ct_sync_join;
+       hwif->ioc_sync_leave = bfa_ioc_ct_sync_leave;
+       hwif->ioc_sync_ack = bfa_ioc_ct_sync_ack;
+       hwif->ioc_sync_complete = bfa_ioc_ct_sync_complete;
+}
+
+/**
+ * Called from bfa_ioc_attach() to map asic specific calls.
+ */
+void
+bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc)
+{
+       bfa_ioc_set_ctx_hwif(ioc, &hwif_ct);
+
+       hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
+       hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
+       hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
+       hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
+       ioc->ioc_hwif = &hwif_ct;
+}
+
+/**
+ * Called from bfa_ioc_attach() to map asic specific calls.
+ */
+void
+bfa_ioc_set_ct2_hwif(struct bfa_ioc_s *ioc)
+{
+       bfa_ioc_set_ctx_hwif(ioc, &hwif_ct2);
+
+       hwif_ct2.ioc_pll_init = bfa_ioc_ct2_pll_init;
+       hwif_ct2.ioc_reg_init = bfa_ioc_ct2_reg_init;
+       hwif_ct2.ioc_map_port = bfa_ioc_ct2_map_port;
+       hwif_ct2.ioc_lpu_read_stat = bfa_ioc_ct2_lpu_read_stat;
+       hwif_ct2.ioc_isr_mode_set = NULL;
+       ioc->ioc_hwif = &hwif_ct2;
+}
+
 /*
- * Check the firmware state to know if pll_init has been completed already
+ * Workaround for MSI-X resource allocation for catapult-2 with no asic block
  */
-bfa_boolean_t
-bfa_ioc_ct_pll_init_complete(void __iomem *rb)
+#define HOSTFN_MSIX_DEFAULT            64
+#define HOSTFN_MSIX_VT_INDEX_MBOX_ERR  0x30138
+#define HOSTFN_MSIX_VT_OFST_NUMVT      0x3013c
+#define __MSIX_VT_NUMVT__MK            0x003ff800
+#define __MSIX_VT_NUMVT__SH            11
+#define __MSIX_VT_NUMVT_(_v)           ((_v) << __MSIX_VT_NUMVT__SH)
+#define __MSIX_VT_OFST_                        0x000007ff
+void
+bfa_ioc_ct2_poweron(struct bfa_ioc_s *ioc)
 {
-       if ((readl(rb + BFA_IOC0_STATE_REG) == BFI_IOC_OP) ||
-         (readl(rb + BFA_IOC1_STATE_REG) == BFI_IOC_OP))
-               return BFA_TRUE;
+       void __iomem *rb = ioc->pcidev.pci_bar_kva;
+       u32     r32;
 
-       return BFA_FALSE;
+       r32 = readl(rb + HOSTFN_MSIX_VT_OFST_NUMVT);
+       if (r32 & __MSIX_VT_NUMVT__MK) {
+               writel(r32 & __MSIX_VT_OFST_,
+                       rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
+               return;
+       }
+
+       writel(__MSIX_VT_NUMVT_(HOSTFN_MSIX_DEFAULT - 1) |
+               HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
+               rb + HOSTFN_MSIX_VT_OFST_NUMVT);
+       writel(HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
+               rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
 }
 
 bfa_status_t
-bfa_ioc_ct_pll_init(void __iomem *rb, bfa_boolean_t fcmode)
+bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode mode)
 {
        u32     pll_sclk, pll_fclk, r32;
+       bfa_boolean_t fcmode = (mode == BFI_ASIC_MODE_FC);
+
+       pll_sclk = __APP_PLL_SCLK_LRESETN | __APP_PLL_SCLK_ENARST |
+               __APP_PLL_SCLK_RSEL200500 | __APP_PLL_SCLK_P0_1(3U) |
+               __APP_PLL_SCLK_JITLMT0_1(3U) |
+               __APP_PLL_SCLK_CNTLMT0_1(1U);
+       pll_fclk = __APP_PLL_LCLK_LRESETN | __APP_PLL_LCLK_ENARST |
+               __APP_PLL_LCLK_RSEL200500 | __APP_PLL_LCLK_P0_1(3U) |
+               __APP_PLL_LCLK_JITLMT0_1(3U) |
+               __APP_PLL_LCLK_CNTLMT0_1(1U);
 
-       pll_sclk = __APP_PLL_312_LRESETN | __APP_PLL_312_ENARST |
-               __APP_PLL_312_RSEL200500 | __APP_PLL_312_P0_1(3U) |
-               __APP_PLL_312_JITLMT0_1(3U) |
-               __APP_PLL_312_CNTLMT0_1(1U);
-       pll_fclk = __APP_PLL_425_LRESETN | __APP_PLL_425_ENARST |
-               __APP_PLL_425_RSEL200500 | __APP_PLL_425_P0_1(3U) |
-               __APP_PLL_425_JITLMT0_1(3U) |
-               __APP_PLL_425_CNTLMT0_1(1U);
        if (fcmode) {
                writel(0, (rb + OP_MODE));
                writel(__APP_EMS_CMLCKSEL | __APP_EMS_REFCKBUFEN2 |
@@ -491,20 +621,21 @@ bfa_ioc_ct_pll_init(void __iomem *rb, bfa_boolean_t fcmode)
        writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
        writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
        writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
-       writel(pll_sclk | __APP_PLL_312_LOGIC_SOFT_RESET,
-                       rb + APP_PLL_312_CTL_REG);
-       writel(pll_fclk | __APP_PLL_425_LOGIC_SOFT_RESET,
-                       rb + APP_PLL_425_CTL_REG);
-       writel(pll_sclk | __APP_PLL_312_LOGIC_SOFT_RESET | __APP_PLL_312_ENABLE,
-                       rb + APP_PLL_312_CTL_REG);
-       writel(pll_fclk | __APP_PLL_425_LOGIC_SOFT_RESET | __APP_PLL_425_ENABLE,
-                       rb + APP_PLL_425_CTL_REG);
+       writel(pll_sclk | __APP_PLL_SCLK_LOGIC_SOFT_RESET,
+                       rb + APP_PLL_SCLK_CTL_REG);
+       writel(pll_fclk | __APP_PLL_LCLK_LOGIC_SOFT_RESET,
+                       rb + APP_PLL_LCLK_CTL_REG);
+       writel(pll_sclk | __APP_PLL_SCLK_LOGIC_SOFT_RESET |
+               __APP_PLL_SCLK_ENABLE, rb + APP_PLL_SCLK_CTL_REG);
+       writel(pll_fclk | __APP_PLL_LCLK_LOGIC_SOFT_RESET |
+               __APP_PLL_LCLK_ENABLE, rb + APP_PLL_LCLK_CTL_REG);
        readl(rb + HOSTFN0_INT_MSK);
        udelay(2000);
        writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
        writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
-       writel(pll_sclk | __APP_PLL_312_ENABLE, rb + APP_PLL_312_CTL_REG);
-       writel(pll_fclk | __APP_PLL_425_ENABLE, rb + APP_PLL_425_CTL_REG);
+       writel(pll_sclk | __APP_PLL_SCLK_ENABLE, rb + APP_PLL_SCLK_CTL_REG);
+       writel(pll_fclk | __APP_PLL_LCLK_ENABLE, rb + APP_PLL_LCLK_CTL_REG);
+
        if (!fcmode) {
                writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0));
                writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1));
@@ -524,3 +655,206 @@ bfa_ioc_ct_pll_init(void __iomem *rb, bfa_boolean_t fcmode)
        writel(0, (rb + MBIST_CTL_REG));
        return BFA_STATUS_OK;
 }
+
+static void
+bfa_ioc_ct2_sclk_init(void __iomem *rb)
+{
+       u32 r32;
+
+       /*
+        * put s_clk PLL and PLL FSM in reset
+        */
+       r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
+       r32 &= ~(__APP_PLL_SCLK_ENABLE | __APP_PLL_SCLK_LRESETN);
+       r32 |= (__APP_PLL_SCLK_ENARST | __APP_PLL_SCLK_BYPASS |
+               __APP_PLL_SCLK_LOGIC_SOFT_RESET);
+       writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));
+
+       /*
+        * Ignore mode and program for the max clock (which is FC16)
+        * Firmware/NFC will do the PLL init appropriately
+        */
+       r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
+       r32 &= ~(__APP_PLL_SCLK_REFCLK_SEL | __APP_PLL_SCLK_CLK_DIV2);
+       writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));
+
+       /*
+        * while doing PLL init don't clock gate ethernet subsystem
+        */
+       r32 = readl((rb + CT2_CHIP_MISC_PRG));
+       writel(r32 | __ETH_CLK_ENABLE_PORT0, (rb + CT2_CHIP_MISC_PRG));
+
+       r32 = readl((rb + CT2_PCIE_MISC_REG));
+       writel(r32 | __ETH_CLK_ENABLE_PORT1, (rb + CT2_PCIE_MISC_REG));
+
+       /*
+        * set sclk value
+        */
+       r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
+       r32 &= (__P_SCLK_PLL_LOCK | __APP_PLL_SCLK_REFCLK_SEL |
+               __APP_PLL_SCLK_CLK_DIV2);
+       writel(r32 | 0x1061731b, (rb + CT2_APP_PLL_SCLK_CTL_REG));
+
+       /*
+        * poll for s_clk lock or delay 1ms
+        */
+       udelay(1000);
+}
+
+static void
+bfa_ioc_ct2_lclk_init(void __iomem *rb)
+{
+       u32 r32;
+
+       /*
+        * put l_clk PLL and PLL FSM in reset
+        */
+       r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
+       r32 &= ~(__APP_PLL_LCLK_ENABLE | __APP_PLL_LCLK_LRESETN);
+       r32 |= (__APP_PLL_LCLK_ENARST | __APP_PLL_LCLK_BYPASS |
+               __APP_PLL_LCLK_LOGIC_SOFT_RESET);
+       writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));
+
+       /*
+        * set LPU speed (set for FC16 which will work for other modes)
+        */
+       r32 = readl((rb + CT2_CHIP_MISC_PRG));
+       writel(r32, (rb + CT2_CHIP_MISC_PRG));
+
+       /*
+        * set LPU half speed (set for FC16 which will work for other modes)
+        */
+       r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
+       writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));
+
+       /*
+        * set lclk for mode (set for FC16)
+        */
+       r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
+       r32 &= (__P_LCLK_PLL_LOCK | __APP_LPUCLK_HALFSPEED);
+       r32 |= 0x20c1731b;
+       writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));
+
+       /*
+        * poll for l_clk lock or delay 1ms
+        */
+       udelay(1000);
+}
+
+static void
+bfa_ioc_ct2_mem_init(void __iomem *rb)
+{
+       u32     r32;
+
+       r32 = readl((rb + PSS_CTL_REG));
+       r32 &= ~__PSS_LMEM_RESET;
+       writel(r32, (rb + PSS_CTL_REG));
+       udelay(1000);
+
+       writel(__EDRAM_BISTR_START, (rb + CT2_MBIST_CTL_REG));
+       udelay(1000);
+       writel(0, (rb + CT2_MBIST_CTL_REG));
+}
+
+void
+bfa_ioc_ct2_mac_reset(void __iomem *rb)
+{
+       u32     r32;
+
+       bfa_ioc_ct2_sclk_init(rb);
+       bfa_ioc_ct2_lclk_init(rb);
+
+       /*
+        * release soft reset on s_clk
+        */
+       r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
+       writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
+               (rb + CT2_APP_PLL_SCLK_CTL_REG));
+
+       /*
+        * release soft reset on l_clk
+        */
+       r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
+       writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
+               (rb + CT2_APP_PLL_LCLK_CTL_REG));
+
+       /* put port0, port1 MAC & AHB in reset */
+       writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
+               rb + CT2_CSI_MAC_CONTROL_REG(0));
+       writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
+               rb + CT2_CSI_MAC_CONTROL_REG(1));
+}
+
+#define CT2_NFC_MAX_DELAY      1000
+bfa_status_t
+bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode mode)
+{
+       u32     wgn, r32;
+       int i;
+
+       /*
+        * Initialize PLL if not already done by NFC
+        */
+       wgn = readl(rb + CT2_WGN_STATUS);
+       if (!(wgn & __GLBL_PF_VF_CFG_RDY)) {
+               writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_SET_REG);
+               for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
+                       r32 = readl(rb + CT2_NFC_CSR_SET_REG);
+                       if (r32 & __NFC_CONTROLLER_HALTED)
+                               break;
+                       udelay(1000);
+               }
+       }
+
+       /*
+        * Mask the interrupts and clear any
+        * pending interrupts.
+        */
+       writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK));
+       writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK));
+
+       r32 = readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
+       if (r32 == 1) {
+               writel(1, (rb + CT2_LPU0_HOSTFN_CMD_STAT));
+               readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
+       }
+       r32 = readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
+       if (r32 == 1) {
+               writel(1, (rb + CT2_LPU1_HOSTFN_CMD_STAT));
+               readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
+       }
+
+       bfa_ioc_ct2_mac_reset(rb);
+       bfa_ioc_ct2_sclk_init(rb);
+       bfa_ioc_ct2_lclk_init(rb);
+
+       /*
+        * release soft reset on s_clk
+        */
+       r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
+       writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
+               (rb + CT2_APP_PLL_SCLK_CTL_REG));
+
+       /*
+        * release soft reset on l_clk
+        */
+       r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
+       writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
+               (rb + CT2_APP_PLL_LCLK_CTL_REG));
+
+       /*
+        * Announce flash device presence, if flash was corrupted.
+        */
+       if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) {
+               r32 = readl((rb + PSS_GPIO_OUT_REG));
+               writel(r32 & ~1, (rb + PSS_GPIO_OUT_REG));
+               r32 = readl((rb + PSS_GPIO_OE_REG));
+               writel(r32 | 1, (rb + PSS_GPIO_OE_REG));
+       }
+
+       bfa_ioc_ct2_mem_init(rb);
+
+       writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC0_STATE_REG));
+       writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC1_STATE_REG));
+       return BFA_STATUS_OK;
+}
index ab79ff6..1c6efd4 100644 (file)
 #include "bfa_port.h"
 
 struct bfa_modules_s {
+       struct bfa_fcdiag_s     fcdiag;         /* fcdiag module */
        struct bfa_fcport_s     fcport;         /*  fc port module            */
        struct bfa_fcxp_mod_s   fcxp_mod;       /*  fcxp module       */
        struct bfa_lps_mod_s    lps_mod;        /*  fcxp module       */
        struct bfa_uf_mod_s     uf_mod;         /*  unsolicited frame module */
        struct bfa_rport_mod_s  rport_mod;      /*  remote port module        */
-       struct bfa_fcpim_mod_s  fcpim_mod;      /*  FCP initiator module     */
+       struct bfa_fcp_mod_s    fcp_mod;        /*  FCP initiator module     */
        struct bfa_sgpg_mod_s   sgpg_mod;       /*  SG page module            */
        struct bfa_port_s       port;           /*  Physical port module     */
+       struct bfa_ablk_s       ablk;           /*  ASIC block config module */
+       struct bfa_cee_s        cee;            /*  CEE Module  */
+       struct bfa_sfp_s        sfp;            /*  SFP module  */
+       struct bfa_flash_s      flash;          /*  flash module */
+       struct bfa_diag_s       diag_mod;       /*  diagnostics module  */
+       struct bfa_phy_s        phy;            /*  phy module          */
 };
 
 /*
@@ -51,17 +58,16 @@ enum {
        BFA_TRC_HAL_IOCFC_CB    = 5,
 };
 
-
 /*
  * Macro to define a new BFA module
  */
 #define BFA_MODULE(__mod)                                              \
        static void bfa_ ## __mod ## _meminfo(                          \
-                       struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,      \
-                       u32 *dm_len);      \
+                       struct bfa_iocfc_cfg_s *cfg,                    \
+                       struct bfa_meminfo_s *meminfo,                  \
+                       struct bfa_s *bfa);                             \
        static void bfa_ ## __mod ## _attach(struct bfa_s *bfa,         \
                        void *bfad, struct bfa_iocfc_cfg_s *cfg,        \
-                       struct bfa_meminfo_s *meminfo,                  \
                        struct bfa_pcidev_s *pcidev);      \
        static void bfa_ ## __mod ## _detach(struct bfa_s *bfa);      \
        static void bfa_ ## __mod ## _start(struct bfa_s *bfa);      \
@@ -87,11 +93,11 @@ enum {
  * can leave entry points as NULL)
  */
 struct bfa_module_s {
-       void (*meminfo) (struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
-                       u32 *dm_len);
+       void (*meminfo) (struct bfa_iocfc_cfg_s *cfg,
+                        struct bfa_meminfo_s *meminfo,
+                        struct bfa_s *bfa);
        void (*attach) (struct bfa_s *bfa, void *bfad,
                        struct bfa_iocfc_cfg_s *cfg,
-                       struct bfa_meminfo_s *meminfo,
                        struct bfa_pcidev_s *pcidev);
        void (*detach) (struct bfa_s *bfa);
        void (*start) (struct bfa_s *bfa);
@@ -109,19 +115,20 @@ struct bfa_s {
        struct bfa_timer_mod_s  timer_mod;      /*  timer module            */
        struct bfa_modules_s    modules;        /*  BFA modules     */
        struct list_head        comp_q;         /*  pending completions     */
-       bfa_boolean_t           rme_process;    /*  RME processing enabled  */
+       bfa_boolean_t           queue_process;  /*  queue processing enabled */
        struct list_head        reqq_waitq[BFI_IOC_MAX_CQS];
        bfa_boolean_t           fcs;            /*  FCS is attached to BFA */
        struct bfa_msix_s       msix;
 };
 
 extern bfa_boolean_t bfa_auto_recover;
+extern struct bfa_module_s hal_mod_fcdiag;
 extern struct bfa_module_s hal_mod_sgpg;
 extern struct bfa_module_s hal_mod_fcport;
 extern struct bfa_module_s hal_mod_fcxp;
 extern struct bfa_module_s hal_mod_lps;
 extern struct bfa_module_s hal_mod_uf;
 extern struct bfa_module_s hal_mod_rport;
-extern struct bfa_module_s hal_mod_fcpim;
+extern struct bfa_module_s hal_mod_fcp;
 
 #endif /* __BFA_MODULES_H__ */
index 3f8e9d6..95e4ad8 100644 (file)
@@ -24,8 +24,6 @@
 
 BFA_TRC_FILE(CNA, PORT);
 
-#define bfa_ioc_portid(__ioc) ((__ioc)->port_id)
-
 static void
 bfa_port_stats_swap(struct bfa_port_s *port, union bfa_port_stats_u *stats)
 {
@@ -236,6 +234,12 @@ bfa_port_enable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn,
 {
        struct bfi_port_generic_req_s *m;
 
+       /* If port is PBC disabled, return error */
+       if (port->pbc_disabled) {
+               bfa_trc(port, BFA_STATUS_PBC);
+               return BFA_STATUS_PBC;
+       }
+
        if (bfa_ioc_is_disabled(port->ioc)) {
                bfa_trc(port, BFA_STATUS_IOC_DISABLED);
                return BFA_STATUS_IOC_DISABLED;
@@ -280,6 +284,12 @@ bfa_port_disable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn,
 {
        struct bfi_port_generic_req_s *m;
 
+       /* If port is PBC disabled, return error */
+       if (port->pbc_disabled) {
+               bfa_trc(port, BFA_STATUS_PBC);
+               return BFA_STATUS_PBC;
+       }
+
        if (bfa_ioc_is_disabled(port->ioc)) {
                bfa_trc(port, BFA_STATUS_IOC_DISABLED);
                return BFA_STATUS_IOC_DISABLED;
@@ -387,32 +397,43 @@ bfa_port_clear_stats(struct bfa_port_s *port, bfa_port_stats_cbfn_t cbfn,
 }
 
 /*
- * bfa_port_hbfail()
+ * bfa_port_notify()
  *
+ * Port module IOC event handler
  *
  * @param[in] Pointer to the Port module data structure.
+ * @param[in] IOC event structure
  *
  * @return void
  */
 void
-bfa_port_hbfail(void *arg)
+bfa_port_notify(void *arg, enum bfa_ioc_event_e event)
 {
        struct bfa_port_s *port = (struct bfa_port_s *) arg;
 
-       /* Fail any pending get_stats/clear_stats requests */
-       if (port->stats_busy) {
-               if (port->stats_cbfn)
-                       port->stats_cbfn(port->stats_cbarg, BFA_STATUS_FAILED);
-               port->stats_cbfn = NULL;
-               port->stats_busy = BFA_FALSE;
-       }
-
-       /* Clear any enable/disable is pending */
-       if (port->endis_pending) {
-               if (port->endis_cbfn)
-                       port->endis_cbfn(port->endis_cbarg, BFA_STATUS_FAILED);
-               port->endis_cbfn = NULL;
-               port->endis_pending = BFA_FALSE;
+       switch (event) {
+       case BFA_IOC_E_DISABLED:
+       case BFA_IOC_E_FAILED:
+               /* Fail any pending get_stats/clear_stats requests */
+               if (port->stats_busy) {
+                       if (port->stats_cbfn)
+                               port->stats_cbfn(port->stats_cbarg,
+                                               BFA_STATUS_FAILED);
+                       port->stats_cbfn = NULL;
+                       port->stats_busy = BFA_FALSE;
+               }
+
+               /* Clear any enable/disable is pending */
+               if (port->endis_pending) {
+                       if (port->endis_cbfn)
+                               port->endis_cbfn(port->endis_cbarg,
+                                               BFA_STATUS_FAILED);
+                       port->endis_cbfn = NULL;
+                       port->endis_pending = BFA_FALSE;
+               }
+               break;
+       default:
+               break;
        }
 }
 
@@ -445,10 +466,12 @@ bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
        port->endis_pending = BFA_FALSE;
        port->stats_cbfn = NULL;
        port->endis_cbfn = NULL;
+       port->pbc_disabled = BFA_FALSE;
 
        bfa_ioc_mbox_regisr(port->ioc, BFI_MC_PORT, bfa_port_isr, port);
-       bfa_ioc_hbfail_init(&port->hbfail, bfa_port_hbfail, port);
-       list_add_tail(&port->hbfail.qe, &port->ioc->hb_notify_q);
+       bfa_q_qe_init(&port->ioc_notify);
+       bfa_ioc_notify_init(&port->ioc_notify, bfa_port_notify, port);
+       list_add_tail(&port->ioc_notify.qe, &port->ioc->notify_q);
 
        /*
         * initialize time stamp for stats reset
@@ -458,3 +481,368 @@ bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
 
        bfa_trc(port, 0);
 }
+
+/*
+ *     CEE module specific definitions
+ */
+
+/*
+ * bfa_cee_get_attr_isr()
+ *
+ * @brief CEE ISR for get-attributes responses from f/w
+ *
+ * @param[in] cee - Pointer to the CEE module
+ *                 status - Return status from the f/w
+ *
+ * @return void
+ */
+static void
+bfa_cee_get_attr_isr(struct bfa_cee_s *cee, bfa_status_t status)
+{
+       struct bfa_cee_lldp_cfg_s *lldp_cfg = &cee->attr->lldp_remote;
+
+       cee->get_attr_status = status;
+       bfa_trc(cee, 0);
+       if (status == BFA_STATUS_OK) {
+               bfa_trc(cee, 0);
+               memcpy(cee->attr, cee->attr_dma.kva,
+                       sizeof(struct bfa_cee_attr_s));
+               lldp_cfg->time_to_live = be16_to_cpu(lldp_cfg->time_to_live);
+               lldp_cfg->enabled_system_cap =
+                               be16_to_cpu(lldp_cfg->enabled_system_cap);
+       }
+       cee->get_attr_pending = BFA_FALSE;
+       if (cee->cbfn.get_attr_cbfn) {
+               bfa_trc(cee, 0);
+               cee->cbfn.get_attr_cbfn(cee->cbfn.get_attr_cbarg, status);
+       }
+}
+
+/*
+ * bfa_cee_get_stats_isr()
+ *
+ * @brief CEE ISR for get-stats responses from f/w
+ *
+ * @param[in] cee - Pointer to the CEE module
+ *           status - Return status from the f/w
+ *
+ * @return void
+ */
+static void
+bfa_cee_get_stats_isr(struct bfa_cee_s *cee, bfa_status_t status)
+{
+       u32 *buffer;
+       int i;
+
+       cee->get_stats_status = status;
+       bfa_trc(cee, 0);
+       if (status == BFA_STATUS_OK) {
+               bfa_trc(cee, 0);
+               memcpy(cee->stats, cee->stats_dma.kva,
+                       sizeof(struct bfa_cee_stats_s));
+               /* swap the cee stats */
+               buffer = (u32 *)cee->stats;
+               for (i = 0; i < (sizeof(struct bfa_cee_stats_s) /
+                                sizeof(u32)); i++)
+                       buffer[i] = cpu_to_be32(buffer[i]);
+       }
+       cee->get_stats_pending = BFA_FALSE;
+       bfa_trc(cee, 0);
+       if (cee->cbfn.get_stats_cbfn) {
+               bfa_trc(cee, 0);
+               cee->cbfn.get_stats_cbfn(cee->cbfn.get_stats_cbarg, status);
+       }
+}
+
+/*
+ * bfa_cee_reset_stats_isr()
+ *
+ * @brief CEE ISR for reset-stats responses from f/w
+ *
+ * @param[in] cee - Pointer to the CEE module
+ *            status - Return status from the f/w
+ *
+ * @return void
+ */
+static void
+bfa_cee_reset_stats_isr(struct bfa_cee_s *cee, bfa_status_t status)
+{
+       cee->reset_stats_status = status;
+       cee->reset_stats_pending = BFA_FALSE;
+       if (cee->cbfn.reset_stats_cbfn)
+               cee->cbfn.reset_stats_cbfn(cee->cbfn.reset_stats_cbarg, status);
+}
+
+/*
+ * bfa_cee_meminfo()
+ *
+ * @brief Returns the size of the DMA memory needed by CEE module
+ *
+ * @param[in] void
+ *
+ * @return Size of DMA region
+ */
+u32
+bfa_cee_meminfo(void)
+{
+       return BFA_ROUNDUP(sizeof(struct bfa_cee_attr_s), BFA_DMA_ALIGN_SZ) +
+               BFA_ROUNDUP(sizeof(struct bfa_cee_stats_s), BFA_DMA_ALIGN_SZ);
+}
+
+/*
+ * bfa_cee_mem_claim()
+ *
+ * @brief Initializes CEE DMA memory
+ *
+ * @param[in] cee CEE module pointer
+ *            dma_kva Kernel Virtual Address of CEE DMA Memory
+ *            dma_pa  Physical Address of CEE DMA Memory
+ *
+ * @return void
+ */
+void
+bfa_cee_mem_claim(struct bfa_cee_s *cee, u8 *dma_kva, u64 dma_pa)
+{
+       cee->attr_dma.kva = dma_kva;
+       cee->attr_dma.pa = dma_pa;
+       cee->stats_dma.kva = dma_kva + BFA_ROUNDUP(
+                            sizeof(struct bfa_cee_attr_s), BFA_DMA_ALIGN_SZ);
+       cee->stats_dma.pa = dma_pa + BFA_ROUNDUP(
+                            sizeof(struct bfa_cee_attr_s), BFA_DMA_ALIGN_SZ);
+       cee->attr = (struct bfa_cee_attr_s *) dma_kva;
+       cee->stats = (struct bfa_cee_stats_s *) (dma_kva + BFA_ROUNDUP(
+                       sizeof(struct bfa_cee_attr_s), BFA_DMA_ALIGN_SZ));
+}
+
+/*
+ * bfa_cee_get_attr()
+ *
+ * @brief
+ *   Send the request to the f/w to fetch CEE attributes.
+ *
+ * @param[in] Pointer to the CEE module data structure.
+ *
+ * @return Status
+ */
+
+bfa_status_t
+bfa_cee_get_attr(struct bfa_cee_s *cee, struct bfa_cee_attr_s *attr,
+                bfa_cee_get_attr_cbfn_t cbfn, void *cbarg)
+{
+       struct bfi_cee_get_req_s *cmd;
+
+       WARN_ON((cee == NULL) || (cee->ioc == NULL));
+       bfa_trc(cee, 0);
+       if (!bfa_ioc_is_operational(cee->ioc)) {
+               bfa_trc(cee, 0);
+               return BFA_STATUS_IOC_FAILURE;
+       }
+       if (cee->get_attr_pending == BFA_TRUE) {
+               bfa_trc(cee, 0);
+               return  BFA_STATUS_DEVBUSY;
+       }
+       cee->get_attr_pending = BFA_TRUE;
+       cmd = (struct bfi_cee_get_req_s *) cee->get_cfg_mb.msg;
+       cee->attr = attr;
+       cee->cbfn.get_attr_cbfn = cbfn;
+       cee->cbfn.get_attr_cbarg = cbarg;
+       bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_GET_CFG_REQ,
+               bfa_ioc_portid(cee->ioc));
+       bfa_dma_be_addr_set(cmd->dma_addr, cee->attr_dma.pa);
+       bfa_ioc_mbox_queue(cee->ioc, &cee->get_cfg_mb);
+
+       return BFA_STATUS_OK;
+}
+
+/*
+ * bfa_cee_get_stats()
+ *
+ * @brief
+ *   Send the request to the f/w to fetch CEE statistics.
+ *
+ * @param[in] Pointer to the CEE module data structure.
+ *
+ * @return Status
+ */
+
+bfa_status_t
+bfa_cee_get_stats(struct bfa_cee_s *cee, struct bfa_cee_stats_s *stats,
+                 bfa_cee_get_stats_cbfn_t cbfn, void *cbarg)
+{
+       struct bfi_cee_get_req_s *cmd;
+
+       WARN_ON((cee == NULL) || (cee->ioc == NULL));
+
+       if (!bfa_ioc_is_operational(cee->ioc)) {
+               bfa_trc(cee, 0);
+               return BFA_STATUS_IOC_FAILURE;
+       }
+       if (cee->get_stats_pending == BFA_TRUE) {
+               bfa_trc(cee, 0);
+               return  BFA_STATUS_DEVBUSY;
+       }
+       cee->get_stats_pending = BFA_TRUE;
+       cmd = (struct bfi_cee_get_req_s *) cee->get_stats_mb.msg;
+       cee->stats = stats;
+       cee->cbfn.get_stats_cbfn = cbfn;
+       cee->cbfn.get_stats_cbarg = cbarg;
+       bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_GET_STATS_REQ,
+               bfa_ioc_portid(cee->ioc));
+       bfa_dma_be_addr_set(cmd->dma_addr, cee->stats_dma.pa);
+       bfa_ioc_mbox_queue(cee->ioc, &cee->get_stats_mb);
+
+       return BFA_STATUS_OK;
+}
+
+/*
+ * bfa_cee_reset_stats()
+ *
+ * @brief Clears CEE Stats in the f/w.
+ *
+ * @param[in] Pointer to the CEE module data structure.
+ *
+ * @return Status
+ */
+
+bfa_status_t
+bfa_cee_reset_stats(struct bfa_cee_s *cee,
+                   bfa_cee_reset_stats_cbfn_t cbfn, void *cbarg)
+{
+       struct bfi_cee_reset_stats_s *cmd;
+
+       WARN_ON((cee == NULL) || (cee->ioc == NULL));
+       if (!bfa_ioc_is_operational(cee->ioc)) {
+               bfa_trc(cee, 0);
+               return BFA_STATUS_IOC_FAILURE;
+       }
+       if (cee->reset_stats_pending == BFA_TRUE) {
+               bfa_trc(cee, 0);
+               return  BFA_STATUS_DEVBUSY;
+       }
+       cee->reset_stats_pending = BFA_TRUE;
+       cmd = (struct bfi_cee_reset_stats_s *) cee->reset_stats_mb.msg;
+       cee->cbfn.reset_stats_cbfn = cbfn;
+       cee->cbfn.reset_stats_cbarg = cbarg;
+       bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_RESET_STATS,
+               bfa_ioc_portid(cee->ioc));
+       bfa_ioc_mbox_queue(cee->ioc, &cee->reset_stats_mb);
+
+       return BFA_STATUS_OK;
+}
+
+/*
+ * bfa_cee_isr()
+ *
+ * @brief Handles Mail-box interrupts for CEE module.
+ *
+ * @param[in] Pointer to the CEE module data structure.
+ *
+ * @return void
+ */
+
+void
+bfa_cee_isr(void *cbarg, struct bfi_mbmsg_s *m)
+{
+       union bfi_cee_i2h_msg_u *msg;
+       struct bfi_cee_get_rsp_s *get_rsp;
+       struct bfa_cee_s *cee = (struct bfa_cee_s *) cbarg;
+       msg = (union bfi_cee_i2h_msg_u *) m;
+       get_rsp = (struct bfi_cee_get_rsp_s *) m;
+       bfa_trc(cee, msg->mh.msg_id);
+       switch (msg->mh.msg_id) {
+       case BFI_CEE_I2H_GET_CFG_RSP:
+               bfa_trc(cee, get_rsp->cmd_status);
+               bfa_cee_get_attr_isr(cee, get_rsp->cmd_status);
+               break;
+       case BFI_CEE_I2H_GET_STATS_RSP:
+               bfa_cee_get_stats_isr(cee, get_rsp->cmd_status);
+               break;
+       case BFI_CEE_I2H_RESET_STATS_RSP:
+               bfa_cee_reset_stats_isr(cee, get_rsp->cmd_status);
+               break;
+       default:
+               WARN_ON(1);
+       }
+}
+
+/*
+ * bfa_cee_notify()
+ *
+ * @brief CEE module IOC event handler.
+ *
+ * @param[in] Pointer to the CEE module data structure.
+ * @param[in] IOC event type
+ *
+ * @return void
+ */
+
+void
+bfa_cee_notify(void *arg, enum bfa_ioc_event_e event)
+{
+       struct bfa_cee_s *cee = (struct bfa_cee_s *) arg;
+
+       bfa_trc(cee, event);
+
+       switch (event) {
+       case BFA_IOC_E_DISABLED:
+       case BFA_IOC_E_FAILED:
+               if (cee->get_attr_pending == BFA_TRUE) {
+                       cee->get_attr_status = BFA_STATUS_FAILED;
+                       cee->get_attr_pending  = BFA_FALSE;
+                       if (cee->cbfn.get_attr_cbfn) {
+                               cee->cbfn.get_attr_cbfn(
+                                       cee->cbfn.get_attr_cbarg,
+                                       BFA_STATUS_FAILED);
+                       }
+               }
+               if (cee->get_stats_pending == BFA_TRUE) {
+                       cee->get_stats_status = BFA_STATUS_FAILED;
+                       cee->get_stats_pending  = BFA_FALSE;
+                       if (cee->cbfn.get_stats_cbfn) {
+                               cee->cbfn.get_stats_cbfn(
+                               cee->cbfn.get_stats_cbarg,
+                               BFA_STATUS_FAILED);
+                       }
+               }
+               if (cee->reset_stats_pending == BFA_TRUE) {
+                       cee->reset_stats_status = BFA_STATUS_FAILED;
+                       cee->reset_stats_pending  = BFA_FALSE;
+                       if (cee->cbfn.reset_stats_cbfn) {
+                               cee->cbfn.reset_stats_cbfn(
+                               cee->cbfn.reset_stats_cbarg,
+                               BFA_STATUS_FAILED);
+                       }
+               }
+               break;
+
+       default:
+               break;
+       }
+}
+
+/*
+ * bfa_cee_attach()
+ *
+ * @brief CEE module-attach API
+ *
+ * @param[in] cee - Pointer to the CEE module data structure
+ *            ioc - Pointer to the ioc module data structure
+ *            dev - Pointer to the device driver module data structure
+ *                  The device driver specific mbox ISR functions have
+ *                  this pointer as one of the parameters.
+ *
+ * @return void
+ */
+void
+bfa_cee_attach(struct bfa_cee_s *cee, struct bfa_ioc_s *ioc,
+               void *dev)
+{
+       WARN_ON(cee == NULL);
+       cee->dev = dev;
+       cee->ioc = ioc;
+
+       bfa_ioc_mbox_regisr(cee->ioc, BFI_MC_CEE, bfa_cee_isr, cee);
+       bfa_q_qe_init(&cee->ioc_notify);
+       bfa_ioc_notify_init(&cee->ioc_notify, bfa_cee_notify, cee);
+       list_add_tail(&cee->ioc_notify.qe, &cee->ioc->notify_q);
+}
index c4ee9db..947f897 100644 (file)
@@ -43,12 +43,16 @@ struct bfa_port_s {
        bfa_port_endis_cbfn_t           endis_cbfn;
        void                            *endis_cbarg;
        bfa_status_t                    endis_status;
-       struct bfa_ioc_hbfail_notify_s  hbfail;
+       struct bfa_ioc_notify_s         ioc_notify;
+       bfa_boolean_t                   pbc_disabled;
+       struct bfa_mem_dma_s            port_dma;
 };
 
+#define BFA_MEM_PORT_DMA(__bfa)                (&((__bfa)->modules.port.port_dma))
+
 void        bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
                                void *dev, struct bfa_trc_mod_s *trcmod);
-void        bfa_port_hbfail(void *arg);
+void   bfa_port_notify(void *arg, enum bfa_ioc_event_e event);
 
 bfa_status_t bfa_port_get_stats(struct bfa_port_s *port,
                                 union bfa_port_stats_u *stats,
@@ -62,4 +66,58 @@ bfa_status_t bfa_port_disable(struct bfa_port_s *port,
 u32     bfa_port_meminfo(void);
 void        bfa_port_mem_claim(struct bfa_port_s *port,
                                 u8 *dma_kva, u64 dma_pa);
+
+/*
+ * CEE declaration
+ */
+typedef void (*bfa_cee_get_attr_cbfn_t) (void *dev, bfa_status_t status);
+typedef void (*bfa_cee_get_stats_cbfn_t) (void *dev, bfa_status_t status);
+typedef void (*bfa_cee_reset_stats_cbfn_t) (void *dev, bfa_status_t status);
+
+struct bfa_cee_cbfn_s {
+       bfa_cee_get_attr_cbfn_t         get_attr_cbfn;
+       void                            *get_attr_cbarg;
+       bfa_cee_get_stats_cbfn_t        get_stats_cbfn;
+       void                            *get_stats_cbarg;
+       bfa_cee_reset_stats_cbfn_t      reset_stats_cbfn;
+       void                            *reset_stats_cbarg;
+};
+
+struct bfa_cee_s {
+       void *dev;
+       bfa_boolean_t           get_attr_pending;
+       bfa_boolean_t           get_stats_pending;
+       bfa_boolean_t           reset_stats_pending;
+       bfa_status_t            get_attr_status;
+       bfa_status_t            get_stats_status;
+       bfa_status_t            reset_stats_status;
+       struct bfa_cee_cbfn_s   cbfn;
+       struct bfa_ioc_notify_s ioc_notify;
+       struct bfa_trc_mod_s    *trcmod;
+       struct bfa_cee_attr_s   *attr;
+       struct bfa_cee_stats_s  *stats;
+       struct bfa_dma_s        attr_dma;
+       struct bfa_dma_s        stats_dma;
+       struct bfa_ioc_s        *ioc;
+       struct bfa_mbox_cmd_s   get_cfg_mb;
+       struct bfa_mbox_cmd_s   get_stats_mb;
+       struct bfa_mbox_cmd_s   reset_stats_mb;
+       struct bfa_mem_dma_s    cee_dma;
+};
+
+#define BFA_MEM_CEE_DMA(__bfa) (&((__bfa)->modules.cee.cee_dma))
+
+u32    bfa_cee_meminfo(void);
+void   bfa_cee_mem_claim(struct bfa_cee_s *cee, u8 *dma_kva, u64 dma_pa);
+void   bfa_cee_attach(struct bfa_cee_s *cee,
+                       struct bfa_ioc_s *ioc, void *dev);
+bfa_status_t   bfa_cee_get_attr(struct bfa_cee_s *cee,
+                               struct bfa_cee_attr_s *attr,
+                               bfa_cee_get_attr_cbfn_t cbfn, void *cbarg);
+bfa_status_t   bfa_cee_get_stats(struct bfa_cee_s *cee,
+                               struct bfa_cee_stats_s *stats,
+                               bfa_cee_get_stats_cbfn_t cbfn, void *cbarg);
+bfa_status_t   bfa_cee_reset_stats(struct bfa_cee_s *cee,
+                               bfa_cee_reset_stats_cbfn_t cbfn, void *cbarg);
+
 #endif /* __BFA_PORT_H__ */
index 16d9a5f..21caaef 100644 (file)
@@ -21,6 +21,7 @@
 #include "bfa_modules.h"
 
 BFA_TRC_FILE(HAL, FCXP);
+BFA_MODULE(fcdiag);
 BFA_MODULE(fcxp);
 BFA_MODULE(sgpg);
 BFA_MODULE(lps);
@@ -113,11 +114,10 @@ static void       bfa_fcxp_queue(struct bfa_fcxp_s *fcxp,
 /*
  * forward declarations for LPS functions
  */
-static void bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
-                               u32 *dm_len);
+static void bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg,
+               struct bfa_meminfo_s *minfo, struct bfa_s *bfa);
 static void bfa_lps_attach(struct bfa_s *bfa, void *bfad,
                                struct bfa_iocfc_cfg_s *cfg,
-                               struct bfa_meminfo_s *meminfo,
                                struct bfa_pcidev_s *pcidev);
 static void bfa_lps_detach(struct bfa_s *bfa);
 static void bfa_lps_start(struct bfa_s *bfa);
@@ -125,6 +125,7 @@ static void bfa_lps_stop(struct bfa_s *bfa);
 static void bfa_lps_iocdisable(struct bfa_s *bfa);
 static void bfa_lps_login_rsp(struct bfa_s *bfa,
                                struct bfi_lps_login_rsp_s *rsp);
+static void bfa_lps_no_res(struct bfa_lps_s *first_lps, u8 count);
 static void bfa_lps_logout_rsp(struct bfa_s *bfa,
                                struct bfi_lps_logout_rsp_s *rsp);
 static void bfa_lps_reqq_resume(void *lps_arg);
@@ -430,51 +431,17 @@ bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
  */
 
 static void
-claim_fcxp_req_rsp_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi)
-{
-       u8             *dm_kva = NULL;
-       u64     dm_pa;
-       u32     buf_pool_sz;
-
-       dm_kva = bfa_meminfo_dma_virt(mi);
-       dm_pa = bfa_meminfo_dma_phys(mi);
-
-       buf_pool_sz = mod->req_pld_sz * mod->num_fcxps;
-
-       /*
-        * Initialize the fcxp req payload list
-        */
-       mod->req_pld_list_kva = dm_kva;
-       mod->req_pld_list_pa = dm_pa;
-       dm_kva += buf_pool_sz;
-       dm_pa += buf_pool_sz;
-       memset(mod->req_pld_list_kva, 0, buf_pool_sz);
-
-       /*
-        * Initialize the fcxp rsp payload list
-        */
-       buf_pool_sz = mod->rsp_pld_sz * mod->num_fcxps;
-       mod->rsp_pld_list_kva = dm_kva;
-       mod->rsp_pld_list_pa = dm_pa;
-       dm_kva += buf_pool_sz;
-       dm_pa += buf_pool_sz;
-       memset(mod->rsp_pld_list_kva, 0, buf_pool_sz);
-
-       bfa_meminfo_dma_virt(mi) = dm_kva;
-       bfa_meminfo_dma_phys(mi) = dm_pa;
-}
-
-static void
-claim_fcxps_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi)
+claim_fcxps_mem(struct bfa_fcxp_mod_s *mod)
 {
        u16     i;
        struct bfa_fcxp_s *fcxp;
 
-       fcxp = (struct bfa_fcxp_s *) bfa_meminfo_kva(mi);
+       fcxp = (struct bfa_fcxp_s *) bfa_mem_kva_curp(mod);
        memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps);
 
        INIT_LIST_HEAD(&mod->fcxp_free_q);
        INIT_LIST_HEAD(&mod->fcxp_active_q);
+       INIT_LIST_HEAD(&mod->fcxp_unused_q);
 
        mod->fcxp_list = fcxp;
 
@@ -489,40 +456,53 @@ claim_fcxps_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi)
                fcxp = fcxp + 1;
        }
 
-       bfa_meminfo_kva(mi) = (void *)fcxp;
+       bfa_mem_kva_curp(mod) = (void *)fcxp;
 }
 
 static void
-bfa_fcxp_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
-                u32 *dm_len)
+bfa_fcxp_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
+               struct bfa_s *bfa)
 {
-       u16     num_fcxp_reqs = cfg->fwcfg.num_fcxp_reqs;
+       struct bfa_fcxp_mod_s *fcxp_mod = BFA_FCXP_MOD(bfa);
+       struct bfa_mem_kva_s *fcxp_kva = BFA_MEM_FCXP_KVA(bfa);
+       struct bfa_mem_dma_s *seg_ptr;
+       u16     nsegs, idx, per_seg_fcxp;
+       u16     num_fcxps = cfg->fwcfg.num_fcxp_reqs;
+       u32     per_fcxp_sz;
 
-       if (num_fcxp_reqs == 0)
+       if (num_fcxps == 0)
                return;
 
-       /*
-        * Account for req/rsp payload
-        */
-       *dm_len += BFA_FCXP_MAX_IBUF_SZ * num_fcxp_reqs;
        if (cfg->drvcfg.min_cfg)
-               *dm_len += BFA_FCXP_MAX_IBUF_SZ * num_fcxp_reqs;
+               per_fcxp_sz = 2 * BFA_FCXP_MAX_IBUF_SZ;
        else
-               *dm_len += BFA_FCXP_MAX_LBUF_SZ * num_fcxp_reqs;
+               per_fcxp_sz = BFA_FCXP_MAX_IBUF_SZ + BFA_FCXP_MAX_LBUF_SZ;
 
-       /*
-        * Account for fcxp structs
-        */
-       *ndm_len += sizeof(struct bfa_fcxp_s) * num_fcxp_reqs;
+       /* dma memory */
+       nsegs = BFI_MEM_DMA_NSEGS(num_fcxps, per_fcxp_sz);
+       per_seg_fcxp = BFI_MEM_NREQS_SEG(per_fcxp_sz);
+
+       bfa_mem_dma_seg_iter(fcxp_mod, seg_ptr, nsegs, idx) {
+               if (num_fcxps >= per_seg_fcxp) {
+                       num_fcxps -= per_seg_fcxp;
+                       bfa_mem_dma_setup(minfo, seg_ptr,
+                               per_seg_fcxp * per_fcxp_sz);
+               } else
+                       bfa_mem_dma_setup(minfo, seg_ptr,
+                               num_fcxps * per_fcxp_sz);
+       }
+
+       /* kva memory */
+       bfa_mem_kva_setup(minfo, fcxp_kva,
+               cfg->fwcfg.num_fcxp_reqs * sizeof(struct bfa_fcxp_s));
 }
 
 static void
 bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
-               struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
+               struct bfa_pcidev_s *pcidev)
 {
        struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
 
-       memset(mod, 0, sizeof(struct bfa_fcxp_mod_s));
        mod->bfa = bfa;
        mod->num_fcxps = cfg->fwcfg.num_fcxp_reqs;
 
@@ -535,8 +515,7 @@ bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
 
        INIT_LIST_HEAD(&mod->wait_q);
 
-       claim_fcxp_req_rsp_mem(mod, meminfo);
-       claim_fcxps_mem(mod, meminfo);
+       claim_fcxps_mem(mod);
 }
 
 static void
@@ -561,6 +540,9 @@ bfa_fcxp_iocdisable(struct bfa_s *bfa)
        struct bfa_fcxp_s *fcxp;
        struct list_head              *qe, *qen;
 
+       /* Enqueue unused fcxp resources to free_q */
+       list_splice_tail_init(&mod->fcxp_unused_q, &mod->fcxp_free_q);
+
        list_for_each_safe(qe, qen, &mod->fcxp_active_q) {
                fcxp = (struct bfa_fcxp_s *) qe;
                if (fcxp->caller == NULL) {
@@ -749,23 +731,6 @@ hal_fcxp_send_comp(struct bfa_s *bfa, struct bfi_fcxp_send_rsp_s *fcxp_rsp)
        }
 }
 
-static void
-hal_fcxp_set_local_sges(struct bfi_sge_s *sge, u32 reqlen, u64 req_pa)
-{
-       union bfi_addr_u      sga_zero = { {0} };
-
-       sge->sg_len = reqlen;
-       sge->flags = BFI_SGE_DATA_LAST;
-       bfa_dma_addr_set(sge[0].sga, req_pa);
-       bfa_sge_to_be(sge);
-       sge++;
-
-       sge->sga = sga_zero;
-       sge->sg_len = reqlen;
-       sge->flags = BFI_SGE_PGDLEN;
-       bfa_sge_to_be(sge);
-}
-
 static void
 hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen, struct bfa_fcxp_s *fcxp,
                 struct fchs_s *fchs)
@@ -846,7 +811,7 @@ bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
        struct bfa_rport_s              *rport = reqi->bfa_rport;
 
        bfi_h2i_set(send_req->mh, BFI_MC_FCXP, BFI_FCXP_H2I_SEND_REQ,
-                   bfa_lpuid(bfa));
+                   bfa_fn_lpu(bfa));
 
        send_req->fcxp_tag = cpu_to_be16(fcxp->fcxp_tag);
        if (rport) {
@@ -860,7 +825,7 @@ bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
        }
 
        send_req->vf_id = cpu_to_be16(reqi->vf_id);
-       send_req->lp_tag = reqi->lp_tag;
+       send_req->lp_fwtag = bfa_lps_get_fwtag(bfa, reqi->lp_tag);
        send_req->class = reqi->class;
        send_req->rsp_timeout = rspi->rsp_timeout;
        send_req->cts = reqi->cts;
@@ -873,18 +838,16 @@ bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
         * setup req sgles
         */
        if (fcxp->use_ireqbuf == 1) {
-               hal_fcxp_set_local_sges(send_req->req_sge, reqi->req_tot_len,
+               bfa_alen_set(&send_req->req_alen, reqi->req_tot_len,
                                        BFA_FCXP_REQ_PLD_PA(fcxp));
        } else {
                if (fcxp->nreq_sgles > 0) {
                        WARN_ON(fcxp->nreq_sgles != 1);
-                       hal_fcxp_set_local_sges(send_req->req_sge,
-                                               reqi->req_tot_len,
-                                               fcxp->req_sga_cbfn(fcxp->caller,
-                                                                  0));
+                       bfa_alen_set(&send_req->req_alen, reqi->req_tot_len,
+                               fcxp->req_sga_cbfn(fcxp->caller, 0));
                } else {
                        WARN_ON(reqi->req_tot_len != 0);
-                       hal_fcxp_set_local_sges(send_req->rsp_sge, 0, 0);
+                       bfa_alen_set(&send_req->rsp_alen, 0, 0);
                }
        }
 
@@ -894,25 +857,23 @@ bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
        if (fcxp->use_irspbuf == 1) {
                WARN_ON(rspi->rsp_maxlen > BFA_FCXP_MAX_LBUF_SZ);
 
-               hal_fcxp_set_local_sges(send_req->rsp_sge, rspi->rsp_maxlen,
+               bfa_alen_set(&send_req->rsp_alen, rspi->rsp_maxlen,
                                        BFA_FCXP_RSP_PLD_PA(fcxp));
-
        } else {
                if (fcxp->nrsp_sgles > 0) {
                        WARN_ON(fcxp->nrsp_sgles != 1);
-                       hal_fcxp_set_local_sges(send_req->rsp_sge,
-                                               rspi->rsp_maxlen,
-                                               fcxp->rsp_sga_cbfn(fcxp->caller,
-                                                                  0));
+                       bfa_alen_set(&send_req->rsp_alen, rspi->rsp_maxlen,
+                               fcxp->rsp_sga_cbfn(fcxp->caller, 0));
+
                } else {
                        WARN_ON(rspi->rsp_maxlen != 0);
-                       hal_fcxp_set_local_sges(send_req->rsp_sge, 0, 0);
+                       bfa_alen_set(&send_req->rsp_alen, 0, 0);
                }
        }
 
        hal_fcxp_tx_plog(bfa, reqi->req_tot_len, fcxp, &reqi->fchs);
 
-       bfa_reqq_produce(bfa, BFA_REQQ_FCXP);
+       bfa_reqq_produce(bfa, BFA_REQQ_FCXP, send_req->mh);
 
        bfa_trc(bfa, bfa_reqq_pi(bfa, BFA_REQQ_FCXP));
        bfa_trc(bfa, bfa_reqq_ci(bfa, BFA_REQQ_FCXP));
@@ -978,8 +939,8 @@ bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp)
        void    *reqbuf;
 
        WARN_ON(fcxp->use_ireqbuf != 1);
-       reqbuf = ((u8 *)mod->req_pld_list_kva) +
-               fcxp->fcxp_tag * mod->req_pld_sz;
+       reqbuf = bfa_mem_get_dmabuf_kva(mod, fcxp->fcxp_tag,
+                               mod->req_pld_sz + mod->rsp_pld_sz);
        return reqbuf;
 }
 
@@ -1002,13 +963,15 @@ void *
 bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp)
 {
        struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
-       void    *rspbuf;
+       void    *fcxp_buf;
 
        WARN_ON(fcxp->use_irspbuf != 1);
 
-       rspbuf = ((u8 *)mod->rsp_pld_list_kva) +
-               fcxp->fcxp_tag * mod->rsp_pld_sz;
-       return rspbuf;
+       fcxp_buf = bfa_mem_get_dmabuf_kva(mod, fcxp->fcxp_tag,
+                               mod->req_pld_sz + mod->rsp_pld_sz);
+
+       /* fcxp_buf = req_buf + rsp_buf :- add req_buf_sz to get to rsp_buf */
+       return ((u8 *) fcxp_buf) + mod->req_pld_sz;
 }
 
 /*
@@ -1181,6 +1144,18 @@ bfa_fcxp_get_maxrsp(struct bfa_s *bfa)
        return mod->rsp_pld_sz;
 }
 
+void
+bfa_fcxp_res_recfg(struct bfa_s *bfa, u16 num_fcxp_fw)
+{
+       struct bfa_fcxp_mod_s   *mod = BFA_FCXP_MOD(bfa);
+       struct list_head        *qe;
+       int     i;
+
+       for (i = 0; i < (mod->num_fcxps - num_fcxp_fw); i++) {
+               bfa_q_deq_tail(&mod->fcxp_free_q, &qe);
+               list_add_tail(qe, &mod->fcxp_unused_q);
+       }
+}
 
 /*
  *  BFA LPS state machine functions
@@ -1192,7 +1167,7 @@ bfa_fcxp_get_maxrsp(struct bfa_s *bfa)
 static void
 bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event)
 {
-       bfa_trc(lps->bfa, lps->lp_tag);
+       bfa_trc(lps->bfa, lps->bfa_tag);
        bfa_trc(lps->bfa, event);
 
        switch (event) {
@@ -1244,7 +1219,7 @@ bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event)
 static void
 bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
 {
-       bfa_trc(lps->bfa, lps->lp_tag);
+       bfa_trc(lps->bfa, lps->bfa_tag);
        bfa_trc(lps->bfa, event);
 
        switch (event) {
@@ -1278,6 +1253,7 @@ bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
                break;
 
        case BFA_LPS_SM_OFFLINE:
+       case BFA_LPS_SM_DELETE:
                bfa_sm_set_state(lps, bfa_lps_sm_init);
                break;
 
@@ -1297,7 +1273,7 @@ bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
 static void
 bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event)
 {
-       bfa_trc(lps->bfa, lps->lp_tag);
+       bfa_trc(lps->bfa, lps->bfa_tag);
        bfa_trc(lps->bfa, event);
 
        switch (event) {
@@ -1306,6 +1282,7 @@ bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event)
                break;
 
        case BFA_LPS_SM_OFFLINE:
+       case BFA_LPS_SM_DELETE:
                bfa_sm_set_state(lps, bfa_lps_sm_init);
                bfa_reqq_wcancel(&lps->wqe);
                break;
@@ -1329,7 +1306,7 @@ bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event)
 static void
 bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event)
 {
-       bfa_trc(lps->bfa, lps->lp_tag);
+       bfa_trc(lps->bfa, lps->bfa_tag);
        bfa_trc(lps->bfa, event);
 
        switch (event) {
@@ -1378,7 +1355,7 @@ bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event)
 static void
 bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps, enum bfa_lps_event event)
 {
-       bfa_trc(lps->bfa, lps->lp_tag);
+       bfa_trc(lps->bfa, lps->bfa_tag);
        bfa_trc(lps->bfa, event);
 
        switch (event) {
@@ -1420,7 +1397,7 @@ bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps, enum bfa_lps_event event)
 static void
 bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event)
 {
-       bfa_trc(lps->bfa, lps->lp_tag);
+       bfa_trc(lps->bfa, lps->bfa_tag);
        bfa_trc(lps->bfa, event);
 
        switch (event) {
@@ -1430,6 +1407,7 @@ bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event)
                break;
 
        case BFA_LPS_SM_OFFLINE:
+       case BFA_LPS_SM_DELETE:
                bfa_sm_set_state(lps, bfa_lps_sm_init);
                break;
 
@@ -1444,7 +1422,7 @@ bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event)
 static void
 bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event)
 {
-       bfa_trc(lps->bfa, lps->lp_tag);
+       bfa_trc(lps->bfa, lps->bfa_tag);
        bfa_trc(lps->bfa, event);
 
        switch (event) {
@@ -1454,6 +1432,7 @@ bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event)
                break;
 
        case BFA_LPS_SM_OFFLINE:
+       case BFA_LPS_SM_DELETE:
                bfa_sm_set_state(lps, bfa_lps_sm_init);
                bfa_reqq_wcancel(&lps->wqe);
                break;
@@ -1473,13 +1452,17 @@ bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event)
  * return memory requirement
  */
 static void
-bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
-       u32 *dm_len)
+bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
+               struct bfa_s *bfa)
 {
+       struct bfa_mem_kva_s *lps_kva = BFA_MEM_LPS_KVA(bfa);
+
        if (cfg->drvcfg.min_cfg)
-               *ndm_len += sizeof(struct bfa_lps_s) * BFA_LPS_MIN_LPORTS;
+               bfa_mem_kva_setup(minfo, lps_kva,
+                       sizeof(struct bfa_lps_s) * BFA_LPS_MIN_LPORTS);
        else
-               *ndm_len += sizeof(struct bfa_lps_s) * BFA_LPS_MAX_LPORTS;
+               bfa_mem_kva_setup(minfo, lps_kva,
+                       sizeof(struct bfa_lps_s) * BFA_LPS_MAX_LPORTS);
 }
 
 /*
@@ -1487,28 +1470,28 @@ bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
  */
 static void
 bfa_lps_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
-       struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
+       struct bfa_pcidev_s *pcidev)
 {
        struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
        struct bfa_lps_s        *lps;
        int                     i;
 
-       memset(mod, 0, sizeof(struct bfa_lps_mod_s));
        mod->num_lps = BFA_LPS_MAX_LPORTS;
        if (cfg->drvcfg.min_cfg)
                mod->num_lps = BFA_LPS_MIN_LPORTS;
        else
                mod->num_lps = BFA_LPS_MAX_LPORTS;
-       mod->lps_arr = lps = (struct bfa_lps_s *) bfa_meminfo_kva(meminfo);
+       mod->lps_arr = lps = (struct bfa_lps_s *) bfa_mem_kva_curp(mod);
 
-       bfa_meminfo_kva(meminfo) += mod->num_lps * sizeof(struct bfa_lps_s);
+       bfa_mem_kva_curp(mod) += mod->num_lps * sizeof(struct bfa_lps_s);
 
        INIT_LIST_HEAD(&mod->lps_free_q);
        INIT_LIST_HEAD(&mod->lps_active_q);
+       INIT_LIST_HEAD(&mod->lps_login_q);
 
        for (i = 0; i < mod->num_lps; i++, lps++) {
                lps->bfa        = bfa;
-               lps->lp_tag     = (u8) i;
+               lps->bfa_tag    = (u8) i;
                lps->reqq       = BFA_REQQ_LPS;
                bfa_reqq_winit(&lps->wqe, bfa_lps_reqq_resume, lps);
                list_add_tail(&lps->qe, &mod->lps_free_q);
@@ -1544,6 +1527,11 @@ bfa_lps_iocdisable(struct bfa_s *bfa)
                lps = (struct bfa_lps_s *) qe;
                bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
        }
+       list_for_each_safe(qe, qen, &mod->lps_login_q) {
+               lps = (struct bfa_lps_s *) qe;
+               bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
+       }
+       list_splice_tail_init(&mod->lps_login_q, &mod->lps_active_q);
 }
 
 /*
@@ -1555,12 +1543,13 @@ bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp)
        struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
        struct bfa_lps_s        *lps;
 
-       WARN_ON(rsp->lp_tag >= mod->num_lps);
-       lps = BFA_LPS_FROM_TAG(mod, rsp->lp_tag);
+       WARN_ON(rsp->bfa_tag >= mod->num_lps);
+       lps = BFA_LPS_FROM_TAG(mod, rsp->bfa_tag);
 
        lps->status = rsp->status;
        switch (rsp->status) {
        case BFA_STATUS_OK:
+               lps->fw_tag     = rsp->fw_tag;
                lps->fport      = rsp->f_port;
                if (lps->fport)
                        lps->lp_pid = rsp->lp_pid;
@@ -1572,6 +1561,7 @@ bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp)
                lps->lp_mac     = rsp->lp_mac;
                lps->brcd_switch = rsp->brcd_switch;
                lps->fcf_mac    = rsp->fcf_mac;
+               lps->pr_bbscn   = rsp->bb_scn;
 
                break;
 
@@ -1586,14 +1576,46 @@ bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp)
 
                break;
 
+       case BFA_STATUS_VPORT_MAX:
+               if (!rsp->ext_status)
+                       bfa_lps_no_res(lps, rsp->ext_status);
+               break;
+
        default:
                /* Nothing to do with other status */
                break;
        }
 
+       list_del(&lps->qe);
+       list_add_tail(&lps->qe, &mod->lps_active_q);
        bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
 }
 
+static void
+bfa_lps_no_res(struct bfa_lps_s *first_lps, u8 count)
+{
+       struct bfa_s            *bfa = first_lps->bfa;
+       struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
+       struct list_head        *qe, *qe_next;
+       struct bfa_lps_s        *lps;
+
+       bfa_trc(bfa, count);
+
+       qe = bfa_q_next(first_lps);
+
+       while (count && qe) {
+               qe_next = bfa_q_next(qe);
+               lps = (struct bfa_lps_s *)qe;
+               bfa_trc(bfa, lps->bfa_tag);
+               lps->status = first_lps->status;
+               list_del(&lps->qe);
+               list_add_tail(&lps->qe, &mod->lps_active_q);
+               bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
+               qe = qe_next;
+               count--;
+       }
+}
+
 /*
  * Firmware logout response
  */
@@ -1603,8 +1625,8 @@ bfa_lps_logout_rsp(struct bfa_s *bfa, struct bfi_lps_logout_rsp_s *rsp)
        struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
        struct bfa_lps_s        *lps;
 
-       WARN_ON(rsp->lp_tag >= mod->num_lps);
-       lps = BFA_LPS_FROM_TAG(mod, rsp->lp_tag);
+       WARN_ON(rsp->bfa_tag >= mod->num_lps);
+       lps = BFA_LPS_FROM_TAG(mod, rsp->bfa_tag);
 
        bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
 }
@@ -1618,7 +1640,7 @@ bfa_lps_rx_cvl_event(struct bfa_s *bfa, struct bfi_lps_cvl_event_s *cvl)
        struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
        struct bfa_lps_s        *lps;
 
-       lps = BFA_LPS_FROM_TAG(mod, cvl->lp_tag);
+       lps = BFA_LPS_FROM_TAG(mod, cvl->bfa_tag);
 
        bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL);
 }
@@ -1653,23 +1675,27 @@ bfa_lps_free(struct bfa_lps_s *lps)
 static void
 bfa_lps_send_login(struct bfa_lps_s *lps)
 {
+       struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(lps->bfa);
        struct bfi_lps_login_req_s      *m;
 
        m = bfa_reqq_next(lps->bfa, lps->reqq);
        WARN_ON(!m);
 
        bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGIN_REQ,
-               bfa_lpuid(lps->bfa));
+               bfa_fn_lpu(lps->bfa));
 
-       m->lp_tag       = lps->lp_tag;
+       m->bfa_tag      = lps->bfa_tag;
        m->alpa         = lps->alpa;
        m->pdu_size     = cpu_to_be16(lps->pdusz);
        m->pwwn         = lps->pwwn;
        m->nwwn         = lps->nwwn;
        m->fdisc        = lps->fdisc;
        m->auth_en      = lps->auth_en;
+       m->bb_scn       = lps->bb_scn;
 
-       bfa_reqq_produce(lps->bfa, lps->reqq);
+       bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
+       list_del(&lps->qe);
+       list_add_tail(&lps->qe, &mod->lps_login_q);
 }
 
 /*
@@ -1684,11 +1710,11 @@ bfa_lps_send_logout(struct bfa_lps_s *lps)
        WARN_ON(!m);
 
        bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGOUT_REQ,
-               bfa_lpuid(lps->bfa));
+               bfa_fn_lpu(lps->bfa));
 
-       m->lp_tag    = lps->lp_tag;
+       m->fw_tag = lps->fw_tag;
        m->port_name = lps->pwwn;
-       bfa_reqq_produce(lps->bfa, lps->reqq);
+       bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
 }
 
 /*
@@ -1703,11 +1729,11 @@ bfa_lps_send_set_n2n_pid(struct bfa_lps_s *lps)
        WARN_ON(!m);
 
        bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_N2N_PID_REQ,
-               bfa_lpuid(lps->bfa));
+               bfa_fn_lpu(lps->bfa));
 
-       m->lp_tag = lps->lp_tag;
+       m->fw_tag = lps->fw_tag;
        m->lp_pid = lps->lp_pid;
-       bfa_reqq_produce(lps->bfa, lps->reqq);
+       bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
 }
 
 /*
@@ -1859,7 +1885,7 @@ bfa_lps_delete(struct bfa_lps_s *lps)
  */
 void
 bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz,
-       wwn_t pwwn, wwn_t nwwn, bfa_boolean_t auth_en)
+       wwn_t pwwn, wwn_t nwwn, bfa_boolean_t auth_en, uint8_t bb_scn)
 {
        lps->uarg       = uarg;
        lps->alpa       = alpa;
@@ -1868,6 +1894,7 @@ bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz,
        lps->nwwn       = nwwn;
        lps->fdisc      = BFA_FALSE;
        lps->auth_en    = auth_en;
+       lps->bb_scn     = bb_scn;
        bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
 }
 
@@ -1898,6 +1925,13 @@ bfa_lps_fdisclogo(struct bfa_lps_s *lps)
        bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
 }
 
+u8
+bfa_lps_get_fwtag(struct bfa_s *bfa, u8 lp_tag)
+{
+       struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
+
+       return BFA_LPS_FROM_TAG(mod, lp_tag)->fw_tag;
+}
 
 /*
  * Return lport services tag given the pid
@@ -1911,7 +1945,7 @@ bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid)
 
        for (i = 0, lps = mod->lps_arr; i < mod->num_lps; i++, lps++) {
                if (lps->lp_pid == pid)
-                       return lps->lp_tag;
+                       return lps->bfa_tag;
        }
 
        /* Return base port tag anyway */
@@ -1936,7 +1970,7 @@ bfa_lps_get_base_pid(struct bfa_s *bfa)
 void
 bfa_lps_set_n2n_pid(struct bfa_lps_s *lps, uint32_t n2n_pid)
 {
-       bfa_trc(lps->bfa, lps->lp_tag);
+       bfa_trc(lps->bfa, lps->bfa_tag);
        bfa_trc(lps->bfa, n2n_pid);
 
        lps->lp_pid = n2n_pid;
@@ -1955,15 +1989,15 @@ bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
        msg.msg = m;
 
        switch (m->mhdr.msg_id) {
-       case BFI_LPS_H2I_LOGIN_RSP:
+       case BFI_LPS_I2H_LOGIN_RSP:
                bfa_lps_login_rsp(bfa, msg.login_rsp);
                break;
 
-       case BFI_LPS_H2I_LOGOUT_RSP:
+       case BFI_LPS_I2H_LOGOUT_RSP:
                bfa_lps_logout_rsp(bfa, msg.logout_rsp);
                break;
 
-       case BFI_LPS_H2I_CVL_EVENT:
+       case BFI_LPS_I2H_CVL_EVENT:
                bfa_lps_rx_cvl_event(bfa, msg.cvl_event);
                break;
 
@@ -2777,10 +2811,12 @@ bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, enum bfa_port_linkstate event)
                                                        BFA_CACHELINE_SZ))
 
 static void
-bfa_fcport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
-               u32 *dm_len)
+bfa_fcport_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
+                  struct bfa_s *bfa)
 {
-       *dm_len += FCPORT_STATS_DMA_SZ;
+       struct bfa_mem_dma_s *fcport_dma = BFA_MEM_FCPORT_DMA(bfa);
+
+       bfa_mem_dma_setup(minfo, fcport_dma, FCPORT_STATS_DMA_SZ);
 }
 
 static void
@@ -2792,23 +2828,14 @@ bfa_fcport_qresume(void *cbarg)
 }
 
 static void
-bfa_fcport_mem_claim(struct bfa_fcport_s *fcport, struct bfa_meminfo_s *meminfo)
+bfa_fcport_mem_claim(struct bfa_fcport_s *fcport)
 {
-       u8              *dm_kva;
-       u64     dm_pa;
+       struct bfa_mem_dma_s *fcport_dma = &fcport->fcport_dma;
 
-       dm_kva = bfa_meminfo_dma_virt(meminfo);
-       dm_pa  = bfa_meminfo_dma_phys(meminfo);
-
-       fcport->stats_kva = dm_kva;
-       fcport->stats_pa  = dm_pa;
-       fcport->stats     = (union bfa_fcport_stats_u *) dm_kva;
-
-       dm_kva += FCPORT_STATS_DMA_SZ;
-       dm_pa  += FCPORT_STATS_DMA_SZ;
-
-       bfa_meminfo_dma_virt(meminfo) = dm_kva;
-       bfa_meminfo_dma_phys(meminfo) = dm_pa;
+       fcport->stats_kva = bfa_mem_dma_virt(fcport_dma);
+       fcport->stats_pa  = bfa_mem_dma_phys(fcport_dma);
+       fcport->stats = (union bfa_fcport_stats_u *)
+                               bfa_mem_dma_virt(fcport_dma);
 }
 
 /*
@@ -2816,18 +2843,17 @@ bfa_fcport_mem_claim(struct bfa_fcport_s *fcport, struct bfa_meminfo_s *meminfo)
  */
 static void
 bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
-               struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
+               struct bfa_pcidev_s *pcidev)
 {
        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
        struct bfa_port_cfg_s *port_cfg = &fcport->cfg;
        struct bfa_fcport_ln_s *ln = &fcport->ln;
        struct timeval tv;
 
-       memset(fcport, 0, sizeof(struct bfa_fcport_s));
        fcport->bfa = bfa;
        ln->fcport = fcport;
 
-       bfa_fcport_mem_claim(fcport, meminfo);
+       bfa_fcport_mem_claim(fcport);
 
        bfa_sm_set_state(fcport, bfa_fcport_sm_uninit);
        bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
@@ -2921,6 +2947,7 @@ bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport)
 {
        fcport->speed = BFA_PORT_SPEED_UNKNOWN;
        fcport->topology = BFA_PORT_TOPOLOGY_NONE;
+       fcport->bbsc_op_state = BFA_FALSE;
 }
 
 /*
@@ -2948,7 +2975,7 @@ bfa_fcport_send_enable(struct bfa_fcport_s *fcport)
        }
 
        bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_ENABLE_REQ,
-                       bfa_lpuid(fcport->bfa));
+                       bfa_fn_lpu(fcport->bfa));
        m->nwwn = fcport->nwwn;
        m->pwwn = fcport->pwwn;
        m->port_cfg = fcport->cfg;
@@ -2962,7 +2989,7 @@ bfa_fcport_send_enable(struct bfa_fcport_s *fcport)
        /*
         * queue I/O message to firmware
         */
-       bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
+       bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh);
        return BFA_TRUE;
 }
 
@@ -2991,13 +3018,13 @@ bfa_fcport_send_disable(struct bfa_fcport_s *fcport)
        }
 
        bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_DISABLE_REQ,
-                       bfa_lpuid(fcport->bfa));
+                       bfa_fn_lpu(fcport->bfa));
        m->msgtag = fcport->msgtag;
 
        /*
         * queue I/O message to firmware
         */
-       bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
+       bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh);
 
        return BFA_TRUE;
 }
@@ -3029,13 +3056,14 @@ bfa_fcport_send_txcredit(void *port_cbarg)
        }
 
        bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ,
-                       bfa_lpuid(fcport->bfa));
+                       bfa_fn_lpu(fcport->bfa));
        m->tx_bbcredit = cpu_to_be16((u16)fcport->cfg.tx_bbcredit);
+       m->bb_scn = fcport->cfg.bb_scn;
 
        /*
         * queue I/O message to firmware
         */
-       bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
+       bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh);
 }
 
 static void
@@ -3139,8 +3167,8 @@ bfa_fcport_send_stats_get(void *cbarg)
 
        memset(msg, 0, sizeof(struct bfi_fcport_req_s));
        bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_GET_REQ,
-                       bfa_lpuid(fcport->bfa));
-       bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
+                       bfa_fn_lpu(fcport->bfa));
+       bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, msg->mh);
 }
 
 static void
@@ -3201,8 +3229,8 @@ bfa_fcport_send_stats_clear(void *cbarg)
 
        memset(msg, 0, sizeof(struct bfi_fcport_req_s));
        bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_CLEAR_REQ,
-                       bfa_lpuid(fcport->bfa));
-       bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
+                       bfa_fn_lpu(fcport->bfa));
+       bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, msg->mh);
 }
 
 /*
@@ -3329,6 +3357,9 @@ bfa_fcport_init(struct bfa_s *bfa)
        fcport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc);
        fcport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc);
 
+       if (bfa_fcport_is_pbcdisabled(bfa))
+               bfa->modules.port.pbc_disabled = BFA_TRUE;
+
        WARN_ON(!fcport->cfg.maxfrsize);
        WARN_ON(!fcport->cfg.rx_bbcredit);
        WARN_ON(!fcport->speed_sup);
@@ -3453,6 +3484,9 @@ bfa_fcport_enable(struct bfa_s *bfa)
 {
        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
 
+       if (bfa_fcport_is_pbcdisabled(bfa))
+               return BFA_STATUS_PBC;
+
        if (bfa_ioc_is_disabled(&bfa->ioc))
                return BFA_STATUS_IOC_DISABLED;
 
@@ -3466,6 +3500,8 @@ bfa_fcport_enable(struct bfa_s *bfa)
 bfa_status_t
 bfa_fcport_disable(struct bfa_s *bfa)
 {
+       if (bfa_fcport_is_pbcdisabled(bfa))
+               return BFA_STATUS_PBC;
 
        if (bfa_ioc_is_disabled(&bfa->ioc))
                return BFA_STATUS_IOC_DISABLED;
@@ -3474,6 +3510,21 @@ bfa_fcport_disable(struct bfa_s *bfa)
        return BFA_STATUS_OK;
 }
 
+/* If PBC is disabled on port, return error */
+bfa_status_t
+bfa_fcport_is_pbcdisabled(struct bfa_s *bfa)
+{
+       struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+       struct bfa_iocfc_s *iocfc = &bfa->iocfc;
+       struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
+
+       if (cfgrsp->pbc_cfg.port_enabled == BFI_PBC_PORT_DISABLED) {
+               bfa_trc(bfa, fcport->pwwn);
+               return BFA_STATUS_PBC;
+       }
+       return BFA_STATUS_OK;
+}
+
 /*
  * Configure port speed.
  */
@@ -3491,6 +3542,28 @@ bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_port_speed speed)
                return BFA_STATUS_UNSUPP_SPEED;
        }
 
+       /* For Mezz card, port speed entered needs to be checked */
+       if (bfa_mfg_is_mezz(fcport->bfa->ioc.attr->card_type)) {
+               if (bfa_ioc_get_type(&fcport->bfa->ioc) == BFA_IOC_TYPE_FC) {
+                       /* For CT2, 1G is not supported */
+                       if ((speed == BFA_PORT_SPEED_1GBPS) &&
+                           (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)))
+                               return BFA_STATUS_UNSUPP_SPEED;
+
+                       /* Already checked for Auto Speed and Max Speed supp */
+                       if (!(speed == BFA_PORT_SPEED_1GBPS ||
+                             speed == BFA_PORT_SPEED_2GBPS ||
+                             speed == BFA_PORT_SPEED_4GBPS ||
+                             speed == BFA_PORT_SPEED_8GBPS ||
+                             speed == BFA_PORT_SPEED_16GBPS ||
+                             speed == BFA_PORT_SPEED_AUTO))
+                               return BFA_STATUS_UNSUPP_SPEED;
+               } else {
+                       if (speed != BFA_PORT_SPEED_10GBPS)
+                               return BFA_STATUS_UNSUPP_SPEED;
+               }
+       }
+
        fcport->cfg.speed = speed;
 
        return BFA_STATUS_OK;
@@ -3624,11 +3697,14 @@ bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa)
 }
 
 void
-bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit)
+bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit, u8 bb_scn)
 {
        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
 
        fcport->cfg.tx_bbcredit = (u8)tx_bbcredit;
+       fcport->cfg.bb_scn = bb_scn;
+       if (bb_scn)
+               fcport->bbsc_op_state = BFA_TRUE;
        bfa_fcport_send_txcredit(fcport);
 }
 
@@ -3675,16 +3751,23 @@ bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr)
        /* beacon attributes */
        attr->beacon = fcport->beacon;
        attr->link_e2e_beacon = fcport->link_e2e_beacon;
-       attr->plog_enabled = (bfa_boolean_t)fcport->bfa->plog->plog_enabled;
-       attr->io_profile = bfa_fcpim_get_io_profile(fcport->bfa);
 
        attr->pport_cfg.path_tov  = bfa_fcpim_path_tov_get(bfa);
        attr->pport_cfg.q_depth  = bfa_fcpim_qdepth_get(bfa);
        attr->port_state = bfa_sm_to_state(hal_port_sm_table, fcport->sm);
-       if (bfa_ioc_is_disabled(&fcport->bfa->ioc))
-               attr->port_state = BFA_PORT_ST_IOCDIS;
-       else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc))
-               attr->port_state = BFA_PORT_ST_FWMISMATCH;
+       attr->bbsc_op_status =  fcport->bbsc_op_state;
+
+       /* PBC Disabled State */
+       if (bfa_fcport_is_pbcdisabled(bfa))
+               attr->port_state = BFA_PORT_ST_PREBOOT_DISABLED;
+       else {
+               if (bfa_ioc_is_disabled(&fcport->bfa->ioc))
+                       attr->port_state = BFA_PORT_ST_IOCDIS;
+               else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc))
+                       attr->port_state = BFA_PORT_ST_FWMISMATCH;
+               else if (bfa_ioc_is_acq_addr(&fcport->bfa->ioc))
+                       attr->port_state = BFA_PORT_ST_ACQ_ADDR;
+       }
 
        /* FCoE vlan */
        attr->fcoe_vlan = fcport->fcoe_vlan;
@@ -3765,6 +3848,18 @@ bfa_fcport_is_ratelim(struct bfa_s *bfa)
 
 }
 
+/*
+ *     Enable/Disable FAA feature in port config
+ */
+void
+bfa_fcport_cfg_faa(struct bfa_s *bfa, u8 state)
+{
+       struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+       bfa_trc(bfa, state);
+       fcport->cfg.faa_state = state;
+}
+
 /*
  * Get default minimum ratelim speed
  */
@@ -3778,6 +3873,22 @@ bfa_fcport_get_ratelim_speed(struct bfa_s *bfa)
 
 }
 
+void
+bfa_fcport_beacon(void *dev, bfa_boolean_t beacon,
+                 bfa_boolean_t link_e2e_beacon)
+{
+       struct bfa_s *bfa = dev;
+       struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+       bfa_trc(bfa, beacon);
+       bfa_trc(bfa, link_e2e_beacon);
+       bfa_trc(bfa, fcport->beacon);
+       bfa_trc(bfa, fcport->link_e2e_beacon);
+
+       fcport->beacon = beacon;
+       fcport->link_e2e_beacon = link_e2e_beacon;
+}
+
 bfa_boolean_t
 bfa_fcport_is_linkup(struct bfa_s *bfa)
 {
@@ -3797,6 +3908,14 @@ bfa_fcport_is_qos_enabled(struct bfa_s *bfa)
        return fcport->cfg.qos_enabled;
 }
 
+bfa_boolean_t
+bfa_fcport_is_trunk_enabled(struct bfa_s *bfa)
+{
+       struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+       return fcport->cfg.trunked;
+}
+
 /*
  * Rport State machine functions
  */
@@ -4286,18 +4405,22 @@ bfa_rport_qresume(void *cbarg)
 }
 
 static void
-bfa_rport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
-               u32 *dm_len)
+bfa_rport_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
+               struct bfa_s *bfa)
 {
+       struct bfa_mem_kva_s *rport_kva = BFA_MEM_RPORT_KVA(bfa);
+
        if (cfg->fwcfg.num_rports < BFA_RPORT_MIN)
                cfg->fwcfg.num_rports = BFA_RPORT_MIN;
 
-       *km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_rport_s);
+       /* kva memory */
+       bfa_mem_kva_setup(minfo, rport_kva,
+               cfg->fwcfg.num_rports * sizeof(struct bfa_rport_s));
 }
 
 static void
 bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
-                    struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
+               struct bfa_pcidev_s *pcidev)
 {
        struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
        struct bfa_rport_s *rp;
@@ -4305,8 +4428,9 @@ bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
 
        INIT_LIST_HEAD(&mod->rp_free_q);
        INIT_LIST_HEAD(&mod->rp_active_q);
+       INIT_LIST_HEAD(&mod->rp_unused_q);
 
-       rp = (struct bfa_rport_s *) bfa_meminfo_kva(meminfo);
+       rp = (struct bfa_rport_s *) bfa_mem_kva_curp(mod);
        mod->rps_list = rp;
        mod->num_rports = cfg->fwcfg.num_rports;
 
@@ -4331,7 +4455,7 @@ bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
        /*
         * consume memory
         */
-       bfa_meminfo_kva(meminfo) = (u8 *) rp;
+       bfa_mem_kva_curp(mod) = (u8 *) rp;
 }
 
 static void
@@ -4356,6 +4480,9 @@ bfa_rport_iocdisable(struct bfa_s *bfa)
        struct bfa_rport_s *rport;
        struct list_head *qe, *qen;
 
+       /* Enqueue unused rport resources to free_q */
+       list_splice_tail_init(&mod->rp_unused_q, &mod->rp_free_q);
+
        list_for_each_safe(qe, qen, &mod->rp_active_q) {
                rport = (struct bfa_rport_s *) qe;
                bfa_sm_send_event(rport, BFA_RPORT_SM_HWFAIL);
@@ -4399,11 +4526,11 @@ bfa_rport_send_fwcreate(struct bfa_rport_s *rp)
        }
 
        bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_CREATE_REQ,
-                       bfa_lpuid(rp->bfa));
+                       bfa_fn_lpu(rp->bfa));
        m->bfa_handle = rp->rport_tag;
        m->max_frmsz = cpu_to_be16(rp->rport_info.max_frmsz);
        m->pid = rp->rport_info.pid;
-       m->lp_tag = rp->rport_info.lp_tag;
+       m->lp_fwtag = bfa_lps_get_fwtag(rp->bfa, (u8)rp->rport_info.lp_tag);
        m->local_pid = rp->rport_info.local_pid;
        m->fc_class = rp->rport_info.fc_class;
        m->vf_en = rp->rport_info.vf_en;
@@ -4413,7 +4540,7 @@ bfa_rport_send_fwcreate(struct bfa_rport_s *rp)
        /*
         * queue I/O message to firmware
         */
-       bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
+       bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
        return BFA_TRUE;
 }
 
@@ -4432,13 +4559,13 @@ bfa_rport_send_fwdelete(struct bfa_rport_s *rp)
        }
 
        bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_DELETE_REQ,
-                       bfa_lpuid(rp->bfa));
+                       bfa_fn_lpu(rp->bfa));
        m->fw_handle = rp->fw_handle;
 
        /*
         * queue I/O message to firmware
         */
-       bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
+       bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
        return BFA_TRUE;
 }
 
@@ -4457,14 +4584,14 @@ bfa_rport_send_fwspeed(struct bfa_rport_s *rp)
        }
 
        bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_SET_SPEED_REQ,
-                       bfa_lpuid(rp->bfa));
+                       bfa_fn_lpu(rp->bfa));
        m->fw_handle = rp->fw_handle;
        m->speed = (u8)rp->rport_info.speed;
 
        /*
         * queue I/O message to firmware
         */
-       bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
+       bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
        return BFA_TRUE;
 }
 
@@ -4514,7 +4641,18 @@ bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
        }
 }
 
+void
+bfa_rport_res_recfg(struct bfa_s *bfa, u16 num_rport_fw)
+{
+       struct bfa_rport_mod_s  *mod = BFA_RPORT_MOD(bfa);
+       struct list_head        *qe;
+       int     i;
 
+       for (i = 0; i < (mod->num_rports - num_rport_fw); i++) {
+               bfa_q_deq_tail(&mod->rp_free_q, &qe);
+               list_add_tail(qe, &mod->rp_unused_q);
+       }
+}
 
 /*
  *  bfa_rport_api
@@ -4577,26 +4715,51 @@ bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed)
  * Compute and return memory needed by FCP(im) module.
  */
 static void
-bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
-               u32 *dm_len)
+bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
+               struct bfa_s *bfa)
 {
+       struct bfa_sgpg_mod_s *sgpg_mod = BFA_SGPG_MOD(bfa);
+       struct bfa_mem_kva_s *sgpg_kva = BFA_MEM_SGPG_KVA(bfa);
+       struct bfa_mem_dma_s *seg_ptr;
+       u16     nsegs, idx, per_seg_sgpg, num_sgpg;
+       u32     sgpg_sz = sizeof(struct bfi_sgpg_s);
+
        if (cfg->drvcfg.num_sgpgs < BFA_SGPG_MIN)
                cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
+       else if (cfg->drvcfg.num_sgpgs > BFA_SGPG_MAX)
+               cfg->drvcfg.num_sgpgs = BFA_SGPG_MAX;
 
-       *km_len += (cfg->drvcfg.num_sgpgs + 1) * sizeof(struct bfa_sgpg_s);
-       *dm_len += (cfg->drvcfg.num_sgpgs + 1) * sizeof(struct bfi_sgpg_s);
-}
+       num_sgpg = cfg->drvcfg.num_sgpgs;
 
+       nsegs = BFI_MEM_DMA_NSEGS(num_sgpg, sgpg_sz);
+       per_seg_sgpg = BFI_MEM_NREQS_SEG(sgpg_sz);
+
+       bfa_mem_dma_seg_iter(sgpg_mod, seg_ptr, nsegs, idx) {
+               if (num_sgpg >= per_seg_sgpg) {
+                       num_sgpg -= per_seg_sgpg;
+                       bfa_mem_dma_setup(minfo, seg_ptr,
+                                       per_seg_sgpg * sgpg_sz);
+               } else
+                       bfa_mem_dma_setup(minfo, seg_ptr,
+                                       num_sgpg * sgpg_sz);
+       }
+
+       /* kva memory */
+       bfa_mem_kva_setup(minfo, sgpg_kva,
+               cfg->drvcfg.num_sgpgs * sizeof(struct bfa_sgpg_s));
+}
 
 static void
 bfa_sgpg_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
-                   struct bfa_meminfo_s *minfo, struct bfa_pcidev_s *pcidev)
+               struct bfa_pcidev_s *pcidev)
 {
        struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
-       int i;
        struct bfa_sgpg_s *hsgpg;
        struct bfi_sgpg_s *sgpg;
        u64 align_len;
+       struct bfa_mem_dma_s *seg_ptr;
+       u32     sgpg_sz = sizeof(struct bfi_sgpg_s);
+       u16     i, idx, nsegs, per_seg_sgpg, num_sgpg;
 
        union {
                u64 pa;
@@ -4608,39 +4771,45 @@ bfa_sgpg_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
 
        bfa_trc(bfa, cfg->drvcfg.num_sgpgs);
 
-       mod->num_sgpgs = cfg->drvcfg.num_sgpgs;
-       mod->sgpg_arr_pa = bfa_meminfo_dma_phys(minfo);
-       align_len = (BFA_SGPG_ROUNDUP(mod->sgpg_arr_pa) - mod->sgpg_arr_pa);
-       mod->sgpg_arr_pa += align_len;
-       mod->hsgpg_arr = (struct bfa_sgpg_s *) (bfa_meminfo_kva(minfo) +
-                                               align_len);
-       mod->sgpg_arr = (struct bfi_sgpg_s *) (bfa_meminfo_dma_virt(minfo) +
-                                               align_len);
-
-       hsgpg = mod->hsgpg_arr;
-       sgpg = mod->sgpg_arr;
-       sgpg_pa.pa = mod->sgpg_arr_pa;
-       mod->free_sgpgs = mod->num_sgpgs;
-
-       WARN_ON(sgpg_pa.pa & (sizeof(struct bfi_sgpg_s) - 1));
-
-       for (i = 0; i < mod->num_sgpgs; i++) {
-               memset(hsgpg, 0, sizeof(*hsgpg));
-               memset(sgpg, 0, sizeof(*sgpg));
-
-               hsgpg->sgpg = sgpg;
-               sgpg_pa_tmp.pa = bfa_sgaddr_le(sgpg_pa.pa);
-               hsgpg->sgpg_pa = sgpg_pa_tmp.addr;
-               list_add_tail(&hsgpg->qe, &mod->sgpg_q);
-
-               hsgpg++;
-               sgpg++;
-               sgpg_pa.pa += sizeof(struct bfi_sgpg_s);
+       mod->free_sgpgs = mod->num_sgpgs = cfg->drvcfg.num_sgpgs;
+
+       num_sgpg = cfg->drvcfg.num_sgpgs;
+       nsegs = BFI_MEM_DMA_NSEGS(num_sgpg, sgpg_sz);
+
+       /* dma/kva mem claim */
+       hsgpg = (struct bfa_sgpg_s *) bfa_mem_kva_curp(mod);
+
+       bfa_mem_dma_seg_iter(mod, seg_ptr, nsegs, idx) {
+
+               if (!bfa_mem_dma_virt(seg_ptr))
+                       break;
+
+               align_len = BFA_SGPG_ROUNDUP(bfa_mem_dma_phys(seg_ptr)) -
+                                            bfa_mem_dma_phys(seg_ptr);
+
+               sgpg = (struct bfi_sgpg_s *)
+                       (((u8 *) bfa_mem_dma_virt(seg_ptr)) + align_len);
+               sgpg_pa.pa = bfa_mem_dma_phys(seg_ptr) + align_len;
+               WARN_ON(sgpg_pa.pa & (sgpg_sz - 1));
+
+               per_seg_sgpg = (seg_ptr->mem_len - (u32)align_len) / sgpg_sz;
+
+               for (i = 0; num_sgpg > 0 && i < per_seg_sgpg; i++, num_sgpg--) {
+                       memset(hsgpg, 0, sizeof(*hsgpg));
+                       memset(sgpg, 0, sizeof(*sgpg));
+
+                       hsgpg->sgpg = sgpg;
+                       sgpg_pa_tmp.pa = bfa_sgaddr_le(sgpg_pa.pa);
+                       hsgpg->sgpg_pa = sgpg_pa_tmp.addr;
+                       list_add_tail(&hsgpg->qe, &mod->sgpg_q);
+
+                       sgpg++;
+                       hsgpg++;
+                       sgpg_pa.pa += sgpg_sz;
+               }
        }
 
-       bfa_meminfo_kva(minfo) = (u8 *) hsgpg;
-       bfa_meminfo_dma_virt(minfo) = (u8 *) sgpg;
-       bfa_meminfo_dma_phys(minfo) = sgpg_pa.pa;
+       bfa_mem_kva_curp(mod) = (u8 *) hsgpg;
 }
 
 static void
@@ -4782,31 +4951,13 @@ __bfa_cb_uf_recv(void *cbarg, bfa_boolean_t complete)
 }
 
 static void
-claim_uf_pbs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
-{
-       u32 uf_pb_tot_sz;
-
-       ufm->uf_pbs_kva = (struct bfa_uf_buf_s *) bfa_meminfo_dma_virt(mi);
-       ufm->uf_pbs_pa = bfa_meminfo_dma_phys(mi);
-       uf_pb_tot_sz = BFA_ROUNDUP((sizeof(struct bfa_uf_buf_s) * ufm->num_ufs),
-                                                       BFA_DMA_ALIGN_SZ);
-
-       bfa_meminfo_dma_virt(mi) += uf_pb_tot_sz;
-       bfa_meminfo_dma_phys(mi) += uf_pb_tot_sz;
-
-       memset((void *)ufm->uf_pbs_kva, 0, uf_pb_tot_sz);
-}
-
-static void
-claim_uf_post_msgs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
+claim_uf_post_msgs(struct bfa_uf_mod_s *ufm)
 {
        struct bfi_uf_buf_post_s *uf_bp_msg;
-       struct bfi_sge_s      *sge;
-       union bfi_addr_u      sga_zero = { {0} };
        u16 i;
        u16 buf_len;
 
-       ufm->uf_buf_posts = (struct bfi_uf_buf_post_s *) bfa_meminfo_kva(mi);
+       ufm->uf_buf_posts = (struct bfi_uf_buf_post_s *) bfa_mem_kva_curp(ufm);
        uf_bp_msg = ufm->uf_buf_posts;
 
        for (i = 0, uf_bp_msg = ufm->uf_buf_posts; i < ufm->num_ufs;
@@ -4817,28 +4968,18 @@ claim_uf_post_msgs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
                buf_len = sizeof(struct bfa_uf_buf_s);
                uf_bp_msg->buf_len = cpu_to_be16(buf_len);
                bfi_h2i_set(uf_bp_msg->mh, BFI_MC_UF, BFI_UF_H2I_BUF_POST,
-                           bfa_lpuid(ufm->bfa));
-
-               sge = uf_bp_msg->sge;
-               sge[0].sg_len = buf_len;
-               sge[0].flags = BFI_SGE_DATA_LAST;
-               bfa_dma_addr_set(sge[0].sga, ufm_pbs_pa(ufm, i));
-               bfa_sge_to_be(sge);
-
-               sge[1].sg_len = buf_len;
-               sge[1].flags = BFI_SGE_PGDLEN;
-               sge[1].sga = sga_zero;
-               bfa_sge_to_be(&sge[1]);
+                           bfa_fn_lpu(ufm->bfa));
+               bfa_alen_set(&uf_bp_msg->alen, buf_len, ufm_pbs_pa(ufm, i));
        }
 
        /*
         * advance pointer beyond consumed memory
         */
-       bfa_meminfo_kva(mi) = (u8 *) uf_bp_msg;
+       bfa_mem_kva_curp(ufm) = (u8 *) uf_bp_msg;
 }
 
 static void
-claim_ufs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
+claim_ufs(struct bfa_uf_mod_s *ufm)
 {
        u16 i;
        struct bfa_uf_s   *uf;
@@ -4846,7 +4987,7 @@ claim_ufs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
        /*
         * Claim block of memory for UF list
         */
-       ufm->uf_list = (struct bfa_uf_s *) bfa_meminfo_kva(mi);
+       ufm->uf_list = (struct bfa_uf_s *) bfa_mem_kva_curp(ufm);
 
        /*
         * Initialize UFs and queue it in UF free queue
@@ -4855,8 +4996,8 @@ claim_ufs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
                memset(uf, 0, sizeof(struct bfa_uf_s));
                uf->bfa = ufm->bfa;
                uf->uf_tag = i;
-               uf->pb_len = sizeof(struct bfa_uf_buf_s);
-               uf->buf_kva = (void *)&ufm->uf_pbs_kva[i];
+               uf->pb_len = BFA_PER_UF_DMA_SZ;
+               uf->buf_kva = bfa_mem_get_dmabuf_kva(ufm, i, BFA_PER_UF_DMA_SZ);
                uf->buf_pa = ufm_pbs_pa(ufm, i);
                list_add_tail(&uf->qe, &ufm->uf_free_q);
        }
@@ -4864,48 +5005,57 @@ claim_ufs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
        /*
         * advance memory pointer
         */
-       bfa_meminfo_kva(mi) = (u8 *) uf;
+       bfa_mem_kva_curp(ufm) = (u8 *) uf;
 }
 
 static void
-uf_mem_claim(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
+uf_mem_claim(struct bfa_uf_mod_s *ufm)
 {
-       claim_uf_pbs(ufm, mi);
-       claim_ufs(ufm, mi);
-       claim_uf_post_msgs(ufm, mi);
+       claim_ufs(ufm);
+       claim_uf_post_msgs(ufm);
 }
 
 static void
-bfa_uf_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, u32 *dm_len)
+bfa_uf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
+               struct bfa_s *bfa)
 {
-       u32 num_ufs = cfg->fwcfg.num_uf_bufs;
-
-       /*
-        * dma-able memory for UF posted bufs
-        */
-       *dm_len += BFA_ROUNDUP((sizeof(struct bfa_uf_buf_s) * num_ufs),
-                                                       BFA_DMA_ALIGN_SZ);
+       struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
+       struct bfa_mem_kva_s *uf_kva = BFA_MEM_UF_KVA(bfa);
+       u32     num_ufs = cfg->fwcfg.num_uf_bufs;
+       struct bfa_mem_dma_s *seg_ptr;
+       u16     nsegs, idx, per_seg_uf = 0;
+
+       nsegs = BFI_MEM_DMA_NSEGS(num_ufs, BFA_PER_UF_DMA_SZ);
+       per_seg_uf = BFI_MEM_NREQS_SEG(BFA_PER_UF_DMA_SZ);
+
+       bfa_mem_dma_seg_iter(ufm, seg_ptr, nsegs, idx) {
+               if (num_ufs >= per_seg_uf) {
+                       num_ufs -= per_seg_uf;
+                       bfa_mem_dma_setup(minfo, seg_ptr,
+                               per_seg_uf * BFA_PER_UF_DMA_SZ);
+               } else
+                       bfa_mem_dma_setup(minfo, seg_ptr,
+                               num_ufs * BFA_PER_UF_DMA_SZ);
+       }
 
-       /*
-        * kernel Virtual memory for UFs and UF buf post msg copies
-        */
-       *ndm_len += sizeof(struct bfa_uf_s) * num_ufs;
-       *ndm_len += sizeof(struct bfi_uf_buf_post_s) * num_ufs;
+       /* kva memory */
+       bfa_mem_kva_setup(minfo, uf_kva, cfg->fwcfg.num_uf_bufs *
+               (sizeof(struct bfa_uf_s) + sizeof(struct bfi_uf_buf_post_s)));
 }
 
 static void
 bfa_uf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
-                 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
+               struct bfa_pcidev_s *pcidev)
 {
        struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
 
-       memset(ufm, 0, sizeof(struct bfa_uf_mod_s));
        ufm->bfa = bfa;
        ufm->num_ufs = cfg->fwcfg.num_uf_bufs;
        INIT_LIST_HEAD(&ufm->uf_free_q);
        INIT_LIST_HEAD(&ufm->uf_posted_q);
+       INIT_LIST_HEAD(&ufm->uf_unused_q);
 
-       uf_mem_claim(ufm, meminfo);
+       uf_mem_claim(ufm);
 }
 
 static void
@@ -4939,7 +5089,7 @@ bfa_uf_post(struct bfa_uf_mod_s *ufm, struct bfa_uf_s *uf)
 
        memcpy(uf_post_msg, &ufm->uf_buf_posts[uf->uf_tag],
                      sizeof(struct bfi_uf_buf_post_s));
-       bfa_reqq_produce(ufm->bfa, BFA_REQQ_FCXP);
+       bfa_reqq_produce(ufm->bfa, BFA_REQQ_FCXP, uf_post_msg->mh);
 
        bfa_trc(ufm->bfa, uf->uf_tag);
 
@@ -4963,11 +5113,15 @@ uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m)
 {
        struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
        u16 uf_tag = m->buf_tag;
-       struct bfa_uf_buf_s *uf_buf = &ufm->uf_pbs_kva[uf_tag];
        struct bfa_uf_s *uf = &ufm->uf_list[uf_tag];
-       u8 *buf = &uf_buf->d[0];
+       struct bfa_uf_buf_s *uf_buf;
+       uint8_t *buf;
        struct fchs_s *fchs;
 
+       uf_buf = (struct bfa_uf_buf_s *)
+                       bfa_mem_get_dmabuf_kva(ufm, uf_tag, uf->pb_len);
+       buf = &uf_buf->d[0];
+
        m->frm_len = be16_to_cpu(m->frm_len);
        m->xfr_len = be16_to_cpu(m->xfr_len);
 
@@ -5008,6 +5162,9 @@ bfa_uf_iocdisable(struct bfa_s *bfa)
        struct bfa_uf_s *uf;
        struct list_head *qe, *qen;
 
+       /* Enqueue unused uf resources to free_q */
+       list_splice_tail_init(&ufm->uf_unused_q, &ufm->uf_free_q);
+
        list_for_each_safe(qe, qen, &ufm->uf_posted_q) {
                uf = (struct bfa_uf_s *) qe;
                list_del(&uf->qe);
@@ -5072,4 +5229,415 @@ bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
        }
 }
 
+void
+bfa_uf_res_recfg(struct bfa_s *bfa, u16 num_uf_fw)
+{
+       struct bfa_uf_mod_s     *mod = BFA_UF_MOD(bfa);
+       struct list_head        *qe;
+       int     i;
+
+       for (i = 0; i < (mod->num_ufs - num_uf_fw); i++) {
+               bfa_q_deq_tail(&mod->uf_free_q, &qe);
+               list_add_tail(qe, &mod->uf_unused_q);
+       }
+}
+
+/*
+ *     BFA fcdiag module
+ */
+#define BFA_DIAG_QTEST_TOV     1000    /* msec */
+
+/*
+ *     Set port status to busy
+ */
+static void
+bfa_fcdiag_set_busy_status(struct bfa_fcdiag_s *fcdiag)
+{
+       struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(fcdiag->bfa);
+
+       if (fcdiag->lb.lock)
+               fcport->diag_busy = BFA_TRUE;
+       else
+               fcport->diag_busy = BFA_FALSE;
+}
+
+static void
+bfa_fcdiag_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
+               struct bfa_s *bfa)
+{
+}
+
+static void
+bfa_fcdiag_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
+               struct bfa_pcidev_s *pcidev)
+{
+       struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
+       fcdiag->bfa             = bfa;
+       fcdiag->trcmod  = bfa->trcmod;
+       /* The common DIAG attach bfa_diag_attach() will do all memory claim */
+}
+
+static void
+bfa_fcdiag_iocdisable(struct bfa_s *bfa)
+{
+       struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
+       bfa_trc(fcdiag, fcdiag->lb.lock);
+       if (fcdiag->lb.lock) {
+               fcdiag->lb.status = BFA_STATUS_IOC_FAILURE;
+               fcdiag->lb.cbfn(fcdiag->lb.cbarg, fcdiag->lb.status);
+               fcdiag->lb.lock = 0;
+               bfa_fcdiag_set_busy_status(fcdiag);
+       }
+}
+
+static void
+bfa_fcdiag_detach(struct bfa_s *bfa)
+{
+}
+
+static void
+bfa_fcdiag_start(struct bfa_s *bfa)
+{
+}
+
+static void
+bfa_fcdiag_stop(struct bfa_s *bfa)
+{
+}
+
+static void
+bfa_fcdiag_queuetest_timeout(void *cbarg)
+{
+       struct bfa_fcdiag_s       *fcdiag = cbarg;
+       struct bfa_diag_qtest_result_s *res = fcdiag->qtest.result;
+
+       bfa_trc(fcdiag, fcdiag->qtest.all);
+       bfa_trc(fcdiag, fcdiag->qtest.count);
+
+       fcdiag->qtest.timer_active = 0;
+
+       res->status = BFA_STATUS_ETIMER;
+       res->count  = QTEST_CNT_DEFAULT - fcdiag->qtest.count;
+       if (fcdiag->qtest.all)
+               res->queue  = fcdiag->qtest.all;
+
+       bfa_trc(fcdiag, BFA_STATUS_ETIMER);
+       fcdiag->qtest.status = BFA_STATUS_ETIMER;
+       fcdiag->qtest.cbfn(fcdiag->qtest.cbarg, fcdiag->qtest.status);
+       fcdiag->qtest.lock = 0;
+}
+
+static bfa_status_t
+bfa_fcdiag_queuetest_send(struct bfa_fcdiag_s *fcdiag)
+{
+       u32     i;
+       struct bfi_diag_qtest_req_s *req;
+
+       req = bfa_reqq_next(fcdiag->bfa, fcdiag->qtest.queue);
+       if (!req)
+               return BFA_STATUS_DEVBUSY;
+
+       /* build host command */
+       bfi_h2i_set(req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_QTEST,
+               bfa_fn_lpu(fcdiag->bfa));
+
+       for (i = 0; i < BFI_LMSG_PL_WSZ; i++)
+               req->data[i] = QTEST_PAT_DEFAULT;
+
+       bfa_trc(fcdiag, fcdiag->qtest.queue);
+       /* ring door bell */
+       bfa_reqq_produce(fcdiag->bfa, fcdiag->qtest.queue, req->mh);
+       return BFA_STATUS_OK;
+}
+
+static void
+bfa_fcdiag_queuetest_comp(struct bfa_fcdiag_s *fcdiag,
+                       bfi_diag_qtest_rsp_t *rsp)
+{
+       struct bfa_diag_qtest_result_s *res = fcdiag->qtest.result;
+       bfa_status_t status = BFA_STATUS_OK;
+       int i;
+
+       /* Check timer, should still be active   */
+       if (!fcdiag->qtest.timer_active) {
+               bfa_trc(fcdiag, fcdiag->qtest.timer_active);
+               return;
+       }
+
+       /* update count */
+       fcdiag->qtest.count--;
+
+       /* Check result */
+       for (i = 0; i < BFI_LMSG_PL_WSZ; i++) {
+               if (rsp->data[i] != ~(QTEST_PAT_DEFAULT)) {
+                       res->status = BFA_STATUS_DATACORRUPTED;
+                       break;
+               }
+       }
+
+       if (res->status == BFA_STATUS_OK) {
+               if (fcdiag->qtest.count > 0) {
+                       status = bfa_fcdiag_queuetest_send(fcdiag);
+                       if (status == BFA_STATUS_OK)
+                               return;
+                       else
+                               res->status = status;
+               } else if (fcdiag->qtest.all > 0 &&
+                       fcdiag->qtest.queue < (BFI_IOC_MAX_CQS - 1)) {
+                       fcdiag->qtest.count = QTEST_CNT_DEFAULT;
+                       fcdiag->qtest.queue++;
+                       status = bfa_fcdiag_queuetest_send(fcdiag);
+                       if (status == BFA_STATUS_OK)
+                               return;
+                       else
+                               res->status = status;
+               }
+       }
+
+       /* Stop the timer once all queues have completed */
+       if (fcdiag->qtest.timer_active) {
+               bfa_timer_stop(&fcdiag->qtest.timer);
+               fcdiag->qtest.timer_active = 0;
+       }
+       res->queue = fcdiag->qtest.queue;
+       res->count = QTEST_CNT_DEFAULT - fcdiag->qtest.count;
+       bfa_trc(fcdiag, res->count);
+       bfa_trc(fcdiag, res->status);
+       fcdiag->qtest.status = res->status;
+       fcdiag->qtest.cbfn(fcdiag->qtest.cbarg, fcdiag->qtest.status);
+       fcdiag->qtest.lock = 0;
+}
+
+static void
+bfa_fcdiag_loopback_comp(struct bfa_fcdiag_s *fcdiag,
+                       struct bfi_diag_lb_rsp_s *rsp)
+{
+       struct bfa_diag_loopback_result_s *res = fcdiag->lb.result;
+
+       res->numtxmfrm  = be32_to_cpu(rsp->res.numtxmfrm);
+       res->numosffrm  = be32_to_cpu(rsp->res.numosffrm);
+       res->numrcvfrm  = be32_to_cpu(rsp->res.numrcvfrm);
+       res->badfrminf  = be32_to_cpu(rsp->res.badfrminf);
+       res->badfrmnum  = be32_to_cpu(rsp->res.badfrmnum);
+       res->status     = rsp->res.status;
+       fcdiag->lb.status = rsp->res.status;
+       bfa_trc(fcdiag, fcdiag->lb.status);
+       fcdiag->lb.cbfn(fcdiag->lb.cbarg, fcdiag->lb.status);
+       fcdiag->lb.lock = 0;
+       bfa_fcdiag_set_busy_status(fcdiag);
+}
+
+static bfa_status_t
+bfa_fcdiag_loopback_send(struct bfa_fcdiag_s *fcdiag,
+                       struct bfa_diag_loopback_s *loopback)
+{
+       struct bfi_diag_lb_req_s *lb_req;
+
+       lb_req = bfa_reqq_next(fcdiag->bfa, BFA_REQQ_DIAG);
+       if (!lb_req)
+               return BFA_STATUS_DEVBUSY;
+
+       /* build host command */
+       bfi_h2i_set(lb_req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_LOOPBACK,
+               bfa_fn_lpu(fcdiag->bfa));
+
+       lb_req->lb_mode = loopback->lb_mode;
+       lb_req->speed = loopback->speed;
+       lb_req->loopcnt = loopback->loopcnt;
+       lb_req->pattern = loopback->pattern;
+
+       /* ring door bell */
+       bfa_reqq_produce(fcdiag->bfa, BFA_REQQ_DIAG, lb_req->mh);
+
+       bfa_trc(fcdiag, loopback->lb_mode);
+       bfa_trc(fcdiag, loopback->speed);
+       bfa_trc(fcdiag, loopback->loopcnt);
+       bfa_trc(fcdiag, loopback->pattern);
+       return BFA_STATUS_OK;
+}
+
+/*
+ *     cpe/rme intr handler
+ */
+void
+bfa_fcdiag_intr(struct bfa_s *bfa, struct bfi_msg_s *msg)
+{
+       struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
+
+       switch (msg->mhdr.msg_id) {
+       case BFI_DIAG_I2H_LOOPBACK:
+               bfa_fcdiag_loopback_comp(fcdiag,
+                               (struct bfi_diag_lb_rsp_s *) msg);
+               break;
+       case BFI_DIAG_I2H_QTEST:
+               bfa_fcdiag_queuetest_comp(fcdiag, (bfi_diag_qtest_rsp_t *)msg);
+               break;
+       default:
+               bfa_trc(fcdiag, msg->mhdr.msg_id);
+               WARN_ON(1);
+       }
+}
+
+/*
+ *     Loopback test
+ *
+ *   @param[in] *bfa            - bfa data struct
+ *   @param[in] opmode          - port operation mode
+ *   @param[in] speed           - port speed
+ *   @param[in] lpcnt           - loop count
+ *   @param[in] pat                     - pattern to build packet
+ *   @param[in] *result         - pt to bfa_diag_loopback_result_t data struct
+ *   @param[in] cbfn            - callback function
+ *   @param[in] cbarg           - callback function arg
+ *
+ *   @param[out]
+ */
+bfa_status_t
+bfa_fcdiag_loopback(struct bfa_s *bfa, enum bfa_port_opmode opmode,
+               enum bfa_port_speed speed, u32 lpcnt, u32 pat,
+               struct bfa_diag_loopback_result_s *result, bfa_cb_diag_t cbfn,
+               void *cbarg)
+{
+       struct  bfa_diag_loopback_s loopback;
+       struct bfa_port_attr_s attr;
+       bfa_status_t status;
+       struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
+
+       if (!bfa_iocfc_is_operational(bfa))
+               return BFA_STATUS_IOC_NON_OP;
+
+       /* if port is PBC disabled, return error */
+       if (bfa_fcport_is_pbcdisabled(bfa)) {
+               bfa_trc(fcdiag, BFA_STATUS_PBC);
+               return BFA_STATUS_PBC;
+       }
 
+       if (bfa_fcport_is_disabled(bfa) == BFA_FALSE) {
+               bfa_trc(fcdiag, opmode);
+               return BFA_STATUS_PORT_NOT_DISABLED;
+       }
+
+       /* Check if the speed is supported */
+       bfa_fcport_get_attr(bfa, &attr);
+       bfa_trc(fcdiag, attr.speed_supported);
+       if (speed > attr.speed_supported)
+               return BFA_STATUS_UNSUPP_SPEED;
+
+       /* For Mezz card, port speed entered needs to be checked */
+       if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type)) {
+               if (bfa_ioc_get_type(&bfa->ioc) == BFA_IOC_TYPE_FC) {
+                       if ((speed == BFA_PORT_SPEED_1GBPS) &&
+                           (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)))
+                               return BFA_STATUS_UNSUPP_SPEED;
+                       if (!(speed == BFA_PORT_SPEED_1GBPS ||
+                             speed == BFA_PORT_SPEED_2GBPS ||
+                             speed == BFA_PORT_SPEED_4GBPS ||
+                             speed == BFA_PORT_SPEED_8GBPS ||
+                             speed == BFA_PORT_SPEED_16GBPS ||
+                             speed == BFA_PORT_SPEED_AUTO))
+                               return BFA_STATUS_UNSUPP_SPEED;
+               } else {
+                       if (speed != BFA_PORT_SPEED_10GBPS)
+                               return BFA_STATUS_UNSUPP_SPEED;
+               }
+       }
+
+       /* check to see if there is another destructive diag cmd running */
+       if (fcdiag->lb.lock) {
+               bfa_trc(fcdiag, fcdiag->lb.lock);
+               return BFA_STATUS_DEVBUSY;
+       }
+
+       fcdiag->lb.lock = 1;
+       loopback.lb_mode = opmode;
+       loopback.speed = speed;
+       loopback.loopcnt = lpcnt;
+       loopback.pattern = pat;
+       fcdiag->lb.result = result;
+       fcdiag->lb.cbfn = cbfn;
+       fcdiag->lb.cbarg = cbarg;
+       memset(result, 0, sizeof(struct bfa_diag_loopback_result_s));
+       bfa_fcdiag_set_busy_status(fcdiag);
+
+       /* Send msg to fw */
+       status = bfa_fcdiag_loopback_send(fcdiag, &loopback);
+       return status;
+}
+
+/*
+ *     DIAG queue test command
+ *
+ *   @param[in] *bfa            - bfa data struct
+ *   @param[in] force           - 1: don't do ioc op checking
+ *   @param[in] queue           - queue no. to test
+ *   @param[in] *result         - pt to bfa_diag_qtest_result_t data struct
+ *   @param[in] cbfn            - callback function
+ *   @param[in] *cbarg          - callback function arg
+ *
+ *   @param[out]
+ */
+bfa_status_t
+bfa_fcdiag_queuetest(struct bfa_s *bfa, u32 force, u32 queue,
+               struct bfa_diag_qtest_result_s *result, bfa_cb_diag_t cbfn,
+               void *cbarg)
+{
+       struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
+       bfa_status_t status;
+       bfa_trc(fcdiag, force);
+       bfa_trc(fcdiag, queue);
+
+       if (!force && !bfa_iocfc_is_operational(bfa))
+               return BFA_STATUS_IOC_NON_OP;
+
+       /* check to see if there is another destructive diag cmd running */
+       if (fcdiag->qtest.lock) {
+               bfa_trc(fcdiag, fcdiag->qtest.lock);
+               return BFA_STATUS_DEVBUSY;
+       }
+
+       /* Initialization */
+       fcdiag->qtest.lock = 1;
+       fcdiag->qtest.cbfn = cbfn;
+       fcdiag->qtest.cbarg = cbarg;
+       fcdiag->qtest.result = result;
+       fcdiag->qtest.count = QTEST_CNT_DEFAULT;
+
+       /* Init test results */
+       fcdiag->qtest.result->status = BFA_STATUS_OK;
+       fcdiag->qtest.result->count  = 0;
+
+       /* send */
+       if (queue < BFI_IOC_MAX_CQS) {
+               fcdiag->qtest.result->queue  = (u8)queue;
+               fcdiag->qtest.queue = (u8)queue;
+               fcdiag->qtest.all   = 0;
+       } else {
+               fcdiag->qtest.result->queue  = 0;
+               fcdiag->qtest.queue = 0;
+               fcdiag->qtest.all   = 1;
+       }
+       status = bfa_fcdiag_queuetest_send(fcdiag);
+
+       /* Start a timer */
+       if (status == BFA_STATUS_OK) {
+               bfa_timer_start(bfa, &fcdiag->qtest.timer,
+                               bfa_fcdiag_queuetest_timeout, fcdiag,
+                               BFA_DIAG_QTEST_TOV);
+               fcdiag->qtest.timer_active = 1;
+       }
+       return status;
+}
+
+/*
+ * DIAG PLB is running
+ *
+ *   @param[in] *bfa    - bfa data struct
+ *
+ *   @param[out]
+ */
+bfa_status_t
+bfa_fcdiag_lb_is_running(struct bfa_s *bfa)
+{
+       struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
+       return fcdiag->lb.lock ?  BFA_STATUS_DIAG_BUSY : BFA_STATUS_OK;
+}
index 5902a45..fbe513a 100644 (file)
@@ -26,6 +26,7 @@
  * Scatter-gather DMA related defines
  */
 #define BFA_SGPG_MIN   (16)
+#define BFA_SGPG_MAX   (8192)
 
 /*
  * Alignment macro for SG page allocation
@@ -54,17 +55,21 @@ struct bfa_sgpg_s {
  */
 #define BFA_SGPG_NPAGE(_nsges)  (((_nsges) / BFI_SGPG_DATA_SGES) + 1)
 
+/* Max SGPG dma segs required */
+#define BFA_SGPG_DMA_SEGS      \
+       BFI_MEM_DMA_NSEGS(BFA_SGPG_MAX, (uint32_t)sizeof(struct bfi_sgpg_s))
+
 struct bfa_sgpg_mod_s {
        struct bfa_s *bfa;
        int             num_sgpgs;      /*  number of SG pages          */
        int             free_sgpgs;     /*  number of free SG pages     */
-       struct bfa_sgpg_s       *hsgpg_arr;     /*  BFA SG page array   */
-       struct bfi_sgpg_s *sgpg_arr;    /*  actual SG page array        */
-       u64     sgpg_arr_pa;    /*  SG page array DMA addr      */
        struct list_head        sgpg_q;         /*  queue of free SG pages */
        struct list_head        sgpg_wait_q;    /*  wait queue for SG pages */
+       struct bfa_mem_dma_s    dma_seg[BFA_SGPG_DMA_SEGS];
+       struct bfa_mem_kva_s    kva_seg;
 };
 #define BFA_SGPG_MOD(__bfa)    (&(__bfa)->modules.sgpg_mod)
+#define BFA_MEM_SGPG_KVA(__bfa) (&(BFA_SGPG_MOD(__bfa)->kva_seg))
 
 bfa_status_t bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q,
                             int nsgpgs);
@@ -79,26 +84,32 @@ void bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe);
  * FCXP related defines
  */
 #define BFA_FCXP_MIN           (1)
+#define BFA_FCXP_MAX           (256)
 #define BFA_FCXP_MAX_IBUF_SZ   (2 * 1024 + 256)
 #define BFA_FCXP_MAX_LBUF_SZ   (4 * 1024 + 256)
 
+/* Max FCXP dma segs required */
+#define BFA_FCXP_DMA_SEGS                                              \
+       BFI_MEM_DMA_NSEGS(BFA_FCXP_MAX,                                 \
+               (u32)BFA_FCXP_MAX_IBUF_SZ + BFA_FCXP_MAX_LBUF_SZ)
+
 struct bfa_fcxp_mod_s {
        struct bfa_s      *bfa;         /* backpointer to BFA */
        struct bfa_fcxp_s *fcxp_list;   /* array of FCXPs */
        u16     num_fcxps;      /* max num FCXP requests */
        struct list_head  fcxp_free_q;  /* free FCXPs */
        struct list_head  fcxp_active_q;        /* active FCXPs */
-       void            *req_pld_list_kva;      /* list of FCXP req pld */
-       u64     req_pld_list_pa;        /* list of FCXP req pld */
-       void            *rsp_pld_list_kva;      /* list of FCXP resp pld */
-       u64     rsp_pld_list_pa;        /* list of FCXP resp pld */
        struct list_head  wait_q;               /* wait queue for free fcxp */
+       struct list_head fcxp_unused_q; /* unused fcxps */
        u32     req_pld_sz;
        u32     rsp_pld_sz;
+       struct bfa_mem_dma_s dma_seg[BFA_FCXP_DMA_SEGS];
+       struct bfa_mem_kva_s kva_seg;
 };
 
 #define BFA_FCXP_MOD(__bfa)            (&(__bfa)->modules.fcxp_mod)
 #define BFA_FCXP_FROM_TAG(__mod, __tag)        (&(__mod)->fcxp_list[__tag])
+#define BFA_MEM_FCXP_KVA(__bfa) (&(BFA_FCXP_MOD(__bfa)->kva_seg))
 
 typedef void    (*fcxp_send_cb_t) (struct bfa_s *ioc, struct bfa_fcxp_s *fcxp,
                                   void *cb_arg, bfa_status_t req_status,
@@ -206,13 +217,15 @@ struct bfa_fcxp_wqe_s {
 #define BFA_FCXP_RSP_FCHS(_fcxp)       (&((_fcxp)->rsp_info.fchs))
 #define BFA_FCXP_RSP_PLD(_fcxp)                (bfa_fcxp_get_rspbuf(_fcxp))
 
-#define BFA_FCXP_REQ_PLD_PA(_fcxp)                             \
-       ((_fcxp)->fcxp_mod->req_pld_list_pa +                   \
-        ((_fcxp)->fcxp_mod->req_pld_sz  * (_fcxp)->fcxp_tag))
+#define BFA_FCXP_REQ_PLD_PA(_fcxp)                                           \
+       bfa_mem_get_dmabuf_pa((_fcxp)->fcxp_mod, (_fcxp)->fcxp_tag,           \
+               (_fcxp)->fcxp_mod->req_pld_sz + (_fcxp)->fcxp_mod->rsp_pld_sz)
 
-#define BFA_FCXP_RSP_PLD_PA(_fcxp)                             \
-       ((_fcxp)->fcxp_mod->rsp_pld_list_pa +                   \
-        ((_fcxp)->fcxp_mod->rsp_pld_sz * (_fcxp)->fcxp_tag))
+/* fcxp_buf = req_buf + rsp_buf :- add req_buf_sz to get to rsp_buf */
+#define BFA_FCXP_RSP_PLD_PA(_fcxp)                                            \
+       (bfa_mem_get_dmabuf_pa((_fcxp)->fcxp_mod, (_fcxp)->fcxp_tag,           \
+             (_fcxp)->fcxp_mod->req_pld_sz + (_fcxp)->fcxp_mod->rsp_pld_sz) + \
+             (_fcxp)->fcxp_mod->req_pld_sz)
 
 void   bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
 
@@ -238,10 +251,13 @@ struct bfa_rport_mod_s {
        struct bfa_rport_s *rps_list;   /*  list of rports      */
        struct list_head        rp_free_q;      /*  free bfa_rports     */
        struct list_head        rp_active_q;    /*  free bfa_rports     */
+       struct list_head        rp_unused_q;    /*  unused bfa rports  */
        u16     num_rports;     /*  number of rports    */
+       struct bfa_mem_kva_s    kva_seg;
 };
 
 #define BFA_RPORT_MOD(__bfa)   (&(__bfa)->modules.rport_mod)
+#define BFA_MEM_RPORT_KVA(__bfa) (&(BFA_RPORT_MOD(__bfa)->kva_seg))
 
 /*
  * Convert rport tag to RPORT
@@ -254,6 +270,7 @@ struct bfa_rport_mod_s {
  * protected functions
  */
 void   bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
+void   bfa_rport_res_recfg(struct bfa_s *bfa, u16 num_rport_fw);
 
 /*
  *     BFA rport information.
@@ -298,7 +315,7 @@ struct bfa_rport_s {
  */
 
 #define BFA_UF_MIN     (4)
-
+#define BFA_UF_MAX     (256)
 
 struct bfa_uf_s {
        struct list_head        qe;     /*  queue element               */
@@ -326,36 +343,41 @@ struct bfa_uf_s {
  */
 typedef void (*bfa_cb_uf_recv_t) (void *cbarg, struct bfa_uf_s *uf);
 
+#define BFA_UF_BUFSZ   (2 * 1024 + 256)
+
+struct bfa_uf_buf_s {
+       u8      d[BFA_UF_BUFSZ];
+};
+
+#define BFA_PER_UF_DMA_SZ      \
+       (u32)BFA_ROUNDUP(sizeof(struct bfa_uf_buf_s), BFA_DMA_ALIGN_SZ)
+
+/* Max UF dma segs required */
+#define BFA_UF_DMA_SEGS BFI_MEM_DMA_NSEGS(BFA_UF_MAX, BFA_PER_UF_DMA_SZ)
+
 struct bfa_uf_mod_s {
        struct bfa_s *bfa;              /*  back pointer to BFA */
        struct bfa_uf_s *uf_list;       /*  array of UFs */
        u16     num_ufs;        /*  num unsolicited rx frames */
        struct list_head        uf_free_q;      /*  free UFs */
        struct list_head        uf_posted_q;    /*  UFs posted to IOC */
-       struct bfa_uf_buf_s *uf_pbs_kva;        /*  list UF bufs request pld */
-       u64     uf_pbs_pa;      /*  phy addr for UF bufs */
+       struct list_head        uf_unused_q;    /*  unused UF's */
        struct bfi_uf_buf_post_s *uf_buf_posts;
        /*  pre-built UF post msgs */
        bfa_cb_uf_recv_t ufrecv;        /*  uf recv handler function */
        void            *cbarg;         /*  uf receive handler arg */
+       struct bfa_mem_dma_s    dma_seg[BFA_UF_DMA_SEGS];
+       struct bfa_mem_kva_s    kva_seg;
 };
 
 #define BFA_UF_MOD(__bfa)      (&(__bfa)->modules.uf_mod)
+#define BFA_MEM_UF_KVA(__bfa)  (&(BFA_UF_MOD(__bfa)->kva_seg))
 
 #define ufm_pbs_pa(_ufmod, _uftag)                                     \
-       ((_ufmod)->uf_pbs_pa + sizeof(struct bfa_uf_buf_s) * (_uftag))
+       bfa_mem_get_dmabuf_pa(_ufmod, _uftag, BFA_PER_UF_DMA_SZ)
 
 void   bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
-
-#define BFA_UF_BUFSZ   (2 * 1024 + 256)
-
-/*
- * @todo private
- */
-struct bfa_uf_buf_s {
-       u8              d[BFA_UF_BUFSZ];
-};
-
+void   bfa_uf_res_recfg(struct bfa_s *bfa, u16 num_uf_fw);
 
 /*
  * LPS - bfa lport login/logout service interface
@@ -364,7 +386,8 @@ struct bfa_lps_s {
        struct list_head        qe;     /*  queue element               */
        struct bfa_s    *bfa;           /*  parent bfa instance */
        bfa_sm_t        sm;             /*  finite state machine        */
-       u8              lp_tag;         /*  lport tag                   */
+       u8              bfa_tag;        /*  lport tag           */
+       u8              fw_tag;         /*  lport fw tag                */
        u8              reqq;           /*  lport request queue */
        u8              alpa;           /*  ALPA for loop topologies    */
        u32     lp_pid;         /*  lport port ID               */
@@ -377,6 +400,8 @@ struct bfa_lps_s {
        bfa_status_t    status;         /*  login status                */
        u16             pdusz;          /*  max receive PDU size        */
        u16             pr_bbcred;      /*  BB_CREDIT from peer         */
+       u8              pr_bbscn;       /*  BB_SCN from peer            */
+       u8              bb_scn;         /*  local BB_SCN                */
        u8              lsrjt_rsn;      /*  LSRJT reason                */
        u8              lsrjt_expl;     /*  LSRJT explanation           */
        wwn_t           pwwn;           /*  port wwn of lport           */
@@ -395,12 +420,15 @@ struct bfa_lps_s {
 struct bfa_lps_mod_s {
        struct list_head                lps_free_q;
        struct list_head                lps_active_q;
+       struct list_head                lps_login_q;
        struct bfa_lps_s        *lps_arr;
        int                     num_lps;
+       struct bfa_mem_kva_s    kva_seg;
 };
 
 #define BFA_LPS_MOD(__bfa)             (&(__bfa)->modules.lps_mod)
 #define BFA_LPS_FROM_TAG(__mod, __tag) (&(__mod)->lps_arr[__tag])
+#define BFA_MEM_LPS_KVA(__bfa) (&(BFA_LPS_MOD(__bfa)->kva_seg))
 
 /*
  * external functions
@@ -477,11 +505,14 @@ struct bfa_fcport_s {
        bfa_boolean_t           diag_busy; /*  diag busy status */
        bfa_boolean_t           beacon; /*  port beacon status */
        bfa_boolean_t           link_e2e_beacon; /*  link beacon status */
+       bfa_boolean_t           bbsc_op_state;  /* Cred recov Oper State */
        struct bfa_fcport_trunk_s trunk;
        u16             fcoe_vlan;
+       struct bfa_mem_dma_s    fcport_dma;
 };
 
 #define BFA_FCPORT_MOD(__bfa)  (&(__bfa)->modules.fcport)
+#define BFA_MEM_FCPORT_DMA(__bfa) (&(BFA_FCPORT_MOD(__bfa)->fcport_dma))
 
 /*
  * protected functions
@@ -515,8 +546,10 @@ void bfa_fcport_event_register(struct bfa_s *bfa,
 bfa_boolean_t bfa_fcport_is_disabled(struct bfa_s *bfa);
 enum bfa_port_speed bfa_fcport_get_ratelim_speed(struct bfa_s *bfa);
 
-void bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit);
+void bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit, u8 bb_scn);
 bfa_boolean_t     bfa_fcport_is_ratelim(struct bfa_s *bfa);
+void bfa_fcport_beacon(void *dev, bfa_boolean_t beacon,
+                       bfa_boolean_t link_e2e_beacon);
 bfa_boolean_t  bfa_fcport_is_linkup(struct bfa_s *bfa);
 bfa_status_t bfa_fcport_get_stats(struct bfa_s *bfa,
                                  union bfa_fcport_stats_u *stats,
@@ -524,6 +557,9 @@ bfa_status_t bfa_fcport_get_stats(struct bfa_s *bfa,
 bfa_status_t bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn,
                                    void *cbarg);
 bfa_boolean_t bfa_fcport_is_qos_enabled(struct bfa_s *bfa);
+bfa_boolean_t bfa_fcport_is_trunk_enabled(struct bfa_s *bfa);
+bfa_status_t bfa_fcport_is_pbcdisabled(struct bfa_s *bfa);
+void bfa_fcport_cfg_faa(struct bfa_s *bfa, u8 state);
 
 /*
  * bfa rport API functions
@@ -577,6 +613,7 @@ void bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport,
 bfa_status_t bfa_fcxp_abort(struct bfa_fcxp_s *fcxp);
 u32 bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp);
 u32 bfa_fcxp_get_maxrsp(struct bfa_s *bfa);
+void bfa_fcxp_res_recfg(struct bfa_s *bfa, u16 num_fcxp_fw);
 
 static inline void *
 bfa_uf_get_frmbuf(struct bfa_uf_s *uf)
@@ -606,11 +643,12 @@ struct bfa_lps_s *bfa_lps_alloc(struct bfa_s *bfa);
 void bfa_lps_delete(struct bfa_lps_s *lps);
 void bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa,
                   u16 pdusz, wwn_t pwwn, wwn_t nwwn,
-                  bfa_boolean_t auth_en);
+                  bfa_boolean_t auth_en, u8 bb_scn);
 void bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz,
                   wwn_t pwwn, wwn_t nwwn);
 void bfa_lps_fdisclogo(struct bfa_lps_s *lps);
 void bfa_lps_set_n2n_pid(struct bfa_lps_s *lps, u32 n2n_pid);
+u8 bfa_lps_get_fwtag(struct bfa_s *bfa, u8 lp_tag);
 u32 bfa_lps_get_base_pid(struct bfa_s *bfa);
 u8 bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid);
 void bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status);
@@ -618,4 +656,57 @@ void bfa_cb_lps_fdisc_comp(void *bfad, void *uarg, bfa_status_t status);
 void bfa_cb_lps_fdisclogo_comp(void *bfad, void *uarg);
 void bfa_cb_lps_cvl_event(void *bfad, void *uarg);
 
+/* FAA specific APIs */
+bfa_status_t bfa_faa_enable(struct bfa_s *bfa,
+                       bfa_cb_iocfc_t cbfn, void *cbarg);
+bfa_status_t bfa_faa_disable(struct bfa_s *bfa,
+                       bfa_cb_iocfc_t cbfn, void *cbarg);
+bfa_status_t bfa_faa_query(struct bfa_s *bfa, struct bfa_faa_attr_s *attr,
+                       bfa_cb_iocfc_t cbfn, void *cbarg);
+
+/*
+ *     FC DIAG data structure
+ */
+struct bfa_fcdiag_qtest_s {
+       struct bfa_diag_qtest_result_s *result;
+       bfa_cb_diag_t   cbfn;
+       void            *cbarg;
+       struct bfa_timer_s      timer;
+       u32     status;
+       u32     count;
+       u8      lock;
+       u8      queue;
+       u8      all;
+       u8      timer_active;
+};
+
+struct bfa_fcdiag_lb_s {
+       bfa_cb_diag_t   cbfn;
+       void            *cbarg;
+       void            *result;
+       bfa_boolean_t   lock;
+       u32        status;
+};
+
+struct bfa_fcdiag_s {
+       struct bfa_s    *bfa;           /* Back pointer to BFA */
+       struct bfa_trc_mod_s   *trcmod;
+       struct bfa_fcdiag_lb_s lb;
+       struct bfa_fcdiag_qtest_s qtest;
+};
+
+#define BFA_FCDIAG_MOD(__bfa)  (&(__bfa)->modules.fcdiag)
+
+void   bfa_fcdiag_intr(struct bfa_s *bfa, struct bfi_msg_s *msg);
+
+bfa_status_t   bfa_fcdiag_loopback(struct bfa_s *bfa,
+                               enum bfa_port_opmode opmode,
+                               enum bfa_port_speed speed, u32 lpcnt, u32 pat,
+                               struct bfa_diag_loopback_result_s *result,
+                               bfa_cb_diag_t cbfn, void *cbarg);
+bfa_status_t   bfa_fcdiag_queuetest(struct bfa_s *bfa, u32 ignore,
+                       u32 queue, struct bfa_diag_qtest_result_s *result,
+                       bfa_cb_diag_t cbfn, void *cbarg);
+bfa_status_t   bfa_fcdiag_lb_is_running(struct bfa_s *bfa);
+
 #endif /* __BFA_SVC_H__ */
index 59b5e9b..beb30a7 100644 (file)
@@ -56,14 +56,15 @@ int         fdmi_enable = BFA_TRUE;
 int            pcie_max_read_reqsz;
 int            bfa_debugfs_enable = 1;
 int            msix_disable_cb = 0, msix_disable_ct = 0;
+int            max_xfer_size = BFAD_MAX_SECTORS >> 1;
 
 /* Firmware releated */
-u32    bfi_image_ct_fc_size, bfi_image_ct_cna_size, bfi_image_cb_fc_size;
-u32     *bfi_image_ct_fc, *bfi_image_ct_cna, *bfi_image_cb_fc;
+u32    bfi_image_cb_size, bfi_image_ct_size, bfi_image_ct2_size;
+u32    *bfi_image_cb, *bfi_image_ct, *bfi_image_ct2;
 
-#define BFAD_FW_FILE_CT_FC      "ctfw_fc.bin"
-#define BFAD_FW_FILE_CT_CNA     "ctfw_cna.bin"
-#define BFAD_FW_FILE_CB_FC      "cbfw_fc.bin"
+#define BFAD_FW_FILE_CB                "cbfw.bin"
+#define BFAD_FW_FILE_CT                "ctfw.bin"
+#define BFAD_FW_FILE_CT2       "ct2fw.bin"
 
 static u32 *bfad_load_fwimg(struct pci_dev *pdev);
 static void bfad_free_fwimg(void);
@@ -71,18 +72,18 @@ static void bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
                u32 *bfi_image_size, char *fw_name);
 
 static const char *msix_name_ct[] = {
+       "ctrl",
        "cpe0", "cpe1", "cpe2", "cpe3",
-       "rme0", "rme1", "rme2", "rme3",
-       "ctrl" };
+       "rme0", "rme1", "rme2", "rme3" };
 
 static const char *msix_name_cb[] = {
        "cpe0", "cpe1", "cpe2", "cpe3",
        "rme0", "rme1", "rme2", "rme3",
        "eemc", "elpu0", "elpu1", "epss", "mlpu" };
 
-MODULE_FIRMWARE(BFAD_FW_FILE_CT_FC);
-MODULE_FIRMWARE(BFAD_FW_FILE_CT_CNA);
-MODULE_FIRMWARE(BFAD_FW_FILE_CB_FC);
+MODULE_FIRMWARE(BFAD_FW_FILE_CB);
+MODULE_FIRMWARE(BFAD_FW_FILE_CT);
+MODULE_FIRMWARE(BFAD_FW_FILE_CT2);
 
 module_param(os_name, charp, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(os_name, "OS name of the hba host machine");
@@ -144,6 +145,9 @@ MODULE_PARM_DESC(pcie_max_read_reqsz, "PCIe max read request size, default=0 "
 module_param(bfa_debugfs_enable, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(bfa_debugfs_enable, "Enables debugfs feature, default=1,"
                " Range[false:0|true:1]");
+module_param(max_xfer_size, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(max_xfer_size, "default=32MB,"
+               " Range[64k|128k|256k|512k|1024k|2048k]");
 
 static void
 bfad_sm_uninit(struct bfad_s *bfad, enum bfad_sm_event event);
@@ -527,28 +531,26 @@ bfa_fcb_pbc_vport_create(struct bfad_s *bfad, struct bfi_pbc_vport_s pbc_vport)
 void
 bfad_hal_mem_release(struct bfad_s *bfad)
 {
-       int             i;
        struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo;
-       struct bfa_mem_elem_s *meminfo_elem;
-
-       for (i = 0; i < BFA_MEM_TYPE_MAX; i++) {
-               meminfo_elem = &hal_meminfo->meminfo[i];
-               if (meminfo_elem->kva != NULL) {
-                       switch (meminfo_elem->mem_type) {
-                       case BFA_MEM_TYPE_KVA:
-                               vfree(meminfo_elem->kva);
-                               break;
-                       case BFA_MEM_TYPE_DMA:
-                               dma_free_coherent(&bfad->pcidev->dev,
-                                       meminfo_elem->mem_len,
-                                       meminfo_elem->kva,
-                                       (dma_addr_t) meminfo_elem->dma);
-                               break;
-                       default:
-                               WARN_ON(1);
-                               break;
-                       }
-               }
+       struct bfa_mem_dma_s *dma_info, *dma_elem;
+       struct bfa_mem_kva_s *kva_info, *kva_elem;
+       struct list_head *dm_qe, *km_qe;
+
+       dma_info = &hal_meminfo->dma_info;
+       kva_info = &hal_meminfo->kva_info;
+
+       /* Iterate through the KVA meminfo queue */
+       list_for_each(km_qe, &kva_info->qe) {
+               kva_elem = (struct bfa_mem_kva_s *) km_qe;
+               vfree(kva_elem->kva);
+       }
+
+       /* Iterate through the DMA meminfo queue */
+       list_for_each(dm_qe, &dma_info->qe) {
+               dma_elem = (struct bfa_mem_dma_s *) dm_qe;
+               dma_free_coherent(&bfad->pcidev->dev,
+                               dma_elem->mem_len, dma_elem->kva,
+                               (dma_addr_t) dma_elem->dma);
        }
 
        memset(hal_meminfo, 0, sizeof(struct bfa_meminfo_s));
@@ -563,15 +565,15 @@ bfad_update_hal_cfg(struct bfa_iocfc_cfg_s *bfa_cfg)
                bfa_cfg->fwcfg.num_ioim_reqs = num_ios;
        if (num_tms > 0)
                bfa_cfg->fwcfg.num_tskim_reqs = num_tms;
-       if (num_fcxps > 0)
+       if (num_fcxps > 0 && num_fcxps <= BFA_FCXP_MAX)
                bfa_cfg->fwcfg.num_fcxp_reqs = num_fcxps;
-       if (num_ufbufs > 0)
+       if (num_ufbufs > 0 && num_ufbufs <= BFA_UF_MAX)
                bfa_cfg->fwcfg.num_uf_bufs = num_ufbufs;
        if (reqq_size > 0)
                bfa_cfg->drvcfg.num_reqq_elems = reqq_size;
        if (rspq_size > 0)
                bfa_cfg->drvcfg.num_rspq_elems = rspq_size;
-       if (num_sgpgs > 0)
+       if (num_sgpgs > 0 && num_sgpgs <= BFA_SGPG_MAX)
                bfa_cfg->drvcfg.num_sgpgs = num_sgpgs;
 
        /*
@@ -591,85 +593,46 @@ bfad_update_hal_cfg(struct bfa_iocfc_cfg_s *bfa_cfg)
 bfa_status_t
 bfad_hal_mem_alloc(struct bfad_s *bfad)
 {
-       int             i;
        struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo;
-       struct bfa_mem_elem_s *meminfo_elem;
-       dma_addr_t      phys_addr;
-       void           *kva;
+       struct bfa_mem_dma_s *dma_info, *dma_elem;
+       struct bfa_mem_kva_s *kva_info, *kva_elem;
+       struct list_head *dm_qe, *km_qe;
        bfa_status_t    rc = BFA_STATUS_OK;
-       int retry_count = 0;
-       int reset_value = 1;
-       int min_num_sgpgs = 512;
+       dma_addr_t      phys_addr;
 
        bfa_cfg_get_default(&bfad->ioc_cfg);
-
-retry:
        bfad_update_hal_cfg(&bfad->ioc_cfg);
        bfad->cfg_data.ioc_queue_depth = bfad->ioc_cfg.fwcfg.num_ioim_reqs;
-       bfa_cfg_get_meminfo(&bfad->ioc_cfg, hal_meminfo);
-
-       for (i = 0; i < BFA_MEM_TYPE_MAX; i++) {
-               meminfo_elem = &hal_meminfo->meminfo[i];
-               switch (meminfo_elem->mem_type) {
-               case BFA_MEM_TYPE_KVA:
-                       kva = vmalloc(meminfo_elem->mem_len);
-                       if (kva == NULL) {
-                               bfad_hal_mem_release(bfad);
-                               rc = BFA_STATUS_ENOMEM;
-                               goto ext;
-                       }
-                       memset(kva, 0, meminfo_elem->mem_len);
-                       meminfo_elem->kva = kva;
-                       break;
-               case BFA_MEM_TYPE_DMA:
-                       kva = dma_alloc_coherent(&bfad->pcidev->dev,
-                               meminfo_elem->mem_len, &phys_addr, GFP_KERNEL);
-                       if (kva == NULL) {
-                               bfad_hal_mem_release(bfad);
-                               /*
-                                * If we cannot allocate with default
-                                * num_sgpages try with half the value.
-                                */
-                               if (num_sgpgs > min_num_sgpgs) {
-                                       printk(KERN_INFO
-                                       "bfad[%d]: memory allocation failed"
-                                       " with num_sgpgs: %d\n",
-                                               bfad->inst_no, num_sgpgs);
-                                       nextLowerInt(&num_sgpgs);
-                                       printk(KERN_INFO
-                                       "bfad[%d]: trying to allocate memory"
-                                       " with num_sgpgs: %d\n",
-                                               bfad->inst_no, num_sgpgs);
-                                       retry_count++;
-                                       goto retry;
-                               } else {
-                                       if (num_sgpgs_parm > 0)
-                                               num_sgpgs = num_sgpgs_parm;
-                                       else {
-                                               reset_value =
-                                                       (1 << retry_count);
-                                               num_sgpgs *= reset_value;
-                                       }
-                                       rc = BFA_STATUS_ENOMEM;
-                                       goto ext;
-                               }
-                       }
-
-                       if (num_sgpgs_parm > 0)
-                               num_sgpgs = num_sgpgs_parm;
-                       else {
-                               reset_value = (1 << retry_count);
-                               num_sgpgs *= reset_value;
-                       }
-
-                       memset(kva, 0, meminfo_elem->mem_len);
-                       meminfo_elem->kva = kva;
-                       meminfo_elem->dma = phys_addr;
-                       break;
-               default:
-                       break;
+       bfa_cfg_get_meminfo(&bfad->ioc_cfg, hal_meminfo, &bfad->bfa);
+
+       dma_info = &hal_meminfo->dma_info;
+       kva_info = &hal_meminfo->kva_info;
+
+       /* Iterate through the KVA meminfo queue */
+       list_for_each(km_qe, &kva_info->qe) {
+               kva_elem = (struct bfa_mem_kva_s *) km_qe;
+               kva_elem->kva = vmalloc(kva_elem->mem_len);
+               if (kva_elem->kva == NULL) {
+                       bfad_hal_mem_release(bfad);
+                       rc = BFA_STATUS_ENOMEM;
+                       goto ext;
+               }
+               memset(kva_elem->kva, 0, kva_elem->mem_len);
+       }
 
+       /* Iterate through the DMA meminfo queue */
+       list_for_each(dm_qe, &dma_info->qe) {
+               dma_elem = (struct bfa_mem_dma_s *) dm_qe;
+               dma_elem->kva = dma_alloc_coherent(&bfad->pcidev->dev,
+                                               dma_elem->mem_len,
+                                               &phys_addr, GFP_KERNEL);
+               if (dma_elem->kva == NULL) {
+                       bfad_hal_mem_release(bfad);
+                       rc = BFA_STATUS_ENOMEM;
+                       goto ext;
                }
+               dma_elem->dma = phys_addr;
+               memset(dma_elem->kva, 0, dma_elem->mem_len);
        }
 ext:
        return rc;
@@ -780,13 +743,17 @@ bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
        pci_set_master(pdev);
 
 
-       if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)
-               if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
+       if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) ||
+           (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)) {
+               if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) ||
+                  (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)) {
                        printk(KERN_ERR "pci_set_dma_mask fail %p\n", pdev);
                        goto out_release_region;
                }
+       }
 
        bfad->pci_bar0_kva = pci_iomap(pdev, 0, pci_resource_len(pdev, 0));
+       bfad->pci_bar2_kva = pci_iomap(pdev, 2, pci_resource_len(pdev, 2));
 
        if (bfad->pci_bar0_kva == NULL) {
                printk(KERN_ERR "Fail to map bar0\n");
@@ -797,6 +764,7 @@ bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
        bfad->hal_pcidev.pci_func = PCI_FUNC(pdev->devfn);
        bfad->hal_pcidev.pci_bar_kva = bfad->pci_bar0_kva;
        bfad->hal_pcidev.device_id = pdev->device;
+       bfad->hal_pcidev.ssid = pdev->subsystem_device;
        bfad->pci_name = pci_name(pdev);
 
        bfad->pci_attr.vendor_id = pdev->vendor;
@@ -868,6 +836,7 @@ void
 bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad)
 {
        pci_iounmap(pdev, bfad->pci_bar0_kva);
+       pci_iounmap(pdev, bfad->pci_bar2_kva);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
@@ -908,12 +877,29 @@ bfad_drv_init(struct bfad_s *bfad)
        bfad->bfa_fcs.trcmod = bfad->trcmod;
        bfa_fcs_attach(&bfad->bfa_fcs, &bfad->bfa, bfad, BFA_FALSE);
        bfad->bfa_fcs.fdmi_enabled = fdmi_enable;
+       bfa_fcs_init(&bfad->bfa_fcs);
        spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 
        bfad->bfad_flags |= BFAD_DRV_INIT_DONE;
 
+       /* configure base port */
+       rc = bfad_cfg_pport(bfad, BFA_LPORT_ROLE_FCP_IM);
+       if (rc != BFA_STATUS_OK)
+               goto out_cfg_pport_fail;
+
        return BFA_STATUS_OK;
 
+out_cfg_pport_fail:
+       /* fcs exit - on cfg pport failure */
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       init_completion(&bfad->comp);
+       bfad->pport.flags |= BFAD_PORT_DELETE;
+       bfa_fcs_exit(&bfad->bfa_fcs);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       wait_for_completion(&bfad->comp);
+       /* bfa detach - free hal memory */
+       bfa_detach(&bfad->bfa);
+       bfad_hal_mem_release(bfad);
 out_hal_mem_alloc_failure:
        return BFA_STATUS_FAILED;
 }
@@ -945,6 +931,7 @@ bfad_drv_start(struct bfad_s *bfad)
 
        spin_lock_irqsave(&bfad->bfad_lock, flags);
        bfa_iocfc_start(&bfad->bfa);
+       bfa_fcs_pbc_vport_init(&bfad->bfa_fcs);
        bfa_fcs_fabric_modstart(&bfad->bfa_fcs);
        bfad->bfad_flags |= BFAD_HAL_START_DONE;
        spin_unlock_irqrestore(&bfad->bfad_lock, flags);
@@ -1032,6 +1019,12 @@ bfad_start_ops(struct bfad_s *bfad) {
        struct bfad_vport_s *vport, *vport_new;
        struct bfa_fcs_driver_info_s driver_info;
 
+       /* Limit min/max. xfer size to [64k-32MB] */
+       if (max_xfer_size < BFAD_MIN_SECTORS >> 1)
+               max_xfer_size = BFAD_MIN_SECTORS >> 1;
+       if (max_xfer_size > BFAD_MAX_SECTORS >> 1)
+               max_xfer_size = BFAD_MAX_SECTORS >> 1;
+
        /* Fill the driver_info info to fcs*/
        memset(&driver_info, 0, sizeof(driver_info));
        strncpy(driver_info.version, BFAD_DRIVER_VERSION,
@@ -1049,19 +1042,19 @@ bfad_start_ops(struct bfad_s *bfad) {
        strncpy(driver_info.os_device_name, bfad->pci_name,
                sizeof(driver_info.os_device_name - 1));
 
-       /* FCS INIT */
+       /* FCS driver info init */
        spin_lock_irqsave(&bfad->bfad_lock, flags);
        bfa_fcs_driver_info_init(&bfad->bfa_fcs, &driver_info);
-       bfa_fcs_init(&bfad->bfa_fcs);
        spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 
-       retval = bfad_cfg_pport(bfad, BFA_LPORT_ROLE_FCP_IM);
-       if (retval != BFA_STATUS_OK) {
-               if (bfa_sm_cmp_state(bfad, bfad_sm_initializing))
-                       bfa_sm_set_state(bfad, bfad_sm_failed);
-               bfad_stop(bfad);
-               return BFA_STATUS_FAILED;
-       }
+       /*
+        * FCS update cfg - reset the pwwn/nwwn of fabric base logical port
+        * with values learned during bfa_init firmware GETATTR REQ.
+        */
+       bfa_fcs_update_cfg(&bfad->bfa_fcs);
+
+       /* Setup fc host fixed attribute if the lk supports */
+       bfad_fc_host_init(bfad->pport.im_port);
 
        /* BFAD level FC4 IM specific resource allocation */
        retval = bfad_im_probe(bfad);
@@ -1233,8 +1226,8 @@ bfad_install_msix_handler(struct bfad_s *bfad)
        for (i = 0; i < bfad->nvec; i++) {
                sprintf(bfad->msix_tab[i].name, "bfa-%s-%s",
                                bfad->pci_name,
-                               ((bfa_asic_id_ct(bfad->hal_pcidev.device_id)) ?
-                               msix_name_ct[i] : msix_name_cb[i]));
+                               ((bfa_asic_id_cb(bfad->hal_pcidev.device_id)) ?
+                               msix_name_cb[i] : msix_name_ct[i]));
 
                error = request_irq(bfad->msix_tab[i].msix.vector,
                                    (irq_handler_t) bfad_msix, 0,
@@ -1248,6 +1241,9 @@ bfad_install_msix_handler(struct bfad_s *bfad)
                                free_irq(bfad->msix_tab[j].msix.vector,
                                                &bfad->msix_tab[j]);
 
+                       bfad->bfad_flags &= ~BFAD_MSIX_ON;
+                       pci_disable_msix(bfad->pcidev);
+
                        return 1;
                }
        }
@@ -1265,6 +1261,7 @@ bfad_setup_intr(struct bfad_s *bfad)
        u32 mask = 0, i, num_bit = 0, max_bit = 0;
        struct msix_entry msix_entries[MAX_MSIX_ENTRY];
        struct pci_dev *pdev = bfad->pcidev;
+       u16     reg;
 
        /* Call BFA to get the msix map for this PCI function.  */
        bfa_msix_getvecs(&bfad->bfa, &mask, &num_bit, &max_bit);
@@ -1272,8 +1269,8 @@ bfad_setup_intr(struct bfad_s *bfad)
        /* Set up the msix entry table */
        bfad_init_msix_entry(bfad, msix_entries, mask, max_bit);
 
-       if ((bfa_asic_id_ct(pdev->device) && !msix_disable_ct) ||
-           (!bfa_asic_id_ct(pdev->device) && !msix_disable_cb)) {
+       if ((bfa_asic_id_ctc(pdev->device) && !msix_disable_ct) ||
+          (bfa_asic_id_cb(pdev->device) && !msix_disable_cb)) {
 
                error = pci_enable_msix(bfad->pcidev, msix_entries, bfad->nvec);
                if (error) {
@@ -1294,6 +1291,13 @@ bfad_setup_intr(struct bfad_s *bfad)
                        goto line_based;
                }
 
+               /* Disable INTX in MSI-X mode */
+               pci_read_config_word(pdev, PCI_COMMAND, &reg);
+
+               if (!(reg & PCI_COMMAND_INTX_DISABLE))
+                       pci_write_config_word(pdev, PCI_COMMAND,
+                               reg | PCI_COMMAND_INTX_DISABLE);
+
                /* Save the vectors */
                for (i = 0; i < bfad->nvec; i++) {
                        bfa_trc(bfad, msix_entries[i].vector);
@@ -1315,6 +1319,7 @@ line_based:
                /* Enable interrupt handler failed */
                return 1;
        }
+       bfad->bfad_flags |= BFAD_INTX_ON;
 
        return error;
 }
@@ -1331,7 +1336,7 @@ bfad_remove_intr(struct bfad_s *bfad)
 
                pci_disable_msix(bfad->pcidev);
                bfad->bfad_flags &= ~BFAD_MSIX_ON;
-       } else {
+       } else if (bfad->bfad_flags & BFAD_INTX_ON) {
                free_irq(bfad->pcidev->irq, bfad);
        }
 }
@@ -1501,6 +1506,14 @@ struct pci_device_id bfad_id_table[] = {
                .class = (PCI_CLASS_SERIAL_FIBER << 8),
                .class_mask = ~0,
        },
+       {
+               .vendor = BFA_PCI_VENDOR_ID_BROCADE,
+               .device = BFA_PCI_DEVICE_ID_CT2,
+               .subvendor = PCI_ANY_ID,
+               .subdevice = PCI_ANY_ID,
+               .class = (PCI_CLASS_SERIAL_FIBER << 8),
+               .class_mask = ~0,
+       },
 
        {0, 0},
 };
@@ -1594,33 +1607,33 @@ out:
 static u32 *
 bfad_load_fwimg(struct pci_dev *pdev)
 {
-       if (pdev->device == BFA_PCI_DEVICE_ID_CT_FC) {
-               if (bfi_image_ct_fc_size == 0)
-                       bfad_read_firmware(pdev, &bfi_image_ct_fc,
-                               &bfi_image_ct_fc_size, BFAD_FW_FILE_CT_FC);
-               return bfi_image_ct_fc;
-       } else if (pdev->device == BFA_PCI_DEVICE_ID_CT) {
-               if (bfi_image_ct_cna_size == 0)
-                       bfad_read_firmware(pdev, &bfi_image_ct_cna,
-                               &bfi_image_ct_cna_size, BFAD_FW_FILE_CT_CNA);
-               return bfi_image_ct_cna;
+       if (pdev->device == BFA_PCI_DEVICE_ID_CT2) {
+               if (bfi_image_ct2_size == 0)
+                       bfad_read_firmware(pdev, &bfi_image_ct2,
+                               &bfi_image_ct2_size, BFAD_FW_FILE_CT2);
+               return bfi_image_ct2;
+       } else if (bfa_asic_id_ct(pdev->device)) {
+               if (bfi_image_ct_size == 0)
+                       bfad_read_firmware(pdev, &bfi_image_ct,
+                               &bfi_image_ct_size, BFAD_FW_FILE_CT);
+               return bfi_image_ct;
        } else {
-               if (bfi_image_cb_fc_size == 0)
-                       bfad_read_firmware(pdev, &bfi_image_cb_fc,
-                               &bfi_image_cb_fc_size, BFAD_FW_FILE_CB_FC);
-               return bfi_image_cb_fc;
+               if (bfi_image_cb_size == 0)
+                       bfad_read_firmware(pdev, &bfi_image_cb,
+                               &bfi_image_cb_size, BFAD_FW_FILE_CB);
+               return bfi_image_cb;
        }
 }
 
 static void
 bfad_free_fwimg(void)
 {
-       if (bfi_image_ct_fc_size && bfi_image_ct_fc)
-               vfree(bfi_image_ct_fc);
-       if (bfi_image_ct_cna_size && bfi_image_ct_cna)
-               vfree(bfi_image_ct_cna);
-       if (bfi_image_cb_fc_size && bfi_image_cb_fc)
-               vfree(bfi_image_cb_fc);
+       if (bfi_image_ct2_size && bfi_image_ct2)
+               vfree(bfi_image_ct2);
+       if (bfi_image_ct_size && bfi_image_ct)
+               vfree(bfi_image_ct);
+       if (bfi_image_cb_size && bfi_image_cb)
+               vfree(bfi_image_cb);
 }
 
 module_init(bfad_init);
index a94ea42..9d95844 100644 (file)
@@ -218,6 +218,9 @@ bfad_im_get_host_speed(struct Scsi_Host *shost)
        case BFA_PORT_SPEED_10GBPS:
                fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
                break;
+       case BFA_PORT_SPEED_16GBPS:
+               fc_host_speed(shost) = FC_PORTSPEED_16GBIT;
+               break;
        case BFA_PORT_SPEED_8GBPS:
                fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
                break;
@@ -580,6 +583,8 @@ struct fc_function_template bfad_im_fc_function_template = {
        .vport_create = bfad_im_vport_create,
        .vport_delete = bfad_im_vport_delete,
        .vport_disable = bfad_im_vport_disable,
+       .bsg_request = bfad_im_bsg_request,
+       .bsg_timeout = bfad_im_bsg_timeout,
 };
 
 struct fc_function_template bfad_im_vport_fc_function_template = {
@@ -674,8 +679,10 @@ bfad_im_model_desc_show(struct device *dev, struct device_attribute *attr,
        struct bfad_s *bfad = im_port->bfad;
        char model[BFA_ADAPTER_MODEL_NAME_LEN];
        char model_descr[BFA_ADAPTER_MODEL_DESCR_LEN];
+       int nports = 0;
 
        bfa_get_adapter_model(&bfad->bfa, model);
+       nports = bfa_get_nports(&bfad->bfa);
        if (!strcmp(model, "Brocade-425"))
                snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
                        "Brocade 4Gbps PCIe dual port FC HBA");
@@ -684,10 +691,10 @@ bfad_im_model_desc_show(struct device *dev, struct device_attribute *attr,
                        "Brocade 8Gbps PCIe dual port FC HBA");
        else if (!strcmp(model, "Brocade-42B"))
                snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
-                       "HP 4Gbps PCIe dual port FC HBA");
+                       "Brocade 4Gbps PCIe dual port FC HBA for HP");
        else if (!strcmp(model, "Brocade-82B"))
                snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
-                       "HP 8Gbps PCIe dual port FC HBA");
+                       "Brocade 8Gbps PCIe dual port FC HBA for HP");
        else if (!strcmp(model, "Brocade-1010"))
                snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
                        "Brocade 10Gbps single port CNA");
@@ -696,7 +703,7 @@ bfad_im_model_desc_show(struct device *dev, struct device_attribute *attr,
                        "Brocade 10Gbps dual port CNA");
        else if (!strcmp(model, "Brocade-1007"))
                snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
-                       "Brocade 10Gbps CNA");
+                       "Brocade 10Gbps CNA for IBM Blade Center");
        else if (!strcmp(model, "Brocade-415"))
                snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
                        "Brocade 4Gbps PCIe single port FC HBA");
@@ -705,17 +712,45 @@ bfad_im_model_desc_show(struct device *dev, struct device_attribute *attr,
                        "Brocade 8Gbps PCIe single port FC HBA");
        else if (!strcmp(model, "Brocade-41B"))
                snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
-                       "HP 4Gbps PCIe single port FC HBA");
+                       "Brocade 4Gbps PCIe single port FC HBA for HP");
        else if (!strcmp(model, "Brocade-81B"))
                snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
-                       "HP 8Gbps PCIe single port FC HBA");
+                       "Brocade 8Gbps PCIe single port FC HBA for HP");
        else if (!strcmp(model, "Brocade-804"))
                snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
-                       "HP Bladesystem C-class 8Gbps FC HBA");
-       else if (!strcmp(model, "Brocade-902"))
+                       "Brocade 8Gbps FC HBA for HP Bladesystem C-class");
+       else if (!strcmp(model, "Brocade-902") ||
+                !strcmp(model, "Brocade-1741"))
                snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
-                       "Brocade 10Gbps CNA");
-       else
+                       "Brocade 10Gbps CNA for Dell M-Series Blade Servers");
+       else if (strstr(model, "Brocade-1560")) {
+               if (nports == 1)
+                       snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+                               "Brocade 16Gbps PCIe single port FC HBA");
+               else
+                       snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+                               "Brocade 16Gbps PCIe dual port FC HBA");
+       } else if (strstr(model, "Brocade-1710")) {
+               if (nports == 1)
+                       snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+                               "Brocade 10Gbps single port CNA");
+               else
+                       snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+                               "Brocade 10Gbps dual port CNA");
+       } else if (strstr(model, "Brocade-1860")) {
+               if (nports == 1 && bfa_ioc_is_cna(&bfad->bfa.ioc))
+                       snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+                               "Brocade 10Gbps single port CNA");
+               else if (nports == 1 && !bfa_ioc_is_cna(&bfad->bfa.ioc))
+                       snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+                               "Brocade 16Gbps PCIe single port FC HBA");
+               else if (nports == 2 && bfa_ioc_is_cna(&bfad->bfa.ioc))
+                       snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+                               "Brocade 10Gbps dual port CNA");
+               else if (nports == 2 && !bfa_ioc_is_cna(&bfad->bfa.ioc))
+                       snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+                               "Brocade 16Gbps PCIe dual port FC HBA");
+       } else
                snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
                        "Invalid Model");
 
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
new file mode 100644 (file)
index 0000000..89f863e
--- /dev/null
@@ -0,0 +1,2163 @@
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/uaccess.h>
+#include "bfad_drv.h"
+#include "bfad_im.h"
+#include "bfad_bsg.h"
+
+BFA_TRC_FILE(LDRV, BSG);
+
/*
 * Re-enable the IOC from the disabled state.
 *
 * @bfad: driver instance
 * @cmd:  bsg payload; treated as struct bfa_bsg_gen_s, status written back
 *
 * Blocks until the enable completes (bfad->enable_comp is signalled by the
 * enable completion callback elsewhere in the driver).  Returns 0; the
 * BFA-level result is reported through iocmd->status.
 */
int
bfad_iocmd_ioc_enable(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	int	rc = 0;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	/* If IOC is not in disabled state - return */
	if (!bfa_ioc_is_disabled(&bfad->bfa.ioc)) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_IOC_FAILURE;
		return rc;
	}

	/* Arm the completion before kicking off the enable so the
	 * callback cannot complete before we start waiting. */
	init_completion(&bfad->enable_comp);
	bfa_iocfc_enable(&bfad->bfa);
	iocmd->status = BFA_STATUS_OK;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	/* Sleep outside the spinlock until the enable completes. */
	wait_for_completion(&bfad->enable_comp);

	return rc;
}
+
+int
+bfad_iocmd_ioc_disable(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+       int     rc = 0;
+       unsigned long   flags;
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       if (bfad->disable_active) {
+               spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+               return EBUSY;
+       }
+
+       bfad->disable_active = BFA_TRUE;
+       init_completion(&bfad->disable_comp);
+       bfa_iocfc_disable(&bfad->bfa);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+       wait_for_completion(&bfad->disable_comp);
+       bfad->disable_active = BFA_FALSE;
+       iocmd->status = BFA_STATUS_OK;
+
+       return rc;
+}
+
+static int
+bfad_iocmd_ioc_get_info(struct bfad_s *bfad, void *cmd)
+{
+       int     i;
+       struct bfa_bsg_ioc_info_s *iocmd = (struct bfa_bsg_ioc_info_s *)cmd;
+       struct bfad_im_port_s   *im_port;
+       struct bfa_port_attr_s  pattr;
+       unsigned long   flags;
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       bfa_fcport_get_attr(&bfad->bfa, &pattr);
+       iocmd->nwwn = pattr.nwwn;
+       iocmd->pwwn = pattr.pwwn;
+       iocmd->ioc_type = bfa_get_type(&bfad->bfa);
+       iocmd->mac = bfa_get_mac(&bfad->bfa);
+       iocmd->factory_mac = bfa_get_mfg_mac(&bfad->bfa);
+       bfa_get_adapter_serial_num(&bfad->bfa, iocmd->serialnum);
+       iocmd->factorynwwn = pattr.factorynwwn;
+       iocmd->factorypwwn = pattr.factorypwwn;
+       im_port = bfad->pport.im_port;
+       iocmd->host = im_port->shost->host_no;
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+       strcpy(iocmd->name, bfad->adapter_name);
+       strcpy(iocmd->port_name, bfad->port_name);
+       strcpy(iocmd->hwpath, bfad->pci_name);
+
+       /* set adapter hw path */
+       strcpy(iocmd->adapter_hwpath, bfad->pci_name);
+       i = strlen(iocmd->adapter_hwpath) - 1;
+       while (iocmd->adapter_hwpath[i] != '.')
+               i--;
+       iocmd->adapter_hwpath[i] = '\0';
+       iocmd->status = BFA_STATUS_OK;
+       return 0;
+}
+
/*
 * Return IOC attributes, augmented with driver-side version strings and
 * the cached PCI attributes.
 *
 * @bfad: driver instance
 * @cmd:  bsg payload; treated as struct bfa_bsg_ioc_attr_s
 *
 * Always returns 0; iocmd->status carries BFA_STATUS_OK.
 */
static int
bfad_iocmd_ioc_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_ioc_attr_s *iocmd = (struct bfa_bsg_ioc_attr_s *)cmd;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_ioc_get_attr(&bfad->bfa.ioc, &iocmd->ioc_attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	/* fill in driver attr info */
	strcpy(iocmd->ioc_attr.driver_attr.driver, BFAD_DRIVER_NAME);
	strncpy(iocmd->ioc_attr.driver_attr.driver_ver,
		BFAD_DRIVER_VERSION, BFA_VERSION_LEN);
	strcpy(iocmd->ioc_attr.driver_attr.fw_ver,
		iocmd->ioc_attr.adapter_attr.fw_ver);
	strcpy(iocmd->ioc_attr.driver_attr.bios_ver,
		iocmd->ioc_attr.adapter_attr.optrom_ver);

	/* copy chip rev info first otherwise it will be overwritten */
	/* Refresh the driver's cached chip_rev from the freshly read IOC
	 * attributes, then overwrite the bsg reply's pci_attr wholesale
	 * with the cached copy — order matters here. */
	memcpy(bfad->pci_attr.chip_rev, iocmd->ioc_attr.pci_attr.chip_rev,
		sizeof(bfad->pci_attr.chip_rev));
	memcpy(&iocmd->ioc_attr.pci_attr, &bfad->pci_attr,
		sizeof(struct bfa_ioc_pci_attr_s));

	iocmd->status = BFA_STATUS_OK;
	return 0;
}
+
+int
+bfad_iocmd_ioc_get_stats(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_ioc_stats_s *iocmd = (struct bfa_bsg_ioc_stats_s *)cmd;
+
+       bfa_ioc_get_stats(&bfad->bfa, &iocmd->ioc_stats);
+       iocmd->status = BFA_STATUS_OK;
+       return 0;
+}
+
+int
+bfad_iocmd_ioc_get_fwstats(struct bfad_s *bfad, void *cmd,
+                       unsigned int payload_len)
+{
+       struct bfa_bsg_ioc_fwstats_s *iocmd =
+                       (struct bfa_bsg_ioc_fwstats_s *)cmd;
+       void    *iocmd_bufptr;
+       unsigned long   flags;
+
+       if (bfad_chk_iocmd_sz(payload_len,
+                       sizeof(struct bfa_bsg_ioc_fwstats_s),
+                       sizeof(struct bfa_fw_stats_s)) != BFA_STATUS_OK) {
+               iocmd->status = BFA_STATUS_VERSION_FAIL;
+               goto out;
+       }
+
+       iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_ioc_fwstats_s);
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       iocmd->status = bfa_ioc_fw_stats_get(&bfad->bfa.ioc, iocmd_bufptr);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+       if (iocmd->status != BFA_STATUS_OK) {
+               bfa_trc(bfad, iocmd->status);
+               goto out;
+       }
+out:
+       bfa_trc(bfad, 0x6666);
+       return 0;
+}
+
+int
+bfad_iocmd_iocfc_get_attr(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_iocfc_attr_s *iocmd = (struct bfa_bsg_iocfc_attr_s *)cmd;
+
+       iocmd->status = BFA_STATUS_OK;
+       bfa_iocfc_get_attr(&bfad->bfa, &iocmd->iocfc_attr);
+
+       return 0;
+}
+
+int
+bfad_iocmd_iocfc_set_intr(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_iocfc_intr_s *iocmd = (struct bfa_bsg_iocfc_intr_s *)cmd;
+       unsigned long   flags;
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       iocmd->status = bfa_iocfc_israttr_set(&bfad->bfa, &iocmd->attr);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+       return 0;
+}
+
+int
+bfad_iocmd_port_enable(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+       struct bfad_hal_comp fcomp;
+       unsigned long flags;
+
+       init_completion(&fcomp.comp);
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       iocmd->status = bfa_port_enable(&bfad->bfa.modules.port,
+                                       bfad_hcb_comp, &fcomp);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       if (iocmd->status != BFA_STATUS_OK) {
+               bfa_trc(bfad, iocmd->status);
+               return 0;
+       }
+       wait_for_completion(&fcomp.comp);
+       iocmd->status = fcomp.status;
+       return 0;
+}
+
+int
+bfad_iocmd_port_disable(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+       struct bfad_hal_comp fcomp;
+       unsigned long flags;
+
+       init_completion(&fcomp.comp);
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       iocmd->status = bfa_port_disable(&bfad->bfa.modules.port,
+                               bfad_hcb_comp, &fcomp);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+       if (iocmd->status != BFA_STATUS_OK) {
+               bfa_trc(bfad, iocmd->status);
+               return 0;
+       }
+       wait_for_completion(&fcomp.comp);
+       iocmd->status = fcomp.status;
+       return 0;
+}
+
/*
 * Return the physical port attributes, merged with selected base-lport
 * attributes (pid, port_type, loopback, authfail, symbolic name).
 *
 * @bfad: driver instance
 * @cmd:  bsg payload; treated as struct bfa_bsg_port_attr_s
 *
 * Always returns 0; iocmd->status carries BFA_STATUS_OK.
 */
static int
bfad_iocmd_port_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_port_attr_s *iocmd = (struct bfa_bsg_port_attr_s *)cmd;
	struct bfa_lport_attr_s port_attr;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_fcport_get_attr(&bfad->bfa, &iocmd->attr);
	bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	/* Only report a PID when a topology has been established. */
	if (iocmd->attr.topology != BFA_PORT_TOPOLOGY_NONE)
		iocmd->attr.pid = port_attr.pid;
	else
		iocmd->attr.pid = 0;

	iocmd->attr.port_type = port_attr.port_type;
	iocmd->attr.loopback = port_attr.loopback;
	iocmd->attr.authfail = port_attr.authfail;
	/* NOTE(review): strncpy does not guarantee NUL termination if the
	 * source fills the buffer — presumably both symname fields are the
	 * same size and the source is always terminated; confirm. */
	strncpy(iocmd->attr.port_symname.symname,
		port_attr.port_cfg.sym_name.symname,
		sizeof(port_attr.port_cfg.sym_name.symname));

	iocmd->status = BFA_STATUS_OK;
	return 0;
}
+
+int
+bfad_iocmd_port_get_stats(struct bfad_s *bfad, void *cmd,
+                       unsigned int payload_len)
+{
+       struct bfa_bsg_port_stats_s *iocmd = (struct bfa_bsg_port_stats_s *)cmd;
+       struct bfad_hal_comp fcomp;
+       void    *iocmd_bufptr;
+       unsigned long   flags;
+
+       if (bfad_chk_iocmd_sz(payload_len,
+                       sizeof(struct bfa_bsg_port_stats_s),
+                       sizeof(union bfa_port_stats_u)) != BFA_STATUS_OK) {
+               iocmd->status = BFA_STATUS_VERSION_FAIL;
+               return 0;
+       }
+
+       iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_port_stats_s);
+
+       init_completion(&fcomp.comp);
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       iocmd->status = bfa_port_get_stats(&bfad->bfa.modules.port,
+                               iocmd_bufptr, bfad_hcb_comp, &fcomp);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       if (iocmd->status != BFA_STATUS_OK) {
+               bfa_trc(bfad, iocmd->status);
+               goto out;
+       }
+
+       wait_for_completion(&fcomp.comp);
+       iocmd->status = fcomp.status;
+out:
+       return 0;
+}
+
+static int
+bfad_iocmd_lport_get_attr(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_fcs_lport_s  *fcs_port;
+       struct bfa_bsg_lport_attr_s *iocmd = (struct bfa_bsg_lport_attr_s *)cmd;
+       unsigned long   flags;
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+                               iocmd->vf_id, iocmd->pwwn);
+       if (fcs_port == NULL) {
+               spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+               iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+               goto out;
+       }
+
+       bfa_fcs_lport_get_attr(fcs_port, &iocmd->port_attr);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       iocmd->status = BFA_STATUS_OK;
+out:
+       return 0;
+}
+
+int
+bfad_iocmd_lport_get_stats(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_fcs_lport_s *fcs_port;
+       struct bfa_bsg_lport_stats_s *iocmd =
+                       (struct bfa_bsg_lport_stats_s *)cmd;
+       unsigned long   flags;
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+                               iocmd->vf_id, iocmd->pwwn);
+       if (fcs_port == NULL) {
+               spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+               iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+               goto out;
+       }
+
+       bfa_fcs_lport_get_stats(fcs_port, &iocmd->port_stats);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       iocmd->status = BFA_STATUS_OK;
+out:
+       return 0;
+}
+
+int
+bfad_iocmd_lport_get_iostats(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_fcs_lport_s *fcs_port;
+       struct bfa_bsg_lport_iostats_s *iocmd =
+                       (struct bfa_bsg_lport_iostats_s *)cmd;
+       unsigned long   flags;
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+                               iocmd->vf_id, iocmd->pwwn);
+       if (fcs_port == NULL) {
+               spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+               iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+               goto out;
+       }
+
+       bfa_fcpim_port_iostats(&bfad->bfa, &iocmd->iostats,
+                       fcs_port->lp_tag);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       iocmd->status = BFA_STATUS_OK;
+out:
+       return 0;
+}
+
+int
+bfad_iocmd_lport_get_rports(struct bfad_s *bfad, void *cmd,
+                       unsigned int payload_len)
+{
+       struct bfa_bsg_lport_get_rports_s *iocmd =
+                       (struct bfa_bsg_lport_get_rports_s *)cmd;
+       struct bfa_fcs_lport_s *fcs_port;
+       unsigned long   flags;
+       void    *iocmd_bufptr;
+
+       if (iocmd->nrports == 0)
+               return EINVAL;
+
+       if (bfad_chk_iocmd_sz(payload_len,
+                       sizeof(struct bfa_bsg_lport_get_rports_s),
+                       sizeof(wwn_t) * iocmd->nrports) != BFA_STATUS_OK) {
+               iocmd->status = BFA_STATUS_VERSION_FAIL;
+               return 0;
+       }
+
+       iocmd_bufptr = (char *)iocmd +
+                       sizeof(struct bfa_bsg_lport_get_rports_s);
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+                               iocmd->vf_id, iocmd->pwwn);
+       if (fcs_port == NULL) {
+               spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+               bfa_trc(bfad, 0);
+               iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+               goto out;
+       }
+
+       bfa_fcs_lport_get_rports(fcs_port, (wwn_t *)iocmd_bufptr,
+                               &iocmd->nrports);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       iocmd->status = BFA_STATUS_OK;
+out:
+       return 0;
+}
+
+int
+bfad_iocmd_rport_get_attr(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_rport_attr_s *iocmd = (struct bfa_bsg_rport_attr_s *)cmd;
+       struct bfa_fcs_lport_s *fcs_port;
+       struct bfa_fcs_rport_s *fcs_rport;
+       unsigned long   flags;
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+                               iocmd->vf_id, iocmd->pwwn);
+       if (fcs_port == NULL) {
+               bfa_trc(bfad, 0);
+               spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+               iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+               goto out;
+       }
+
+       fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
+       if (fcs_rport == NULL) {
+               bfa_trc(bfad, 0);
+               spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+               iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
+               goto out;
+       }
+
+       bfa_fcs_rport_get_attr(fcs_rport, &iocmd->attr);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       iocmd->status = BFA_STATUS_OK;
+out:
+       return 0;
+}
+
/*
 * Map a remote port (identified by rpwwn on logical port (vf_id, pwwn))
 * to its SCSI address: host number, bus, target id and lun.
 *
 * @bfad: driver instance
 * @cmd:  bsg payload; treated as struct bfa_bsg_rport_scsi_addr_s
 *
 * Always returns 0; iocmd->status is BFA_STATUS_UNKNOWN_LWWN /
 * BFA_STATUS_UNKNOWN_RWWN when a lookup fails, BFA_STATUS_OK otherwise.
 */
static int
bfad_iocmd_rport_get_addr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_rport_scsi_addr_s *iocmd =
			(struct bfa_bsg_rport_scsi_addr_s *)cmd;
	struct bfa_fcs_lport_s	*fcs_port;
	struct bfa_fcs_itnim_s	*fcs_itnim;
	struct bfad_itnim_s	*drv_itnim;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	fcs_itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
	if (fcs_itnim == NULL) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		goto out;
	}

	drv_itnim = fcs_itnim->itnim_drv;

	/* The driver-level itnim may not (yet) be bound to an im_port;
	 * treat that the same as an unknown remote WWN. */
	if (drv_itnim && drv_itnim->im_port)
		iocmd->host = drv_itnim->im_port->shost->host_no;
	else {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		goto out;
	}

	iocmd->target = drv_itnim->scsi_tgt_id;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	/* Bus and LUN are always reported as 0 for this mapping. */
	iocmd->bus = 0;
	iocmd->lun = 0;
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}
+
/*
 * Return statistics for the remote port identified by rpwwn on logical
 * port (vf_id, pwwn).  The FCS-level stats are copied first, then the
 * embedded HAL stats are overwritten with the live HAL rport counters.
 *
 * @bfad: driver instance
 * @cmd:  bsg payload; treated as struct bfa_bsg_rport_stats_s
 *
 * Always returns 0; iocmd->status is BFA_STATUS_UNKNOWN_LWWN /
 * BFA_STATUS_UNKNOWN_RWWN when a lookup fails, BFA_STATUS_OK otherwise.
 */
int
bfad_iocmd_rport_get_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_rport_stats_s *iocmd =
			(struct bfa_bsg_rport_stats_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_rport_s *fcs_rport;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
	if (fcs_rport == NULL) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		goto out;
	}

	/* Copy FCS stats wholesale, then refresh the HAL sub-struct from
	 * the live HAL rport — the second copy deliberately overwrites
	 * part of the first. */
	memcpy((void *)&iocmd->stats, (void *)&fcs_rport->stats,
		sizeof(struct bfa_rport_stats_s));
	memcpy((void *)&iocmd->stats.hal_stats,
	       (void *)&(bfa_fcs_rport_get_halrport(fcs_rport)->stats),
	       sizeof(struct bfa_rport_hal_stats_s));

	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}
+
+static int
+bfad_iocmd_fabric_get_lports(struct bfad_s *bfad, void *cmd,
+                       unsigned int payload_len)
+{
+       struct bfa_bsg_fabric_get_lports_s *iocmd =
+                       (struct bfa_bsg_fabric_get_lports_s *)cmd;
+       bfa_fcs_vf_t    *fcs_vf;
+       uint32_t        nports = iocmd->nports;
+       unsigned long   flags;
+       void    *iocmd_bufptr;
+
+       if (nports == 0) {
+               iocmd->status = BFA_STATUS_EINVAL;
+               goto out;
+       }
+
+       if (bfad_chk_iocmd_sz(payload_len,
+               sizeof(struct bfa_bsg_fabric_get_lports_s),
+               sizeof(wwn_t[iocmd->nports])) != BFA_STATUS_OK) {
+               iocmd->status = BFA_STATUS_VERSION_FAIL;
+               goto out;
+       }
+
+       iocmd_bufptr = (char *)iocmd +
+                       sizeof(struct bfa_bsg_fabric_get_lports_s);
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
+       if (fcs_vf == NULL) {
+               spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+               iocmd->status = BFA_STATUS_UNKNOWN_VFID;
+               goto out;
+       }
+       bfa_fcs_vf_get_ports(fcs_vf, (wwn_t *)iocmd_bufptr, &nports);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+       iocmd->nports = nports;
+       iocmd->status = BFA_STATUS_OK;
+out:
+       return 0;
+}
+
+int
+bfad_iocmd_fcpim_get_modstats(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_fcpim_modstats_s *iocmd =
+                       (struct bfa_bsg_fcpim_modstats_s *)cmd;
+       struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
+       struct list_head *qe, *qen;
+       struct bfa_itnim_s *itnim;
+       unsigned long   flags;
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       /* accumulate IO stats from itnim */
+       memset((void *)&iocmd->modstats, 0, sizeof(struct bfa_itnim_iostats_s));
+       list_for_each_safe(qe, qen, &fcpim->itnim_q) {
+               itnim = (struct bfa_itnim_s *) qe;
+               bfa_fcpim_add_stats(&iocmd->modstats, &(itnim->stats));
+       }
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       iocmd->status = BFA_STATUS_OK;
+       return 0;
+}
+
+int
+bfad_iocmd_fcpim_get_del_itn_stats(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_fcpim_del_itn_stats_s *iocmd =
+                       (struct bfa_bsg_fcpim_del_itn_stats_s *)cmd;
+       struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
+       unsigned long   flags;
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       memcpy((void *)&iocmd->modstats, (void *)&fcpim->del_itn_stats,
+               sizeof(struct bfa_fcpim_del_itn_stats_s));
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+       iocmd->status = BFA_STATUS_OK;
+       return 0;
+}
+
+static int
+bfad_iocmd_itnim_get_attr(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_itnim_attr_s *iocmd = (struct bfa_bsg_itnim_attr_s *)cmd;
+       struct bfa_fcs_lport_s  *fcs_port;
+       unsigned long   flags;
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+                               iocmd->vf_id, iocmd->lpwwn);
+       if (!fcs_port)
+               iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+       else
+               iocmd->status = bfa_fcs_itnim_attr_get(fcs_port,
+                                       iocmd->rpwwn, &iocmd->attr);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       return 0;
+}
+
/*
 * Return the HAL-level I/O statistics of the itnim identified by rpwwn
 * on the logical port (vf_id, lpwwn).
 *
 * @bfad: driver instance
 * @cmd:  bsg payload; treated as struct bfa_bsg_itnim_iostats_s
 *
 * Always returns 0; iocmd->status is BFA_STATUS_UNKNOWN_LWWN /
 * BFA_STATUS_UNKNOWN_RWWN when a lookup fails, BFA_STATUS_OK otherwise.
 */
static int
bfad_iocmd_itnim_get_iostats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_itnim_iostats_s *iocmd =
			(struct bfa_bsg_itnim_iostats_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_itnim_s *itnim;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->lpwwn);
	if (!fcs_port) {
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		bfa_trc(bfad, 0);
	} else {
		itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
		if (itnim == NULL)
			iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		else {
			iocmd->status = BFA_STATUS_OK;
			/* Copy the stats straight out of the HAL itnim
			 * while still holding the lock. */
			memcpy((void *)&iocmd->iostats, (void *)
			       &(bfa_fcs_itnim_get_halitn(itnim)->stats),
			       sizeof(struct bfa_itnim_iostats_s));
		}
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}
+
+static int
+bfad_iocmd_itnim_get_itnstats(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_itnim_itnstats_s *iocmd =
+                       (struct bfa_bsg_itnim_itnstats_s *)cmd;
+       struct bfa_fcs_lport_s *fcs_port;
+       struct bfa_fcs_itnim_s *itnim;
+       unsigned long   flags;
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+                               iocmd->vf_id, iocmd->lpwwn);
+       if (!fcs_port) {
+               iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+               bfa_trc(bfad, 0);
+       } else {
+               itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
+               if (itnim == NULL)
+                       iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
+               else {
+                       iocmd->status = BFA_STATUS_OK;
+                       bfa_fcs_itnim_stats_get(fcs_port, iocmd->rpwwn,
+                                       &iocmd->itnstats);
+               }
+       }
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       return 0;
+}
+
+int
+bfad_iocmd_fcport_enable(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+       unsigned long flags;
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       iocmd->status = bfa_fcport_enable(&bfad->bfa);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+       return 0;
+}
+
+int
+bfad_iocmd_fcport_disable(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+       unsigned long flags;
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       iocmd->status = bfa_fcport_disable(&bfad->bfa);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+       return 0;
+}
+
+int
+bfad_iocmd_ioc_get_pcifn_cfg(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_pcifn_cfg_s *iocmd = (struct bfa_bsg_pcifn_cfg_s *)cmd;
+       struct bfad_hal_comp fcomp;
+       unsigned long flags;
+
+       init_completion(&fcomp.comp);
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       iocmd->status = bfa_ablk_query(&bfad->bfa.modules.ablk,
+                               &iocmd->pcifn_cfg,
+                               bfad_hcb_comp, &fcomp);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       if (iocmd->status != BFA_STATUS_OK)
+               goto out;
+
+       wait_for_completion(&fcomp.comp);
+       iocmd->status = fcomp.status;
+out:
+       return 0;
+}
+
+int
+bfad_iocmd_pcifn_create(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
+       struct bfad_hal_comp fcomp;
+       unsigned long flags;
+
+       init_completion(&fcomp.comp);
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       iocmd->status = bfa_ablk_pf_create(&bfad->bfa.modules.ablk,
+                               &iocmd->pcifn_id, iocmd->port,
+                               iocmd->pcifn_class, iocmd->bandwidth,
+                               bfad_hcb_comp, &fcomp);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       if (iocmd->status != BFA_STATUS_OK)
+               goto out;
+
+       wait_for_completion(&fcomp.comp);
+       iocmd->status = fcomp.status;
+out:
+       return 0;
+}
+
+int
+bfad_iocmd_pcifn_delete(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
+       struct bfad_hal_comp fcomp;
+       unsigned long flags;
+
+       init_completion(&fcomp.comp);
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       iocmd->status = bfa_ablk_pf_delete(&bfad->bfa.modules.ablk,
+                               iocmd->pcifn_id,
+                               bfad_hcb_comp, &fcomp);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       if (iocmd->status != BFA_STATUS_OK)
+               goto out;
+
+       wait_for_completion(&fcomp.comp);
+       iocmd->status = fcomp.status;
+out:
+       return 0;
+}
+
+int
+bfad_iocmd_pcifn_bw(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
+       struct bfad_hal_comp fcomp;
+       unsigned long flags;
+
+       init_completion(&fcomp.comp);
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       iocmd->status = bfa_ablk_pf_update(&bfad->bfa.modules.ablk,
+                               iocmd->pcifn_id, iocmd->bandwidth,
+                               bfad_hcb_comp, &fcomp);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       bfa_trc(bfad, iocmd->status);
+       if (iocmd->status != BFA_STATUS_OK)
+               goto out;
+
+       wait_for_completion(&fcomp.comp);
+       iocmd->status = fcomp.status;
+       bfa_trc(bfad, iocmd->status);
+out:
+       return 0;
+}
+
+int
+bfad_iocmd_adapter_cfg_mode(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_adapter_cfg_mode_s *iocmd =
+                       (struct bfa_bsg_adapter_cfg_mode_s *)cmd;
+       struct bfad_hal_comp fcomp;
+       unsigned long flags = 0;
+
+       init_completion(&fcomp.comp);
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       iocmd->status = bfa_ablk_adapter_config(&bfad->bfa.modules.ablk,
+                               iocmd->cfg.mode, iocmd->cfg.max_pf,
+                               iocmd->cfg.max_vf, bfad_hcb_comp, &fcomp);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       if (iocmd->status != BFA_STATUS_OK)
+               goto out;
+
+       wait_for_completion(&fcomp.comp);
+       iocmd->status = fcomp.status;
+out:
+       return 0;
+}
+
+int
+bfad_iocmd_port_cfg_mode(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_port_cfg_mode_s *iocmd =
+                       (struct bfa_bsg_port_cfg_mode_s *)cmd;
+       struct bfad_hal_comp fcomp;
+       unsigned long flags = 0;
+
+       init_completion(&fcomp.comp);
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       iocmd->status = bfa_ablk_port_config(&bfad->bfa.modules.ablk,
+                               iocmd->instance, iocmd->cfg.mode,
+                               iocmd->cfg.max_pf, iocmd->cfg.max_vf,
+                               bfad_hcb_comp, &fcomp);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       if (iocmd->status != BFA_STATUS_OK)
+               goto out;
+
+       wait_for_completion(&fcomp.comp);
+       iocmd->status = fcomp.status;
+out:
+       return 0;
+}
+
+int
+bfad_iocmd_ablk_optrom(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
+{
+       struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
+       struct bfad_hal_comp fcomp;
+       unsigned long   flags;
+
+       init_completion(&fcomp.comp);
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       if (cmd == IOCMD_FLASH_ENABLE_OPTROM)
+               iocmd->status = bfa_ablk_optrom_en(&bfad->bfa.modules.ablk,
+                                       bfad_hcb_comp, &fcomp);
+       else
+               iocmd->status = bfa_ablk_optrom_dis(&bfad->bfa.modules.ablk,
+                                       bfad_hcb_comp, &fcomp);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+       if (iocmd->status != BFA_STATUS_OK)
+               goto out;
+
+       wait_for_completion(&fcomp.comp);
+       iocmd->status = fcomp.status;
+out:
+       return 0;
+}
+
+int
+bfad_iocmd_faa_enable(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+       unsigned long   flags;
+       struct bfad_hal_comp    fcomp;
+
+       init_completion(&fcomp.comp);
+       iocmd->status = BFA_STATUS_OK;
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       iocmd->status = bfa_faa_enable(&bfad->bfa, bfad_hcb_comp, &fcomp);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+       if (iocmd->status != BFA_STATUS_OK)
+               goto out;
+
+       wait_for_completion(&fcomp.comp);
+       iocmd->status = fcomp.status;
+out:
+       return 0;
+}
+
+int
+bfad_iocmd_faa_disable(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+       unsigned long   flags;
+       struct bfad_hal_comp    fcomp;
+
+       init_completion(&fcomp.comp);
+       iocmd->status = BFA_STATUS_OK;
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       iocmd->status = bfa_faa_disable(&bfad->bfa, bfad_hcb_comp, &fcomp);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+       if (iocmd->status != BFA_STATUS_OK)
+               goto out;
+
+       wait_for_completion(&fcomp.comp);
+       iocmd->status = fcomp.status;
+out:
+       return 0;
+}
+
+/*
+ * Query the FAA attributes into iocmd->faa_attr.
+ * Always returns 0; the operation result is reported in iocmd->status.
+ */
+int
+bfad_iocmd_faa_query(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_faa_attr_s *iocmd = (struct bfa_bsg_faa_attr_s *)cmd;
+       struct bfad_hal_comp    hcomp;
+       unsigned long   irq_flags;
+
+       init_completion(&hcomp.comp);
+       iocmd->status = BFA_STATUS_OK;
+
+       /* Issue the query to the HAL under the driver lock. */
+       spin_lock_irqsave(&bfad->bfad_lock, irq_flags);
+       iocmd->status = bfa_faa_query(&bfad->bfa, &iocmd->faa_attr,
+                               bfad_hcb_comp, &hcomp);
+       spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);
+
+       /* Wait for the completion callback only if the request was queued. */
+       if (iocmd->status == BFA_STATUS_OK) {
+               wait_for_completion(&hcomp.comp);
+               iocmd->status = hcomp.status;
+       }
+       return 0;
+}
+
+/*
+ * Fetch the CEE attributes into the buffer that follows the fixed-size
+ * command header.  Always returns 0; failures are reported in iocmd->status.
+ */
+int
+bfad_iocmd_cee_attr(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
+{
+       struct bfa_bsg_cee_attr_s *iocmd =
+                               (struct bfa_bsg_cee_attr_s *)cmd;
+       struct bfad_hal_comp    hcomp;
+       unsigned long   irq_flags;
+       void    *attr_buf;
+
+       /* The payload must hold the header plus a full bfa_cee_attr_s. */
+       if (bfad_chk_iocmd_sz(payload_len,
+                       sizeof(struct bfa_bsg_cee_attr_s),
+                       sizeof(struct bfa_cee_attr_s)) != BFA_STATUS_OK) {
+               iocmd->status = BFA_STATUS_VERSION_FAIL;
+               return 0;
+       }
+
+       attr_buf = (char *)iocmd + sizeof(struct bfa_bsg_cee_attr_s);
+
+       hcomp.status = 0;
+       init_completion(&hcomp.comp);
+       mutex_lock(&bfad_mutex);
+       spin_lock_irqsave(&bfad->bfad_lock, irq_flags);
+       iocmd->status = bfa_cee_get_attr(&bfad->bfa.modules.cee, attr_buf,
+                                        bfad_hcb_comp, &hcomp);
+       spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);
+       if (iocmd->status == BFA_STATUS_OK) {
+               /* NOTE(review): hcomp.status is not copied back into
+                * iocmd->status after the wait -- confirm this is intended. */
+               wait_for_completion(&hcomp.comp);
+       } else {
+               bfa_trc(bfad, 0x5555);
+       }
+       mutex_unlock(&bfad_mutex);
+       return 0;
+}
+
+/*
+ * Fetch the CEE statistics into the buffer that follows the fixed-size
+ * command header.  Always returns 0; failures are reported in iocmd->status.
+ */
+int
+bfad_iocmd_cee_get_stats(struct bfad_s *bfad, void *cmd,
+                       unsigned int payload_len)
+{
+       struct bfa_bsg_cee_stats_s *iocmd =
+                               (struct bfa_bsg_cee_stats_s *)cmd;
+       struct bfad_hal_comp    hcomp;
+       unsigned long   irq_flags;
+       void    *stats_buf;
+
+       /* The payload must hold the header plus a full bfa_cee_stats_s. */
+       if (bfad_chk_iocmd_sz(payload_len,
+                       sizeof(struct bfa_bsg_cee_stats_s),
+                       sizeof(struct bfa_cee_stats_s)) != BFA_STATUS_OK) {
+               iocmd->status = BFA_STATUS_VERSION_FAIL;
+               return 0;
+       }
+
+       stats_buf = (char *)iocmd + sizeof(struct bfa_bsg_cee_stats_s);
+
+       hcomp.status = 0;
+       init_completion(&hcomp.comp);
+       mutex_lock(&bfad_mutex);
+       spin_lock_irqsave(&bfad->bfad_lock, irq_flags);
+       iocmd->status = bfa_cee_get_stats(&bfad->bfa.modules.cee, stats_buf,
+                                       bfad_hcb_comp, &hcomp);
+       spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);
+       if (iocmd->status == BFA_STATUS_OK) {
+               /* NOTE(review): hcomp.status is not copied back into
+                * iocmd->status after the wait -- confirm this is intended. */
+               wait_for_completion(&hcomp.comp);
+       } else {
+               bfa_trc(bfad, 0x5555);
+       }
+       mutex_unlock(&bfad_mutex);
+       return 0;
+}
+
+/*
+ * Reset the CEE statistics.  The reset is issued without a completion
+ * callback, so there is nothing to wait for.  Always returns 0.
+ */
+int
+bfad_iocmd_cee_reset_stats(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+       unsigned long   irq_flags;
+
+       spin_lock_irqsave(&bfad->bfad_lock, irq_flags);
+       iocmd->status = bfa_cee_reset_stats(&bfad->bfa.modules.cee, NULL, NULL);
+       spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);
+       if (iocmd->status != BFA_STATUS_OK)
+               bfa_trc(bfad, 0x5555);
+       return 0;
+}
+
+/*
+ * Query the SFP media type into iocmd->media.  Always returns 0; the
+ * operation result is reported in iocmd->status.
+ */
+int
+bfad_iocmd_sfp_media(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_sfp_media_s *iocmd = (struct bfa_bsg_sfp_media_s *)cmd;
+       struct bfad_hal_comp    hcomp;
+       unsigned long   irq_flags;
+
+       init_completion(&hcomp.comp);
+       spin_lock_irqsave(&bfad->bfad_lock, irq_flags);
+       iocmd->status = bfa_sfp_media(BFA_SFP_MOD(&bfad->bfa), &iocmd->media,
+                               bfad_hcb_comp, &hcomp);
+       spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);
+       bfa_trc(bfad, iocmd->status);
+
+       /* Only SFP_NOT_READY means the request was queued and will call the
+        * completion; any other status is final. */
+       if (iocmd->status == BFA_STATUS_SFP_NOT_READY) {
+               wait_for_completion(&hcomp.comp);
+               iocmd->status = hcomp.status;
+       }
+       return 0;
+}
+
+/*
+ * Check whether the SFP supports iocmd->speed.  Always returns 0; the
+ * operation result is reported in iocmd->status.
+ */
+int
+bfad_iocmd_sfp_speed(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_sfp_speed_s *iocmd = (struct bfa_bsg_sfp_speed_s *)cmd;
+       struct bfad_hal_comp    hcomp;
+       unsigned long   irq_flags;
+
+       init_completion(&hcomp.comp);
+       spin_lock_irqsave(&bfad->bfad_lock, irq_flags);
+       iocmd->status = bfa_sfp_speed(BFA_SFP_MOD(&bfad->bfa), iocmd->speed,
+                               bfad_hcb_comp, &hcomp);
+       spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);
+       bfa_trc(bfad, iocmd->status);
+
+       /* Only SFP_NOT_READY means the request was queued and will call the
+        * completion; any other status is final. */
+       if (iocmd->status == BFA_STATUS_SFP_NOT_READY) {
+               wait_for_completion(&hcomp.comp);
+               iocmd->status = hcomp.status;
+       }
+       return 0;
+}
+
+/*
+ * Read the flash attributes into iocmd->attr.  Always returns 0; the
+ * operation result is reported in iocmd->status.
+ */
+int
+bfad_iocmd_flash_get_attr(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_flash_attr_s *iocmd =
+                       (struct bfa_bsg_flash_attr_s *)cmd;
+       struct bfad_hal_comp hcomp;
+       unsigned long   irq_flags;
+
+       init_completion(&hcomp.comp);
+       spin_lock_irqsave(&bfad->bfad_lock, irq_flags);
+       iocmd->status = bfa_flash_get_attr(BFA_FLASH(&bfad->bfa), &iocmd->attr,
+                               bfad_hcb_comp, &hcomp);
+       spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);
+
+       /* Wait for the completion callback only if the request was queued. */
+       if (iocmd->status == BFA_STATUS_OK) {
+               wait_for_completion(&hcomp.comp);
+               iocmd->status = hcomp.status;
+       }
+       return 0;
+}
+
+/*
+ * Erase one flash partition (iocmd->type / iocmd->instance).  Always
+ * returns 0; the operation result is reported in iocmd->status.
+ */
+int
+bfad_iocmd_flash_erase_part(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
+       struct bfad_hal_comp hcomp;
+       unsigned long   irq_flags;
+
+       init_completion(&hcomp.comp);
+       spin_lock_irqsave(&bfad->bfad_lock, irq_flags);
+       iocmd->status = bfa_flash_erase_part(BFA_FLASH(&bfad->bfa), iocmd->type,
+                               iocmd->instance, bfad_hcb_comp, &hcomp);
+       spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);
+
+       /* Wait for the completion callback only if the request was queued. */
+       if (iocmd->status == BFA_STATUS_OK) {
+               wait_for_completion(&hcomp.comp);
+               iocmd->status = hcomp.status;
+       }
+       return 0;
+}
+
+/*
+ * Write a flash partition from the buffer that follows the command
+ * header.  Always returns 0; failures are reported in iocmd->status.
+ */
+int
+bfad_iocmd_flash_update_part(struct bfad_s *bfad, void *cmd,
+                       unsigned int payload_len)
+{
+       struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
+       struct bfad_hal_comp hcomp;
+       unsigned long   irq_flags;
+       void    *data_buf;
+
+       /* The payload must hold the header plus iocmd->bufsz data bytes. */
+       if (bfad_chk_iocmd_sz(payload_len,
+                       sizeof(struct bfa_bsg_flash_s),
+                       iocmd->bufsz) != BFA_STATUS_OK) {
+               iocmd->status = BFA_STATUS_VERSION_FAIL;
+               return 0;
+       }
+
+       data_buf = (char *)iocmd + sizeof(struct bfa_bsg_flash_s);
+
+       init_completion(&hcomp.comp);
+       spin_lock_irqsave(&bfad->bfad_lock, irq_flags);
+       iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
+                               iocmd->type, iocmd->instance, data_buf,
+                               iocmd->bufsz, 0, bfad_hcb_comp, &hcomp);
+       spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);
+
+       /* Wait for the completion callback only if the request was queued. */
+       if (iocmd->status == BFA_STATUS_OK) {
+               wait_for_completion(&hcomp.comp);
+               iocmd->status = hcomp.status;
+       }
+       return 0;
+}
+
+/*
+ * Read a flash partition into the buffer that follows the command
+ * header.  Always returns 0; failures are reported in iocmd->status.
+ */
+int
+bfad_iocmd_flash_read_part(struct bfad_s *bfad, void *cmd,
+                       unsigned int payload_len)
+{
+       struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
+       struct bfad_hal_comp hcomp;
+       unsigned long   irq_flags;
+       void    *data_buf;
+
+       /* The payload must hold the header plus iocmd->bufsz data bytes. */
+       if (bfad_chk_iocmd_sz(payload_len,
+                       sizeof(struct bfa_bsg_flash_s),
+                       iocmd->bufsz) != BFA_STATUS_OK) {
+               iocmd->status = BFA_STATUS_VERSION_FAIL;
+               return 0;
+       }
+
+       data_buf = (char *)iocmd + sizeof(struct bfa_bsg_flash_s);
+
+       init_completion(&hcomp.comp);
+       spin_lock_irqsave(&bfad->bfad_lock, irq_flags);
+       iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa), iocmd->type,
+                               iocmd->instance, data_buf, iocmd->bufsz, 0,
+                               bfad_hcb_comp, &hcomp);
+       spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);
+
+       /* Wait for the completion callback only if the request was queued. */
+       if (iocmd->status == BFA_STATUS_OK) {
+               wait_for_completion(&hcomp.comp);
+               iocmd->status = hcomp.status;
+       }
+       return 0;
+}
+
+/*
+ * Query the temperature sensor into iocmd->result.  Always returns 0;
+ * the operation result is reported in iocmd->status.
+ */
+int
+bfad_iocmd_diag_temp(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_diag_get_temp_s *iocmd =
+                       (struct bfa_bsg_diag_get_temp_s *)cmd;
+       struct bfad_hal_comp hcomp;
+       unsigned long   irq_flags;
+
+       init_completion(&hcomp.comp);
+       spin_lock_irqsave(&bfad->bfad_lock, irq_flags);
+       iocmd->status = bfa_diag_tsensor_query(BFA_DIAG_MOD(&bfad->bfa),
+                               &iocmd->result, bfad_hcb_comp, &hcomp);
+       spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);
+       bfa_trc(bfad, iocmd->status);
+
+       /* Wait for the completion callback only if the request was queued. */
+       if (iocmd->status == BFA_STATUS_OK) {
+               wait_for_completion(&hcomp.comp);
+               iocmd->status = hcomp.status;
+       }
+       return 0;
+}
+
+/*
+ * Run the diagnostic memory test; results land in iocmd->result.
+ * Always returns 0; the operation result is reported in iocmd->status.
+ */
+int
+bfad_iocmd_diag_memtest(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_diag_memtest_s *iocmd =
+                       (struct bfa_bsg_diag_memtest_s *)cmd;
+       struct bfad_hal_comp hcomp;
+       unsigned long   irq_flags;
+
+       init_completion(&hcomp.comp);
+       spin_lock_irqsave(&bfad->bfad_lock, irq_flags);
+       iocmd->status = bfa_diag_memtest(BFA_DIAG_MOD(&bfad->bfa),
+                               &iocmd->memtest, iocmd->pat,
+                               &iocmd->result, bfad_hcb_comp, &hcomp);
+       spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);
+       bfa_trc(bfad, iocmd->status);
+
+       /* Wait for the completion callback only if the request was queued. */
+       if (iocmd->status == BFA_STATUS_OK) {
+               wait_for_completion(&hcomp.comp);
+               iocmd->status = hcomp.status;
+       }
+       return 0;
+}
+
+/*
+ * Run the FC diagnostic loopback test; results land in iocmd->result.
+ * Always returns 0; the operation result is reported in iocmd->status.
+ */
+int
+bfad_iocmd_diag_loopback(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_diag_loopback_s *iocmd =
+                       (struct bfa_bsg_diag_loopback_s *)cmd;
+       struct bfad_hal_comp hcomp;
+       unsigned long   irq_flags;
+
+       init_completion(&hcomp.comp);
+       spin_lock_irqsave(&bfad->bfad_lock, irq_flags);
+       iocmd->status = bfa_fcdiag_loopback(&bfad->bfa, iocmd->opmode,
+                               iocmd->speed, iocmd->lpcnt, iocmd->pat,
+                               &iocmd->result, bfad_hcb_comp, &hcomp);
+       spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);
+       bfa_trc(bfad, iocmd->status);
+
+       /* Wait for the completion callback only if the request was queued. */
+       if (iocmd->status == BFA_STATUS_OK) {
+               wait_for_completion(&hcomp.comp);
+               iocmd->status = hcomp.status;
+       }
+       return 0;
+}
+
+/*
+ * Ping the firmware iocmd->cnt times with iocmd->pattern; results land in
+ * iocmd->result.  Always returns 0; the result is in iocmd->status.
+ */
+int
+bfad_iocmd_diag_fwping(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_diag_fwping_s *iocmd =
+                       (struct bfa_bsg_diag_fwping_s *)cmd;
+       struct bfad_hal_comp hcomp;
+       unsigned long   irq_flags;
+
+       init_completion(&hcomp.comp);
+       spin_lock_irqsave(&bfad->bfad_lock, irq_flags);
+       iocmd->status = bfa_diag_fwping(BFA_DIAG_MOD(&bfad->bfa), iocmd->cnt,
+                               iocmd->pattern, &iocmd->result,
+                               bfad_hcb_comp, &hcomp);
+       spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);
+       bfa_trc(bfad, iocmd->status);
+
+       /* Wait for the completion callback only if the request was queued. */
+       if (iocmd->status == BFA_STATUS_OK) {
+               bfa_trc(bfad, 0x77771);
+               wait_for_completion(&hcomp.comp);
+               iocmd->status = hcomp.status;
+       }
+       return 0;
+}
+
+/*
+ * Run the FC diagnostic queue test; results land in iocmd->result.
+ * Always returns 0; the operation result is reported in iocmd->status.
+ */
+int
+bfad_iocmd_diag_queuetest(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_diag_qtest_s *iocmd = (struct bfa_bsg_diag_qtest_s *)cmd;
+       struct bfad_hal_comp hcomp;
+       unsigned long   irq_flags;
+
+       init_completion(&hcomp.comp);
+       spin_lock_irqsave(&bfad->bfad_lock, irq_flags);
+       iocmd->status = bfa_fcdiag_queuetest(&bfad->bfa, iocmd->force,
+                               iocmd->queue, &iocmd->result,
+                               bfad_hcb_comp, &hcomp);
+       spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);
+
+       /* Wait for the completion callback only if the request was queued. */
+       if (iocmd->status == BFA_STATUS_OK) {
+               wait_for_completion(&hcomp.comp);
+               iocmd->status = hcomp.status;
+       }
+       return 0;
+}
+
+/*
+ * Dump the SFP registers into iocmd->sfp.  Always returns 0; the
+ * operation result is reported in iocmd->status.
+ */
+int
+bfad_iocmd_diag_sfp(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_sfp_show_s *iocmd =
+                       (struct bfa_bsg_sfp_show_s *)cmd;
+       struct bfad_hal_comp hcomp;
+       unsigned long   irq_flags;
+
+       init_completion(&hcomp.comp);
+       spin_lock_irqsave(&bfad->bfad_lock, irq_flags);
+       iocmd->status = bfa_sfp_show(BFA_SFP_MOD(&bfad->bfa), &iocmd->sfp,
+                               bfad_hcb_comp, &hcomp);
+       spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);
+       bfa_trc(bfad, iocmd->status);
+
+       /* Wait for the completion callback only if the request was queued. */
+       if (iocmd->status == BFA_STATUS_OK) {
+               wait_for_completion(&hcomp.comp);
+               iocmd->status = hcomp.status;
+               bfa_trc(bfad, iocmd->status);
+       }
+       return 0;
+}
+
+/*
+ * Run the LED test.  The call is synchronous, so no completion is needed.
+ * Always returns 0; the operation result is reported in iocmd->status.
+ */
+int
+bfad_iocmd_diag_led(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_diag_led_s *iocmd = (struct bfa_bsg_diag_led_s *)cmd;
+       unsigned long   irq_flags;
+
+       spin_lock_irqsave(&bfad->bfad_lock, irq_flags);
+       iocmd->status = bfa_diag_ledtest(BFA_DIAG_MOD(&bfad->bfa),
+                               &iocmd->ledtest);
+       spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);
+       return 0;
+}
+
+/*
+ * Start/stop port beaconing.  The call is synchronous.
+ * Always returns 0; the operation result is reported in iocmd->status.
+ */
+int
+bfad_iocmd_diag_beacon_lport(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_diag_beacon_s *iocmd =
+                       (struct bfa_bsg_diag_beacon_s *)cmd;
+       unsigned long   irq_flags;
+
+       spin_lock_irqsave(&bfad->bfad_lock, irq_flags);
+       iocmd->status = bfa_diag_beacon_port(BFA_DIAG_MOD(&bfad->bfa),
+                               iocmd->beacon, iocmd->link_e2e_beacon,
+                               iocmd->second);
+       spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);
+       return 0;
+}
+
+/*
+ * Report whether a diagnostic loopback is currently running.
+ * Always returns 0; the answer is placed in iocmd->status.
+ */
+int
+bfad_iocmd_diag_lb_stat(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_diag_lb_stat_s *iocmd =
+                       (struct bfa_bsg_diag_lb_stat_s *)cmd;
+       unsigned long   irq_flags;
+
+       spin_lock_irqsave(&bfad->bfad_lock, irq_flags);
+       iocmd->status = bfa_fcdiag_lb_is_running(&bfad->bfa);
+       spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);
+       bfa_trc(bfad, iocmd->status);
+
+       return 0;
+}
+
+/*
+ * Read the PHY attributes of iocmd->instance into iocmd->attr.
+ * Always returns 0; the operation result is reported in iocmd->status.
+ */
+int
+bfad_iocmd_phy_get_attr(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_phy_attr_s *iocmd =
+                       (struct bfa_bsg_phy_attr_s *)cmd;
+       struct bfad_hal_comp hcomp;
+       unsigned long   irq_flags;
+
+       init_completion(&hcomp.comp);
+       spin_lock_irqsave(&bfad->bfad_lock, irq_flags);
+       iocmd->status = bfa_phy_get_attr(BFA_PHY(&bfad->bfa), iocmd->instance,
+                               &iocmd->attr, bfad_hcb_comp, &hcomp);
+       spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);
+
+       /* Wait for the completion callback only if the request was queued. */
+       if (iocmd->status == BFA_STATUS_OK) {
+               wait_for_completion(&hcomp.comp);
+               iocmd->status = hcomp.status;
+       }
+       return 0;
+}
+
+/*
+ * Read the PHY statistics of iocmd->instance into iocmd->stats.
+ * Always returns 0; the operation result is reported in iocmd->status.
+ */
+int
+bfad_iocmd_phy_get_stats(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_phy_stats_s *iocmd =
+                       (struct bfa_bsg_phy_stats_s *)cmd;
+       struct bfad_hal_comp hcomp;
+       unsigned long   irq_flags;
+
+       init_completion(&hcomp.comp);
+       spin_lock_irqsave(&bfad->bfad_lock, irq_flags);
+       iocmd->status = bfa_phy_get_stats(BFA_PHY(&bfad->bfa), iocmd->instance,
+                               &iocmd->stats, bfad_hcb_comp, &hcomp);
+       spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);
+
+       /* Wait for the completion callback only if the request was queued. */
+       if (iocmd->status == BFA_STATUS_OK) {
+               wait_for_completion(&hcomp.comp);
+               iocmd->status = hcomp.status;
+       }
+       return 0;
+}
+
+/*
+ * Read the PHY firmware of iocmd->instance into the buffer that follows
+ * the command header.  Always returns 0; failures are reported in
+ * iocmd->status.
+ *
+ * Fix: removed a redundant "if (iocmd->status != BFA_STATUS_OK) goto out;"
+ * that sat immediately before the "out:" label -- both paths fell through
+ * to the same place, so it was dead code.
+ */
+int
+bfad_iocmd_phy_read(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
+{
+       struct bfa_bsg_phy_s *iocmd = (struct bfa_bsg_phy_s *)cmd;
+       struct bfad_hal_comp fcomp;
+       void    *iocmd_bufptr;
+       unsigned long   flags;
+
+       /* The payload must hold the header plus iocmd->bufsz data bytes. */
+       if (bfad_chk_iocmd_sz(payload_len,
+                       sizeof(struct bfa_bsg_phy_s),
+                       iocmd->bufsz) != BFA_STATUS_OK) {
+               iocmd->status = BFA_STATUS_VERSION_FAIL;
+               return 0;
+       }
+
+       iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_phy_s);
+       init_completion(&fcomp.comp);
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       iocmd->status = bfa_phy_read(BFA_PHY(&bfad->bfa),
+                               iocmd->instance, iocmd_bufptr, iocmd->bufsz,
+                               0, bfad_hcb_comp, &fcomp);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       if (iocmd->status != BFA_STATUS_OK)
+               goto out;
+       wait_for_completion(&fcomp.comp);
+       iocmd->status = fcomp.status;
+out:
+       return 0;
+}
+
+/*
+ * Snapshot the vHBA attributes (WWNs, plog state, IO profile, path TOV)
+ * into iocmd->attr while holding the driver lock.  Always returns 0.
+ */
+int
+bfad_iocmd_vhba_query(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_vhba_attr_s *iocmd =
+                       (struct bfa_bsg_vhba_attr_s *)cmd;
+       struct bfa_vhba_attr_s *vhba_attr = &iocmd->attr;
+       unsigned long irq_flags;
+
+       spin_lock_irqsave(&bfad->bfad_lock, irq_flags);
+       vhba_attr->pwwn = bfad->bfa.ioc.attr->pwwn;
+       vhba_attr->nwwn = bfad->bfa.ioc.attr->nwwn;
+       vhba_attr->plog_enabled =
+                       (bfa_boolean_t)bfad->bfa.plog->plog_enabled;
+       vhba_attr->io_profile = bfa_fcpim_get_io_profile(&bfad->bfa);
+       vhba_attr->path_tov = bfa_fcpim_path_tov_get(&bfad->bfa);
+       iocmd->status = BFA_STATUS_OK;
+       spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);
+       return 0;
+}
+
+/*
+ * Update the PHY firmware of iocmd->instance from the buffer that follows
+ * the command header.  Always returns 0; failures are in iocmd->status.
+ */
+int
+bfad_iocmd_phy_update(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
+{
+       struct bfa_bsg_phy_s *iocmd = (struct bfa_bsg_phy_s *)cmd;
+       struct bfad_hal_comp hcomp;
+       unsigned long   irq_flags;
+       void    *fw_buf;
+
+       /* The payload must hold the header plus iocmd->bufsz data bytes. */
+       if (bfad_chk_iocmd_sz(payload_len,
+                       sizeof(struct bfa_bsg_phy_s),
+                       iocmd->bufsz) != BFA_STATUS_OK) {
+               iocmd->status = BFA_STATUS_VERSION_FAIL;
+               return 0;
+       }
+
+       fw_buf = (char *)iocmd + sizeof(struct bfa_bsg_phy_s);
+       init_completion(&hcomp.comp);
+       spin_lock_irqsave(&bfad->bfad_lock, irq_flags);
+       iocmd->status = bfa_phy_update(BFA_PHY(&bfad->bfa),
+                               iocmd->instance, fw_buf, iocmd->bufsz,
+                               0, bfad_hcb_comp, &hcomp);
+       spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);
+
+       /* Wait for the completion callback only if the request was queued. */
+       if (iocmd->status == BFA_STATUS_OK) {
+               wait_for_completion(&hcomp.comp);
+               iocmd->status = hcomp.status;
+       }
+       return 0;
+}
+
+/*
+ * Copy the driver's port log into the buffer that follows the command
+ * header.  Always returns 0; failures are reported in iocmd->status.
+ */
+int
+bfad_iocmd_porglog_get(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_debug_s *iocmd = (struct bfa_bsg_debug_s *)cmd;
+       void *log_buf;
+
+       /* The caller must supply room for the whole plog structure. */
+       if (iocmd->bufsz < sizeof(struct bfa_plog_s)) {
+               bfa_trc(bfad, sizeof(struct bfa_plog_s));
+               iocmd->status = BFA_STATUS_EINVAL;
+               return 0;
+       }
+
+       log_buf = (char *)iocmd + sizeof(struct bfa_bsg_debug_s);
+       memcpy(log_buf, (u8 *) &bfad->plog_buf, sizeof(struct bfa_plog_s));
+       iocmd->status = BFA_STATUS_OK;
+       return 0;
+}
+
+/*
+ * Dispatch a BSG vendor command to its IOCMD handler.  Every handler
+ * returns 0 and reports its detailed status inside the iocmd payload;
+ * unrecognized commands leave rc at (positive) EINVAL.  The rc is negated
+ * on return, so callers see 0 on dispatch or -EINVAL for unknown commands.
+ *
+ * Fix: normalized the stray extra-space indentation of the break after
+ * IOCMD_SFP_MEDIA to match the other cases.
+ */
+static int
+bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
+               unsigned int payload_len)
+{
+       int rc = EINVAL;
+
+       switch (cmd) {
+       case IOCMD_IOC_ENABLE:
+               rc = bfad_iocmd_ioc_enable(bfad, iocmd);
+               break;
+       case IOCMD_IOC_DISABLE:
+               rc = bfad_iocmd_ioc_disable(bfad, iocmd);
+               break;
+       case IOCMD_IOC_GET_INFO:
+               rc = bfad_iocmd_ioc_get_info(bfad, iocmd);
+               break;
+       case IOCMD_IOC_GET_ATTR:
+               rc = bfad_iocmd_ioc_get_attr(bfad, iocmd);
+               break;
+       case IOCMD_IOC_GET_STATS:
+               rc = bfad_iocmd_ioc_get_stats(bfad, iocmd);
+               break;
+       case IOCMD_IOC_GET_FWSTATS:
+               rc = bfad_iocmd_ioc_get_fwstats(bfad, iocmd, payload_len);
+               break;
+       case IOCMD_IOCFC_GET_ATTR:
+               rc = bfad_iocmd_iocfc_get_attr(bfad, iocmd);
+               break;
+       case IOCMD_IOCFC_SET_INTR:
+               rc = bfad_iocmd_iocfc_set_intr(bfad, iocmd);
+               break;
+       case IOCMD_PORT_ENABLE:
+               rc = bfad_iocmd_port_enable(bfad, iocmd);
+               break;
+       case IOCMD_PORT_DISABLE:
+               rc = bfad_iocmd_port_disable(bfad, iocmd);
+               break;
+       case IOCMD_PORT_GET_ATTR:
+               rc = bfad_iocmd_port_get_attr(bfad, iocmd);
+               break;
+       case IOCMD_PORT_GET_STATS:
+               rc = bfad_iocmd_port_get_stats(bfad, iocmd, payload_len);
+               break;
+       case IOCMD_LPORT_GET_ATTR:
+               rc = bfad_iocmd_lport_get_attr(bfad, iocmd);
+               break;
+       case IOCMD_LPORT_GET_STATS:
+               rc = bfad_iocmd_lport_get_stats(bfad, iocmd);
+               break;
+       case IOCMD_LPORT_GET_IOSTATS:
+               rc = bfad_iocmd_lport_get_iostats(bfad, iocmd);
+               break;
+       case IOCMD_LPORT_GET_RPORTS:
+               rc = bfad_iocmd_lport_get_rports(bfad, iocmd, payload_len);
+               break;
+       case IOCMD_RPORT_GET_ATTR:
+               rc = bfad_iocmd_rport_get_attr(bfad, iocmd);
+               break;
+       case IOCMD_RPORT_GET_ADDR:
+               rc = bfad_iocmd_rport_get_addr(bfad, iocmd);
+               break;
+       case IOCMD_RPORT_GET_STATS:
+               rc = bfad_iocmd_rport_get_stats(bfad, iocmd);
+               break;
+       case IOCMD_FABRIC_GET_LPORTS:
+               rc = bfad_iocmd_fabric_get_lports(bfad, iocmd, payload_len);
+               break;
+       case IOCMD_FCPIM_MODSTATS:
+               rc = bfad_iocmd_fcpim_get_modstats(bfad, iocmd);
+               break;
+       case IOCMD_FCPIM_DEL_ITN_STATS:
+               rc = bfad_iocmd_fcpim_get_del_itn_stats(bfad, iocmd);
+               break;
+       case IOCMD_ITNIM_GET_ATTR:
+               rc = bfad_iocmd_itnim_get_attr(bfad, iocmd);
+               break;
+       case IOCMD_ITNIM_GET_IOSTATS:
+               rc = bfad_iocmd_itnim_get_iostats(bfad, iocmd);
+               break;
+       case IOCMD_ITNIM_GET_ITNSTATS:
+               rc = bfad_iocmd_itnim_get_itnstats(bfad, iocmd);
+               break;
+       case IOCMD_FCPORT_ENABLE:
+               rc = bfad_iocmd_fcport_enable(bfad, iocmd);
+               break;
+       case IOCMD_FCPORT_DISABLE:
+               rc = bfad_iocmd_fcport_disable(bfad, iocmd);
+               break;
+       case IOCMD_IOC_PCIFN_CFG:
+               rc = bfad_iocmd_ioc_get_pcifn_cfg(bfad, iocmd);
+               break;
+       case IOCMD_PCIFN_CREATE:
+               rc = bfad_iocmd_pcifn_create(bfad, iocmd);
+               break;
+       case IOCMD_PCIFN_DELETE:
+               rc = bfad_iocmd_pcifn_delete(bfad, iocmd);
+               break;
+       case IOCMD_PCIFN_BW:
+               rc = bfad_iocmd_pcifn_bw(bfad, iocmd);
+               break;
+       case IOCMD_ADAPTER_CFG_MODE:
+               rc = bfad_iocmd_adapter_cfg_mode(bfad, iocmd);
+               break;
+       case IOCMD_PORT_CFG_MODE:
+               rc = bfad_iocmd_port_cfg_mode(bfad, iocmd);
+               break;
+       case IOCMD_FLASH_ENABLE_OPTROM:
+       case IOCMD_FLASH_DISABLE_OPTROM:
+               /* Both optrom sub-commands share one handler; it keys off cmd. */
+               rc = bfad_iocmd_ablk_optrom(bfad, cmd, iocmd);
+               break;
+       case IOCMD_FAA_ENABLE:
+               rc = bfad_iocmd_faa_enable(bfad, iocmd);
+               break;
+       case IOCMD_FAA_DISABLE:
+               rc = bfad_iocmd_faa_disable(bfad, iocmd);
+               break;
+       case IOCMD_FAA_QUERY:
+               rc = bfad_iocmd_faa_query(bfad, iocmd);
+               break;
+       case IOCMD_CEE_GET_ATTR:
+               rc = bfad_iocmd_cee_attr(bfad, iocmd, payload_len);
+               break;
+       case IOCMD_CEE_GET_STATS:
+               rc = bfad_iocmd_cee_get_stats(bfad, iocmd, payload_len);
+               break;
+       case IOCMD_CEE_RESET_STATS:
+               rc = bfad_iocmd_cee_reset_stats(bfad, iocmd);
+               break;
+       case IOCMD_SFP_MEDIA:
+               rc = bfad_iocmd_sfp_media(bfad, iocmd);
+               break;
+       case IOCMD_SFP_SPEED:
+               rc = bfad_iocmd_sfp_speed(bfad, iocmd);
+               break;
+       case IOCMD_FLASH_GET_ATTR:
+               rc = bfad_iocmd_flash_get_attr(bfad, iocmd);
+               break;
+       case IOCMD_FLASH_ERASE_PART:
+               rc = bfad_iocmd_flash_erase_part(bfad, iocmd);
+               break;
+       case IOCMD_FLASH_UPDATE_PART:
+               rc = bfad_iocmd_flash_update_part(bfad, iocmd, payload_len);
+               break;
+       case IOCMD_FLASH_READ_PART:
+               rc = bfad_iocmd_flash_read_part(bfad, iocmd, payload_len);
+               break;
+       case IOCMD_DIAG_TEMP:
+               rc = bfad_iocmd_diag_temp(bfad, iocmd);
+               break;
+       case IOCMD_DIAG_MEMTEST:
+               rc = bfad_iocmd_diag_memtest(bfad, iocmd);
+               break;
+       case IOCMD_DIAG_LOOPBACK:
+               rc = bfad_iocmd_diag_loopback(bfad, iocmd);
+               break;
+       case IOCMD_DIAG_FWPING:
+               rc = bfad_iocmd_diag_fwping(bfad, iocmd);
+               break;
+       case IOCMD_DIAG_QUEUETEST:
+               rc = bfad_iocmd_diag_queuetest(bfad, iocmd);
+               break;
+       case IOCMD_DIAG_SFP:
+               rc = bfad_iocmd_diag_sfp(bfad, iocmd);
+               break;
+       case IOCMD_DIAG_LED:
+               rc = bfad_iocmd_diag_led(bfad, iocmd);
+               break;
+       case IOCMD_DIAG_BEACON_LPORT:
+               rc = bfad_iocmd_diag_beacon_lport(bfad, iocmd);
+               break;
+       case IOCMD_DIAG_LB_STAT:
+               rc = bfad_iocmd_diag_lb_stat(bfad, iocmd);
+               break;
+       case IOCMD_PHY_GET_ATTR:
+               rc = bfad_iocmd_phy_get_attr(bfad, iocmd);
+               break;
+       case IOCMD_PHY_GET_STATS:
+               rc = bfad_iocmd_phy_get_stats(bfad, iocmd);
+               break;
+       case IOCMD_PHY_UPDATE_FW:
+               rc = bfad_iocmd_phy_update(bfad, iocmd, payload_len);
+               break;
+       case IOCMD_PHY_READ_FW:
+               rc = bfad_iocmd_phy_read(bfad, iocmd, payload_len);
+               break;
+       case IOCMD_VHBA_QUERY:
+               rc = bfad_iocmd_vhba_query(bfad, iocmd);
+               break;
+       case IOCMD_DEBUG_PORTLOG:
+               rc = bfad_iocmd_porglog_get(bfad, iocmd);
+               break;
+       default:
+               rc = EINVAL;
+               break;
+       }
+       return -rc;
+}
+
+/*
+ * Entry point for BSG vendor-specific requests.  Copies the request
+ * payload into a linear kernel buffer, dispatches it through
+ * bfad_iocmd_handler(), and copies the (in-place-modified) buffer back
+ * into the reply scatter/gather list.  job->job_done() is invoked only on
+ * the success path; on failure only the reply fields are filled in.
+ */
+static int
+bfad_im_bsg_vendor_request(struct fc_bsg_job *job)
+{
+       uint32_t vendor_cmd = job->request->rqst_data.h_vendor.vendor_cmd[0];
+       struct bfad_im_port_s *im_port =
+                       (struct bfad_im_port_s *) job->shost->hostdata[0];
+       struct bfad_s *bfad = im_port->bfad;
+       void *payload_kbuf;
+       int rc = -EINVAL;
+
+       /* Allocate a temp buffer to hold the passed in user space command */
+       payload_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL);
+       if (!payload_kbuf) {
+               rc = -ENOMEM;
+               goto out;
+       }
+
+       /* Copy the sg_list passed in to a linear buffer: holds the cmnd data */
+       sg_copy_to_buffer(job->request_payload.sg_list,
+                         job->request_payload.sg_cnt, payload_kbuf,
+                         job->request_payload.payload_len);
+
+       /* Invoke IOCMD handler - to handle all the vendor command requests */
+       /* NOTE(review): bfad_iocmd_handler() returns 0 or -EINVAL; this
+        * compares against BFA_STATUS_OK (presumably 0) -- confirm. */
+       rc = bfad_iocmd_handler(bfad, vendor_cmd, payload_kbuf,
+                               job->request_payload.payload_len);
+       if (rc != BFA_STATUS_OK)
+               goto error;
+
+       /* Copy the response data to the job->reply_payload sg_list */
+       sg_copy_from_buffer(job->reply_payload.sg_list,
+                           job->reply_payload.sg_cnt,
+                           payload_kbuf,
+                           job->reply_payload.payload_len);
+
+       /* free the command buffer */
+       kfree(payload_kbuf);
+
+       /* Fill the BSG job reply data */
+       job->reply_len = job->reply_payload.payload_len;
+       job->reply->reply_payload_rcv_len = job->reply_payload.payload_len;
+       job->reply->result = rc;
+
+       job->job_done(job);
+       return rc;
+error:
+       /* free the command buffer */
+       kfree(payload_kbuf);
+out:
+       /* Failure: report zero received bytes and the error code. */
+       job->reply->result = rc;
+       job->reply_len = sizeof(uint32_t);
+       job->reply->reply_payload_rcv_len = 0;
+       return rc;
+}
+
+/* FC passthru call backs */
+/* Return the DMA address of request SG element 'sgeid'. */
+u64
+bfad_fcxp_get_req_sgaddr_cb(void *bfad_fcxp, int sgeid)
+{
+       struct bfad_fcxp *drv_fcxp = bfad_fcxp;
+
+       return (u64)(size_t) drv_fcxp->req_sge[sgeid].sg_addr;
+}
+
+/* Return the length of request SG element 'sgeid'. */
+u32
+bfad_fcxp_get_req_sglen_cb(void *bfad_fcxp, int sgeid)
+{
+       struct bfad_fcxp *drv_fcxp = bfad_fcxp;
+
+       return drv_fcxp->req_sge[sgeid].sg_len;
+}
+
+/* Return the DMA address of response SG element 'sgeid'. */
+u64
+bfad_fcxp_get_rsp_sgaddr_cb(void *bfad_fcxp, int sgeid)
+{
+       struct bfad_fcxp *drv_fcxp = bfad_fcxp;
+
+       return (u64)(size_t) drv_fcxp->rsp_sge[sgeid].sg_addr;
+}
+
+/* Return the length of response SG element 'sgeid'. */
+u32
+bfad_fcxp_get_rsp_sglen_cb(void *bfad_fcxp, int sgeid)
+{
+       struct bfad_fcxp *drv_fcxp = bfad_fcxp;
+
+       return drv_fcxp->rsp_sge[sgeid].sg_len;
+}
+
+/*
+ * FCXP send completion callback: record the request status and response
+ * length on the driver FCXP, then wake the waiting BSG thread.
+ */
+void
+bfad_send_fcpt_cb(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
+               bfa_status_t req_status, u32 rsp_len, u32 resid_len,
+               struct fchs_s *rsp_fchs)
+{
+       struct bfad_fcxp *drv_fcxp = bfad_fcxp;
+
+       drv_fcxp->rsp_len = rsp_len;
+       drv_fcxp->req_status = req_status;
+
+       /* bfa_fcxp will be automatically freed by BFA */
+       drv_fcxp->bfa_fcxp = NULL;
+       complete(&drv_fcxp->comp);
+}
+
+/*
+ * Map a linear kernel buffer into a single DMA-coherent SG element for an
+ * FC passthru request.  One allocation holds the bfad_buf_info array
+ * immediately followed by its bfa_sge_s table.  On success returns the
+ * buffer-info base (caller releases it with bfad_fcxp_free_mem()) and
+ * sets *num_sgles; returns NULL on allocation failure.
+ *
+ * Fix: dropped a redundant memset() of the DMA buffer -- the memcpy()
+ * right after it overwrites the same buf_info->size bytes in full.
+ */
+struct bfad_buf_info *
+bfad_fcxp_map_sg(struct bfad_s *bfad, void *payload_kbuf,
+                uint32_t payload_len, uint32_t *num_sgles)
+{
+       struct bfad_buf_info    *buf_base, *buf_info;
+       struct bfa_sge_s        *sg_table;
+       int sge_num = 1;        /* currently always a single SG element */
+
+       buf_base = kzalloc((sizeof(struct bfad_buf_info) +
+                          sizeof(struct bfa_sge_s)) * sge_num, GFP_KERNEL);
+       if (!buf_base)
+               return NULL;
+
+       /* The SG table lives right after the buf_info array. */
+       sg_table = (struct bfa_sge_s *) (((uint8_t *)buf_base) +
+                       (sizeof(struct bfad_buf_info) * sge_num));
+
+       /* Allocate dma coherent memory */
+       buf_info = buf_base;
+       buf_info->size = payload_len;
+       buf_info->virt = dma_alloc_coherent(&bfad->pcidev->dev, buf_info->size,
+                                       &buf_info->phys, GFP_KERNEL);
+       if (!buf_info->virt)
+               goto out_free_mem;
+
+       /* copy the linear bsg buffer to buf_info */
+       memcpy(buf_info->virt, payload_kbuf, buf_info->size);
+
+       /*
+        * Setup SG table
+        */
+       sg_table->sg_len = buf_info->size;
+       sg_table->sg_addr = (void *)(size_t) buf_info->phys;
+
+       *num_sgles = sge_num;
+
+       return buf_base;
+
+out_free_mem:
+       kfree(buf_base);
+       return NULL;
+}
+
+void
+bfad_fcxp_free_mem(struct bfad_s *bfad, struct bfad_buf_info *buf_base,
+                  uint32_t num_sgles)
+{
+       int i;
+       struct bfad_buf_info *buf_info = buf_base;
+
+       if (buf_base) {
+               for (i = 0; i < num_sgles; buf_info++, i++) {
+                       if (buf_info->virt != NULL)
+                               dma_free_coherent(&bfad->pcidev->dev,
+                                       buf_info->size, buf_info->virt,
+                                       buf_info->phys);
+               }
+               kfree(buf_base);
+       }
+}
+
+int
+bfad_fcxp_bsg_send(struct fc_bsg_job *job, struct bfad_fcxp *drv_fcxp,
+                  bfa_bsg_fcpt_t *bsg_fcpt)
+{
+       struct bfa_fcxp_s *hal_fcxp;
+       struct bfad_s   *bfad = drv_fcxp->port->bfad;
+       unsigned long   flags;
+       uint8_t lp_tag;
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+
+       /* Allocate bfa_fcxp structure */
+       hal_fcxp = bfa_fcxp_alloc(drv_fcxp, &bfad->bfa,
+                                 drv_fcxp->num_req_sgles,
+                                 drv_fcxp->num_rsp_sgles,
+                                 bfad_fcxp_get_req_sgaddr_cb,
+                                 bfad_fcxp_get_req_sglen_cb,
+                                 bfad_fcxp_get_rsp_sgaddr_cb,
+                                 bfad_fcxp_get_rsp_sglen_cb);
+       if (!hal_fcxp) {
+               bfa_trc(bfad, 0);
+               spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+               return BFA_STATUS_ENOMEM;
+       }
+
+       drv_fcxp->bfa_fcxp = hal_fcxp;
+
+       lp_tag = bfa_lps_get_tag_from_pid(&bfad->bfa, bsg_fcpt->fchs.s_id);
+
+       bfa_fcxp_send(hal_fcxp, drv_fcxp->bfa_rport, bsg_fcpt->vf_id, lp_tag,
+                     bsg_fcpt->cts, bsg_fcpt->cos,
+                     job->request_payload.payload_len,
+                     &bsg_fcpt->fchs, bfad_send_fcpt_cb, bfad,
+                     job->reply_payload.payload_len, bsg_fcpt->tsecs);
+
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+       return BFA_STATUS_OK;
+}
+
+int
+bfad_im_bsg_els_ct_request(struct fc_bsg_job *job)
+{
+       struct bfa_bsg_data *bsg_data;
+       struct bfad_im_port_s *im_port =
+                       (struct bfad_im_port_s *) job->shost->hostdata[0];
+       struct bfad_s *bfad = im_port->bfad;
+       bfa_bsg_fcpt_t *bsg_fcpt;
+       struct bfad_fcxp    *drv_fcxp;
+       struct bfa_fcs_lport_s *fcs_port;
+       struct bfa_fcs_rport_s *fcs_rport;
+       uint32_t command_type = job->request->msgcode;
+       unsigned long flags;
+       struct bfad_buf_info *rsp_buf_info;
+       void *req_kbuf = NULL, *rsp_kbuf = NULL;
+       int rc = -EINVAL;
+
+       job->reply_len  = sizeof(uint32_t);     /* At least a uint32_t-sized reply_len */
+       job->reply->reply_payload_rcv_len = 0;
+
+       /* Get the payload passed in from userspace */
+       bsg_data = (struct bfa_bsg_data *) (((char *)job->request) +
+                                       sizeof(struct fc_bsg_request));
+       if (bsg_data == NULL)
+               goto out;
+
+       /*
+        * Allocate buffer for bsg_fcpt and do a copy_from_user op for payload
+        * buffer of size bsg_data->payload_len
+        */
+       bsg_fcpt = (struct bfa_bsg_fcpt_s *)
+                  kzalloc(bsg_data->payload_len, GFP_KERNEL);
+       if (!bsg_fcpt)
+               goto out;
+
+       if (copy_from_user((uint8_t *)bsg_fcpt, bsg_data->payload,
+                               bsg_data->payload_len)) {
+               kfree(bsg_fcpt);
+               goto out;
+       }
+
+       drv_fcxp = kzalloc(sizeof(struct bfad_fcxp), GFP_KERNEL);
+       if (drv_fcxp == NULL) {
+               rc = -ENOMEM;
+               goto out;
+       }
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, bsg_fcpt->vf_id,
+                                       bsg_fcpt->lpwwn);
+       if (fcs_port == NULL) {
+               bsg_fcpt->status = BFA_STATUS_UNKNOWN_LWWN;
+               spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+               goto out_free_mem;
+       }
+
+       /* Check if the port is online before sending FC Passthru cmd */
+       if (!bfa_fcs_lport_is_online(fcs_port)) {
+               bsg_fcpt->status = BFA_STATUS_PORT_OFFLINE;
+               spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+               goto out_free_mem;
+       }
+
+       drv_fcxp->port = fcs_port->bfad_port;
+
+       if (drv_fcxp->port->bfad == 0)
+               drv_fcxp->port->bfad = bfad;
+
+       /* Fetch the bfa_rport - if nexus needed */
+       if (command_type == FC_BSG_HST_ELS_NOLOGIN ||
+           command_type == FC_BSG_HST_CT) {
+               /* BSG HST commands: no nexus needed */
+               drv_fcxp->bfa_rport = NULL;
+
+       } else if (command_type == FC_BSG_RPT_ELS ||
+                  command_type == FC_BSG_RPT_CT) {
+               /* BSG RPT commands: nexus needed */
+               fcs_rport = bfa_fcs_lport_get_rport_by_pwwn(fcs_port,
+                                                           bsg_fcpt->dpwwn);
+               if (fcs_rport == NULL) {
+                       bsg_fcpt->status = BFA_STATUS_UNKNOWN_RWWN;
+                       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+                       goto out_free_mem;
+               }
+
+               drv_fcxp->bfa_rport = fcs_rport->bfa_rport;
+
+       } else { /* Unknown BSG msgcode; return -EINVAL */
+               spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+               goto out_free_mem;
+       }
+
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+       /* allocate memory for req / rsp buffers */
+       req_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL);
+       if (!req_kbuf) {
+               printk(KERN_INFO "bfa %s: fcpt request buffer alloc failed\n",
+                               bfad->pci_name);
+               rc = -ENOMEM;
+               goto out_free_mem;
+       }
+
+       rsp_kbuf = kzalloc(job->reply_payload.payload_len, GFP_KERNEL);
+       if (!rsp_kbuf) {
+               printk(KERN_INFO "bfa %s: fcpt response buffer alloc failed\n",
+                               bfad->pci_name);
+               rc = -ENOMEM;
+               goto out_free_mem;
+       }
+
+       /* map req sg - copy the sg_list passed in to the linear buffer */
+       sg_copy_to_buffer(job->request_payload.sg_list,
+                         job->request_payload.sg_cnt, req_kbuf,
+                         job->request_payload.payload_len);
+
+       drv_fcxp->reqbuf_info = bfad_fcxp_map_sg(bfad, req_kbuf,
+                                       job->request_payload.payload_len,
+                                       &drv_fcxp->num_req_sgles);
+       if (!drv_fcxp->reqbuf_info) {
+               printk(KERN_INFO "bfa %s: fcpt request fcxp_map_sg failed\n",
+                               bfad->pci_name);
+               rc = -ENOMEM;
+               goto out_free_mem;
+       }
+
+       drv_fcxp->req_sge = (struct bfa_sge_s *)
+                           (((uint8_t *)drv_fcxp->reqbuf_info) +
+                           (sizeof(struct bfad_buf_info) *
+                                       drv_fcxp->num_req_sgles));
+
+       /* map rsp sg */
+       drv_fcxp->rspbuf_info = bfad_fcxp_map_sg(bfad, rsp_kbuf,
+                                       job->reply_payload.payload_len,
+                                       &drv_fcxp->num_rsp_sgles);
+       if (!drv_fcxp->rspbuf_info) {
+               printk(KERN_INFO "bfa %s: fcpt response fcxp_map_sg failed\n",
+                               bfad->pci_name);
+               rc = -ENOMEM;
+               goto out_free_mem;
+       }
+
+       rsp_buf_info = (struct bfad_buf_info *)drv_fcxp->rspbuf_info;
+       drv_fcxp->rsp_sge = (struct bfa_sge_s  *)
+                           (((uint8_t *)drv_fcxp->rspbuf_info) +
+                           (sizeof(struct bfad_buf_info) *
+                                       drv_fcxp->num_rsp_sgles));
+
+       /* fcxp send */
+       init_completion(&drv_fcxp->comp);
+       rc = bfad_fcxp_bsg_send(job, drv_fcxp, bsg_fcpt);
+       if (rc == BFA_STATUS_OK) {
+               wait_for_completion(&drv_fcxp->comp);
+               bsg_fcpt->status = drv_fcxp->req_status;
+       } else {
+               bsg_fcpt->status = rc;
+               goto out_free_mem;
+       }
+
+       /* fill the job->reply data */
+       if (drv_fcxp->req_status == BFA_STATUS_OK) {
+               job->reply_len = drv_fcxp->rsp_len;
+               job->reply->reply_payload_rcv_len = drv_fcxp->rsp_len;
+               job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
+       } else {
+               job->reply->reply_payload_rcv_len =
+                                       sizeof(struct fc_bsg_ctels_reply);
+               job->reply_len = sizeof(uint32_t);
+               job->reply->reply_data.ctels_reply.status =
+                                               FC_CTELS_STATUS_REJECT;
+       }
+
+       /* Copy the response data to the reply_payload sg list */
+       sg_copy_from_buffer(job->reply_payload.sg_list,
+                           job->reply_payload.sg_cnt,
+                           (uint8_t *)rsp_buf_info->virt,
+                           job->reply_payload.payload_len);
+
+out_free_mem:
+       bfad_fcxp_free_mem(bfad, drv_fcxp->rspbuf_info,
+                          drv_fcxp->num_rsp_sgles);
+       bfad_fcxp_free_mem(bfad, drv_fcxp->reqbuf_info,
+                          drv_fcxp->num_req_sgles);
+       kfree(req_kbuf);
+       kfree(rsp_kbuf);
+
+       /* Copy bsg_fcpt (including its status field) back to user space */
+       if (copy_to_user(bsg_data->payload, (void *) bsg_fcpt,
+                        bsg_data->payload_len))
+               rc = -EIO;
+
+       kfree(bsg_fcpt);
+       kfree(drv_fcxp);
+out:
+       job->reply->result = rc;
+
+       if (rc == BFA_STATUS_OK)
+               job->job_done(job);
+
+       return rc;
+}
+
+int
+bfad_im_bsg_request(struct fc_bsg_job *job)
+{
+       uint32_t rc = BFA_STATUS_OK;
+
+       switch (job->request->msgcode) {
+       case FC_BSG_HST_VENDOR:
+               /* Process BSG HST Vendor requests */
+               rc = bfad_im_bsg_vendor_request(job);
+               break;
+       case FC_BSG_HST_ELS_NOLOGIN:
+       case FC_BSG_RPT_ELS:
+       case FC_BSG_HST_CT:
+       case FC_BSG_RPT_CT:
+               /* Process BSG ELS/CT commands */
+               rc = bfad_im_bsg_els_ct_request(job);
+               break;
+       default:
+               job->reply->result = rc = -EINVAL;
+               job->reply->reply_payload_rcv_len = 0;
+               break;
+       }
+
+       return rc;
+}
+
+int
+bfad_im_bsg_timeout(struct fc_bsg_job *job)
+{
+       /* Don't complete the BSG job request - return -EAGAIN
+        * to reset the bsg job timeout: for ELS/CT pass-through
+        * requests we already have a timer to track the request.
+        */
+       return -EAGAIN;
+}
diff --git a/drivers/scsi/bfa/bfad_bsg.h b/drivers/scsi/bfa/bfad_bsg.h
new file mode 100644 (file)
index 0000000..99b0e8a
--- /dev/null
@@ -0,0 +1,509 @@
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+#ifndef BFAD_BSG_H
+#define BFAD_BSG_H
+
+#include "bfa_defs.h"
+#include "bfa_defs_fcs.h"
+
+/* Definitions of vendor-unique structures and command codes passed in
+ * using the FC_BSG_HST_VENDOR message code.
+ */
+enum {
+       IOCMD_IOC_ENABLE = 0x1,
+       IOCMD_IOC_DISABLE,
+       IOCMD_IOC_GET_ATTR,
+       IOCMD_IOC_GET_INFO,
+       IOCMD_IOC_GET_STATS,
+       IOCMD_IOC_GET_FWSTATS,
+       IOCMD_IOCFC_GET_ATTR,
+       IOCMD_IOCFC_SET_INTR,
+       IOCMD_PORT_ENABLE,
+       IOCMD_PORT_DISABLE,
+       IOCMD_PORT_GET_ATTR,
+       IOCMD_PORT_GET_STATS,
+       IOCMD_LPORT_GET_ATTR,
+       IOCMD_LPORT_GET_RPORTS,
+       IOCMD_LPORT_GET_STATS,
+       IOCMD_LPORT_GET_IOSTATS,
+       IOCMD_RPORT_GET_ATTR,
+       IOCMD_RPORT_GET_ADDR,
+       IOCMD_RPORT_GET_STATS,
+       IOCMD_FABRIC_GET_LPORTS,
+       IOCMD_FCPIM_MODSTATS,
+       IOCMD_FCPIM_DEL_ITN_STATS,
+       IOCMD_ITNIM_GET_ATTR,
+       IOCMD_ITNIM_GET_IOSTATS,
+       IOCMD_ITNIM_GET_ITNSTATS,
+       IOCMD_IOC_PCIFN_CFG,
+       IOCMD_FCPORT_ENABLE,
+       IOCMD_FCPORT_DISABLE,
+       IOCMD_PCIFN_CREATE,
+       IOCMD_PCIFN_DELETE,
+       IOCMD_PCIFN_BW,
+       IOCMD_ADAPTER_CFG_MODE,
+       IOCMD_PORT_CFG_MODE,
+       IOCMD_FLASH_ENABLE_OPTROM,
+       IOCMD_FLASH_DISABLE_OPTROM,
+       IOCMD_FAA_ENABLE,
+       IOCMD_FAA_DISABLE,
+       IOCMD_FAA_QUERY,
+       IOCMD_CEE_GET_ATTR,
+       IOCMD_CEE_GET_STATS,
+       IOCMD_CEE_RESET_STATS,
+       IOCMD_SFP_MEDIA,
+       IOCMD_SFP_SPEED,
+       IOCMD_FLASH_GET_ATTR,
+       IOCMD_FLASH_ERASE_PART,
+       IOCMD_FLASH_UPDATE_PART,
+       IOCMD_FLASH_READ_PART,
+       IOCMD_DIAG_TEMP,
+       IOCMD_DIAG_MEMTEST,
+       IOCMD_DIAG_LOOPBACK,
+       IOCMD_DIAG_FWPING,
+       IOCMD_DIAG_QUEUETEST,
+       IOCMD_DIAG_SFP,
+       IOCMD_DIAG_LED,
+       IOCMD_DIAG_BEACON_LPORT,
+       IOCMD_DIAG_LB_STAT,
+       IOCMD_PHY_GET_ATTR,
+       IOCMD_PHY_GET_STATS,
+       IOCMD_PHY_UPDATE_FW,
+       IOCMD_PHY_READ_FW,
+       IOCMD_VHBA_QUERY,
+       IOCMD_DEBUG_PORTLOG,
+};
+
+struct bfa_bsg_gen_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             rsvd;
+};
+
+struct bfa_bsg_ioc_info_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             rsvd;
+       char            serialnum[64];
+       char            hwpath[BFA_STRING_32];
+       char            adapter_hwpath[BFA_STRING_32];
+       char            guid[BFA_ADAPTER_SYM_NAME_LEN*2];
+       char            name[BFA_ADAPTER_SYM_NAME_LEN];
+       char            port_name[BFA_ADAPTER_SYM_NAME_LEN];
+       char            eth_name[BFA_ADAPTER_SYM_NAME_LEN];
+       wwn_t           pwwn;
+       wwn_t           nwwn;
+       wwn_t           factorypwwn;
+       wwn_t           factorynwwn;
+       mac_t           mac;
+       mac_t           factory_mac; /* Factory mac address */
+       mac_t           current_mac; /* Currently assigned mac address */
+       enum bfa_ioc_type_e     ioc_type;
+       u16             pvid; /* Port vlan id */
+       u16             rsvd1;
+       u32             host;
+       u32             bandwidth; /* For PF support */
+       u32             rsvd2;
+};
+
+struct bfa_bsg_ioc_attr_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             rsvd;
+       struct bfa_ioc_attr_s  ioc_attr;
+};
+
+struct bfa_bsg_ioc_stats_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             rsvd;
+       struct bfa_ioc_stats_s ioc_stats;
+};
+
+struct bfa_bsg_ioc_fwstats_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             rsvd;
+       u32             buf_size;
+       u32             rsvd1;
+       u64             buf_ptr;
+};
+
+struct bfa_bsg_iocfc_attr_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             rsvd;
+       struct bfa_iocfc_attr_s iocfc_attr;
+};
+
+struct bfa_bsg_iocfc_intr_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             rsvd;
+       struct bfa_iocfc_intr_attr_s attr;
+};
+
+struct bfa_bsg_port_attr_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             rsvd;
+       struct bfa_port_attr_s  attr;
+};
+
+struct bfa_bsg_port_stats_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             rsvd;
+       u32             buf_size;
+       u32             rsvd1;
+       u64             buf_ptr;
+};
+
+struct bfa_bsg_lport_attr_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             vf_id;
+       wwn_t           pwwn;
+       struct bfa_lport_attr_s port_attr;
+};
+
+struct bfa_bsg_lport_stats_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             vf_id;
+       wwn_t           pwwn;
+       struct bfa_lport_stats_s port_stats;
+};
+
+struct bfa_bsg_lport_iostats_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             vf_id;
+       wwn_t           pwwn;
+       struct bfa_itnim_iostats_s iostats;
+};
+
+struct bfa_bsg_lport_get_rports_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             vf_id;
+       wwn_t           pwwn;
+       u64             rbuf_ptr;
+       u32             nrports;
+       u32             rsvd;
+};
+
+struct bfa_bsg_rport_attr_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             vf_id;
+       wwn_t           pwwn;
+       wwn_t           rpwwn;
+       struct bfa_rport_attr_s attr;
+};
+
+struct bfa_bsg_rport_stats_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             vf_id;
+       wwn_t           pwwn;
+       wwn_t           rpwwn;
+       struct bfa_rport_stats_s stats;
+};
+
+struct bfa_bsg_rport_scsi_addr_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             vf_id;
+       wwn_t           pwwn;
+       wwn_t           rpwwn;
+       u32             host;
+       u32             bus;
+       u32             target;
+       u32             lun;
+};
+
+struct bfa_bsg_fabric_get_lports_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             vf_id;
+       u64             buf_ptr;
+       u32             nports;
+       u32             rsvd;
+};
+
+struct bfa_bsg_fcpim_modstats_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       struct bfa_itnim_iostats_s modstats;
+};
+
+struct bfa_bsg_fcpim_del_itn_stats_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       struct bfa_fcpim_del_itn_stats_s modstats;
+};
+
+struct bfa_bsg_itnim_attr_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             vf_id;
+       wwn_t           lpwwn;
+       wwn_t           rpwwn;
+       struct bfa_itnim_attr_s attr;
+};
+
+struct bfa_bsg_itnim_iostats_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             vf_id;
+       wwn_t           lpwwn;
+       wwn_t           rpwwn;
+       struct bfa_itnim_iostats_s iostats;
+};
+
+struct bfa_bsg_itnim_itnstats_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             vf_id;
+       wwn_t           lpwwn;
+       wwn_t           rpwwn;
+       struct bfa_itnim_stats_s itnstats;
+};
+
+struct bfa_bsg_pcifn_cfg_s {
+       bfa_status_t            status;
+       u16                     bfad_num;
+       u16                     rsvd;
+       struct bfa_ablk_cfg_s   pcifn_cfg;
+};
+
+struct bfa_bsg_pcifn_s {
+       bfa_status_t            status;
+       u16                     bfad_num;
+       u16                     pcifn_id;
+       u32                     bandwidth;
+       u8                      port;
+       enum bfi_pcifn_class    pcifn_class;
+       u8                      rsvd[1];
+};
+
+struct bfa_bsg_adapter_cfg_mode_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             rsvd;
+       struct bfa_adapter_cfg_mode_s   cfg;
+};
+
+struct bfa_bsg_port_cfg_mode_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             instance;
+       struct bfa_port_cfg_mode_s cfg;
+};
+
+struct bfa_bsg_faa_attr_s {
+       bfa_status_t            status;
+       u16                     bfad_num;
+       u16                     rsvd;
+       struct bfa_faa_attr_s   faa_attr;
+};
+
+struct bfa_bsg_cee_attr_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             rsvd;
+       u32             buf_size;
+       u32             rsvd1;
+       u64             buf_ptr;
+};
+
+struct bfa_bsg_cee_stats_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             rsvd;
+       u32             buf_size;
+       u32             rsvd1;
+       u64             buf_ptr;
+};
+
+struct bfa_bsg_sfp_media_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             rsvd;
+       enum bfa_defs_sfp_media_e media;
+};
+
+struct bfa_bsg_sfp_speed_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             rsvd;
+       enum bfa_port_speed speed;
+};
+
+struct bfa_bsg_flash_attr_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             rsvd;
+       struct bfa_flash_attr_s attr;
+};
+
+struct bfa_bsg_flash_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u8              instance;
+       u8              rsvd;
+       enum  bfa_flash_part_type type;
+       int             bufsz;
+       u64             buf_ptr;
+};
+
+struct bfa_bsg_diag_get_temp_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             rsvd;
+       struct bfa_diag_results_tempsensor_s result;
+};
+
+struct bfa_bsg_diag_memtest_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             rsvd[3];
+       u32             pat;
+       struct bfa_diag_memtest_result result;
+       struct bfa_diag_memtest_s memtest;
+};
+
+struct bfa_bsg_diag_loopback_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             rsvd;
+       enum bfa_port_opmode opmode;
+       enum bfa_port_speed speed;
+       u32             lpcnt;
+       u32             pat;
+       struct bfa_diag_loopback_result_s result;
+};
+
+struct bfa_bsg_diag_fwping_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             rsvd;
+       u32             cnt;
+       u32             pattern;
+       struct bfa_diag_results_fwping result;
+};
+
+struct bfa_bsg_diag_qtest_s {
+       bfa_status_t    status;
+       u16     bfad_num;
+       u16     rsvd;
+       u32     force;
+       u32     queue;
+       struct bfa_diag_qtest_result_s result;
+};
+
+struct bfa_bsg_sfp_show_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             rsvd;
+       struct sfp_mem_s sfp;
+};
+
+struct bfa_bsg_diag_led_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             rsvd;
+       struct bfa_diag_ledtest_s ledtest;
+};
+
+struct bfa_bsg_diag_beacon_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             rsvd;
+       bfa_boolean_t   beacon;
+       bfa_boolean_t   link_e2e_beacon;
+       u32             second;
+};
+
+struct bfa_bsg_diag_lb_stat_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             rsvd;
+};
+
+struct bfa_bsg_phy_attr_s {
+       bfa_status_t    status;
+       u16     bfad_num;
+       u16     instance;
+       struct bfa_phy_attr_s   attr;
+};
+
+struct bfa_bsg_phy_s {
+       bfa_status_t    status;
+       u16     bfad_num;
+       u16     instance;
+       u64     bufsz;
+       u64     buf_ptr;
+};
+
+struct bfa_bsg_debug_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             rsvd;
+       u32             bufsz;
+       int             inst_no;
+       u64             buf_ptr;
+       u64             offset;
+};
+
+struct bfa_bsg_phy_stats_s {
+       bfa_status_t    status;
+       u16     bfad_num;
+       u16     instance;
+       struct bfa_phy_stats_s  stats;
+};
+
+struct bfa_bsg_vhba_attr_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             pcifn_id;
+       struct bfa_vhba_attr_s  attr;
+};
+
+struct bfa_bsg_fcpt_s {
+       bfa_status_t    status;
+       u16             vf_id;
+       wwn_t           lpwwn;
+       wwn_t           dpwwn;
+       u32             tsecs;
+       int             cts;
+       enum fc_cos     cos;
+       struct fchs_s   fchs;
+};
+#define bfa_bsg_fcpt_t struct bfa_bsg_fcpt_s
+
+struct bfa_bsg_data {
+       int payload_len;
+       void *payload;
+};
+
+#define bfad_chk_iocmd_sz(__payload_len, __hdrsz, __bufsz)     \
+       (((__payload_len) != ((__hdrsz) + (__bufsz))) ?         \
+        BFA_STATUS_FAILED : BFA_STATUS_OK)
+
+#endif /* BFAD_BSG_H */
index 48be0c5..b412e03 100644 (file)
@@ -214,10 +214,10 @@ bfad_debugfs_read(struct file *file, char __user *buf,
 
 #define BFA_REG_CT_ADDRSZ      (0x40000)
 #define BFA_REG_CB_ADDRSZ      (0x20000)
-#define BFA_REG_ADDRSZ(__bfa)  \
-       ((bfa_ioc_devid(&(__bfa)->ioc) == BFA_PCI_DEVICE_ID_CT) ?       \
-               BFA_REG_CT_ADDRSZ : BFA_REG_CB_ADDRSZ)
-#define BFA_REG_ADDRMSK(__bfa)  ((u32)(BFA_REG_ADDRSZ(__bfa) - 1))
+#define BFA_REG_ADDRSZ(__ioc)  \
+       ((u32)(bfa_asic_id_ctc(bfa_ioc_devid(__ioc)) ?  \
+        BFA_REG_CT_ADDRSZ : BFA_REG_CB_ADDRSZ))
+#define BFA_REG_ADDRMSK(__ioc) (BFA_REG_ADDRSZ(__ioc) - 1)
 
 static bfa_status_t
 bfad_reg_offset_check(struct bfa_s *bfa, u32 offset, u32 len)
@@ -236,7 +236,7 @@ bfad_reg_offset_check(struct bfa_s *bfa, u32 offset, u32 len)
                        return BFA_STATUS_EINVAL;
        } else {
                /* CB register space 64KB */
-               if ((offset + (len<<2)) > BFA_REG_ADDRMSK(bfa))
+               if ((offset + (len<<2)) > BFA_REG_ADDRMSK(&bfa->ioc))
                        return BFA_STATUS_EINVAL;
        }
        return BFA_STATUS_OK;
@@ -317,7 +317,7 @@ bfad_debugfs_write_regrd(struct file *file, const char __user *buf,
 
        bfad->reglen = len << 2;
        rb = bfa_ioc_bar0(ioc);
-       addr &= BFA_REG_ADDRMSK(bfa);
+       addr &= BFA_REG_ADDRMSK(ioc);
 
        /* offset and len sanity check */
        rc = bfad_reg_offset_check(bfa, addr, len);
@@ -380,7 +380,7 @@ bfad_debugfs_write_regwr(struct file *file, const char __user *buf,
        }
        kfree(kern_buf);
 
-       addr &= BFA_REG_ADDRMSK(bfa); /* offset only 17 bit and word align */
+       addr &= BFA_REG_ADDRMSK(ioc); /* offset only 17 bit and word align */
 
        /* offset and len sanity check */
        rc = bfad_reg_offset_check(bfa, addr, 1);
index 7f9ea90..48661a2 100644 (file)
@@ -43,6 +43,7 @@
 #include <scsi/scsi_tcq.h>
 #include <scsi/scsi_transport_fc.h>
 #include <scsi/scsi_transport.h>
+#include <scsi/scsi_bsg_fc.h>
 
 #include "bfa_modules.h"
 #include "bfa_fcs.h"
@@ -55,7 +56,7 @@
 #ifdef BFA_DRIVER_VERSION
 #define BFAD_DRIVER_VERSION    BFA_DRIVER_VERSION
 #else
-#define BFAD_DRIVER_VERSION    "2.3.2.3"
+#define BFAD_DRIVER_VERSION    "3.0.2.1"
 #endif
 
 #define BFAD_PROTO_NAME FCPI_NAME
@@ -79,7 +80,7 @@
 #define BFAD_HAL_INIT_FAIL                     0x00000100
 #define BFAD_FC4_PROBE_DONE                    0x00000200
 #define BFAD_PORT_DELETE                       0x00000001
-
+#define BFAD_INTX_ON                           0x00000400
 /*
  * BFAD related definition
  */
@@ -92,6 +93,8 @@
  */
 #define BFAD_LUN_QUEUE_DEPTH   32
 #define BFAD_IO_MAX_SGE                SG_ALL
+#define BFAD_MIN_SECTORS       128 /* 64k   */
+#define BFAD_MAX_SECTORS       0xFFFF  /* 32 MB */
 
 #define bfad_isr_t irq_handler_t
 
@@ -110,6 +113,7 @@ struct bfad_msix_s {
 enum {
        BFA_TRC_LDRV_BFAD               = 1,
        BFA_TRC_LDRV_IM                 = 2,
+       BFA_TRC_LDRV_BSG                = 3,
 };
 
 enum bfad_port_pvb_type {
@@ -189,8 +193,10 @@ struct bfad_s {
        struct bfa_pcidev_s hal_pcidev;
        struct bfa_ioc_pci_attr_s pci_attr;
        void __iomem   *pci_bar0_kva;
+       void __iomem   *pci_bar2_kva;
        struct completion comp;
        struct completion suspend;
+       struct completion enable_comp;
        struct completion disable_comp;
        bfa_boolean_t   disable_active;
        struct bfad_port_s     pport;   /* physical port of the BFAD */
@@ -273,21 +279,6 @@ struct bfad_hal_comp {
        struct completion comp;
 };
 
-/*
- * Macro to obtain the immediate lower power
- * of two for the integer.
- */
-#define nextLowerInt(x)                         \
-do {                                            \
-       int __i;                                  \
-       (*x)--;                                 \
-       for (__i = 1; __i < (sizeof(int)*8); __i <<= 1) \
-               (*x) = (*x) | (*x) >> __i;      \
-       (*x)++;                                 \
-       (*x) = (*x) >> 1;                       \
-} while (0)
-
-
 #define BFA_LOG(level, bfad, mask, fmt, arg...)                                \
 do {                                                                   \
        if (((mask) == 4) || (level[1] <= '4'))                         \
@@ -354,6 +345,7 @@ extern int      msix_disable_ct;
 extern int      fdmi_enable;
 extern int      supported_fc4s;
 extern int     pcie_max_read_reqsz;
+extern int     max_xfer_size;
 extern int bfa_debugfs_enable;
 extern struct mutex bfad_mutex;
 
index c2b3617..f2bf812 100644 (file)
@@ -175,21 +175,11 @@ bfad_im_info(struct Scsi_Host *shost)
        struct bfad_im_port_s *im_port =
                        (struct bfad_im_port_s *) shost->hostdata[0];
        struct bfad_s *bfad = im_port->bfad;
-       struct bfa_s *bfa = &bfad->bfa;
-       struct bfa_ioc_s *ioc = &bfa->ioc;
-       char model[BFA_ADAPTER_MODEL_NAME_LEN];
-
-       bfa_get_adapter_model(bfa, model);
 
        memset(bfa_buf, 0, sizeof(bfa_buf));
-       if (ioc->ctdev && !ioc->fcmode)
-               snprintf(bfa_buf, sizeof(bfa_buf),
-               "Brocade FCOE Adapter, " "model: %s hwpath: %s driver: %s",
-                model, bfad->pci_name, BFAD_DRIVER_VERSION);
-       else
-               snprintf(bfa_buf, sizeof(bfa_buf),
-               "Brocade FC Adapter, " "model: %s hwpath: %s driver: %s",
-               model, bfad->pci_name, BFAD_DRIVER_VERSION);
+       snprintf(bfa_buf, sizeof(bfa_buf),
+               "Brocade FC/FCOE Adapter, " "hwpath: %s driver: %s",
+               bfad->pci_name, BFAD_DRIVER_VERSION);
 
        return bfa_buf;
 }
@@ -572,9 +562,6 @@ bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port,
                goto out_fc_rel;
        }
 
-       /* setup host fixed attribute if the lk supports */
-       bfad_fc_host_init(im_port);
-
        return 0;
 
 out_fc_rel:
@@ -713,6 +700,9 @@ bfad_scsi_host_alloc(struct bfad_im_port_s *im_port, struct bfad_s *bfad)
        else
                sht = &bfad_im_vport_template;
 
+       if (max_xfer_size != BFAD_MAX_SECTORS >> 1)
+               sht->max_sectors = max_xfer_size << 1;
+
        sht->sg_tablesize = bfad->cfg_data.io_max_sge;
 
        return scsi_host_alloc(sht, sizeof(unsigned long));
@@ -790,7 +780,8 @@ struct scsi_host_template bfad_im_scsi_host_template = {
        .cmd_per_lun = 3,
        .use_clustering = ENABLE_CLUSTERING,
        .shost_attrs = bfad_im_host_attrs,
-       .max_sectors = 0xFFFF,
+       .max_sectors = BFAD_MAX_SECTORS,
+       .vendor_id = BFA_PCI_VENDOR_ID_BROCADE,
 };
 
 struct scsi_host_template bfad_im_vport_template = {
@@ -811,7 +802,7 @@ struct scsi_host_template bfad_im_vport_template = {
        .cmd_per_lun = 3,
        .use_clustering = ENABLE_CLUSTERING,
        .shost_attrs = bfad_im_vport_attrs,
-       .max_sectors = 0xFFFF,
+       .max_sectors = BFAD_MAX_SECTORS,
 };
 
 bfa_status_t
@@ -925,7 +916,10 @@ bfad_im_supported_speeds(struct bfa_s *bfa)
                return 0;
 
        bfa_ioc_get_attr(&bfa->ioc, ioc_attr);
-       if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_8GBPS) {
+       if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_16GBPS)
+               supported_speed |=  FC_PORTSPEED_16GBIT | FC_PORTSPEED_8GBIT |
+                               FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT;
+       else if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_8GBPS) {
                if (ioc_attr->adapter_attr.is_mezz) {
                        supported_speed |= FC_PORTSPEED_8GBIT |
                                FC_PORTSPEED_4GBIT |
index c296c89..4fe34d5 100644 (file)
@@ -141,4 +141,7 @@ extern struct device_attribute *bfad_im_vport_attrs[];
 
 irqreturn_t bfad_intx(int irq, void *dev_id);
 
+int bfad_im_bsg_request(struct fc_bsg_job *job);
+int bfad_im_bsg_timeout(struct fc_bsg_job *job);
+
 #endif
index 72b69a0..1e258d5 100644 (file)
 
 #pragma pack(1)
 
+/* Per dma segment max size */
+#define BFI_MEM_DMA_SEG_SZ     (131072)
+
+/* Get number of dma segments required */
+#define BFI_MEM_DMA_NSEGS(_num_reqs, _req_sz)                          \
+       ((u16)(((((_num_reqs) * (_req_sz)) + BFI_MEM_DMA_SEG_SZ - 1) &  \
+        ~(BFI_MEM_DMA_SEG_SZ - 1)) / BFI_MEM_DMA_SEG_SZ))
+
+/* Get num dma reqs - that fit in a segment */
+#define BFI_MEM_NREQS_SEG(_rqsz) (BFI_MEM_DMA_SEG_SZ / (_rqsz))
+
+/* Get segment num from tag */
+#define BFI_MEM_SEG_FROM_TAG(_tag, _rqsz) ((_tag) / BFI_MEM_NREQS_SEG(_rqsz))
+
+/* Get dma req offset in a segment */
+#define BFI_MEM_SEG_REQ_OFFSET(_tag, _sz)      \
+       ((_tag) - (BFI_MEM_SEG_FROM_TAG(_tag, _sz) * BFI_MEM_NREQS_SEG(_sz)))
+
 /*
  * BFI FW image type
  */
 #define        BFI_FLASH_CHUNK_SZ                      256     /*  Flash chunk size */
 #define        BFI_FLASH_CHUNK_SZ_WORDS        (BFI_FLASH_CHUNK_SZ/sizeof(u32))
-enum {
-       BFI_IMAGE_CB_FC,
-       BFI_IMAGE_CT_FC,
-       BFI_IMAGE_CT_CNA,
-       BFI_IMAGE_MAX,
-};
 
 /*
  * Msg header common to all msgs
@@ -43,17 +55,20 @@ struct bfi_mhdr_s {
        u8              msg_id;         /*  msg opcode with in the class   */
        union {
                struct {
-                       u8      rsvd;
-                       u8      lpu_id; /*  msg destination                 */
+                       u8      qid;
+                       u8      fn_lpu; /*  msg destination                 */
                } h2i;
                u16     i2htok; /*  token in msgs to host           */
        } mtag;
 };
 
-#define bfi_h2i_set(_mh, _mc, _op, _lpuid) do {                \
+#define bfi_fn_lpu(__fn, __lpu)        ((__fn) << 1 | (__lpu))
+#define bfi_mhdr_2_fn(_mh)     ((_mh)->mtag.h2i.fn_lpu >> 1)
+
+#define bfi_h2i_set(_mh, _mc, _op, _fn_lpu) do {               \
        (_mh).msg_class         = (_mc);      \
        (_mh).msg_id            = (_op);      \
-       (_mh).mtag.h2i.lpu_id   = (_lpuid);      \
+       (_mh).mtag.h2i.fn_lpu   = (_fn_lpu);      \
 } while (0)
 
 #define bfi_i2h_set(_mh, _mc, _op, _i2htok) do {               \
@@ -101,7 +116,7 @@ union bfi_addr_u {
 };
 
 /*
- * Scatter Gather Element
+ * Scatter Gather Element used for fast-path IO requests
  */
 struct bfi_sge_s {
 #ifdef __BIG_ENDIAN
@@ -116,6 +131,14 @@ struct bfi_sge_s {
        union bfi_addr_u sga;
 };
 
+/**
+ * Generic DMA addr-len pair.
+ */
+struct bfi_alen_s {
+       union bfi_addr_u        al_addr;        /* DMA addr of buffer   */
+       u32                     al_len;         /* length of buffer     */
+};
+
 /*
  * Scatter Gather Page
  */
@@ -127,6 +150,12 @@ struct bfi_sgpg_s {
        u32     rsvd[BFI_SGPG_RSVD_WD_LEN];
 };
 
+/* FCP module definitions */
+#define BFI_IO_MAX     (2000)
+#define BFI_IOIM_SNSLEN        (256)
+#define BFI_IOIM_SNSBUF_SEGS   \
+       BFI_MEM_DMA_NSEGS(BFI_IO_MAX, BFI_IOIM_SNSLEN)
+
 /*
  * Large Message structure - 128 Bytes size Msgs
  */
@@ -148,19 +177,30 @@ struct bfi_mbmsg_s {
        u32             pl[BFI_MBMSG_SZ];
 };
 
+/*
+ * Supported PCI function class codes (personality)
+ */
+enum bfi_pcifn_class {
+       BFI_PCIFN_CLASS_FC  = 0x0c04,
+       BFI_PCIFN_CLASS_ETH = 0x0200,
+};
+
 /*
  * Message Classes
  */
 enum bfi_mclass {
        BFI_MC_IOC              = 1,    /*  IO Controller (IOC)     */
+       BFI_MC_DIAG             = 2,    /*  Diagnostic Msgs            */
+       BFI_MC_FLASH            = 3,    /*  Flash message class */
+       BFI_MC_CEE              = 4,    /*  CEE */
        BFI_MC_FCPORT           = 5,    /*  FC port                         */
        BFI_MC_IOCFC            = 6,    /*  FC - IO Controller (IOC)        */
-       BFI_MC_LL               = 7,    /*  Link Layer                      */
+       BFI_MC_ABLK             = 7,    /*  ASIC block configuration        */
        BFI_MC_UF               = 8,    /*  Unsolicited frame receive       */
        BFI_MC_FCXP             = 9,    /*  FC Transport                    */
        BFI_MC_LPS              = 10,   /*  lport fc login services         */
        BFI_MC_RPORT            = 11,   /*  Remote port             */
-       BFI_MC_ITNIM            = 12,   /*  I-T nexus (Initiator mode)      */
+       BFI_MC_ITN              = 12,   /*  I-T nexus (Initiator mode)      */
        BFI_MC_IOIM_READ        = 13,   /*  read IO (Initiator mode)        */
        BFI_MC_IOIM_WRITE       = 14,   /*  write IO (Initiator mode)       */
        BFI_MC_IOIM_IO          = 15,   /*  IO (Initiator mode)     */
@@ -168,6 +208,8 @@ enum bfi_mclass {
        BFI_MC_IOIM_IOCOM       = 17,   /*  good IO completion              */
        BFI_MC_TSKIM            = 18,   /*  Initiator Task management       */
        BFI_MC_PORT             = 21,   /*  Physical port                   */
+       BFI_MC_SFP              = 22,   /*  SFP module  */
+       BFI_MC_PHY              = 25,   /*  External PHY message class  */
        BFI_MC_MAX              = 32
 };
 
@@ -175,23 +217,28 @@ enum bfi_mclass {
 #define BFI_IOC_MAX_CQS_ASIC   8
 #define BFI_IOC_MSGLEN_MAX     32      /* 32 bytes */
 
-#define BFI_BOOT_TYPE_OFF              8
-#define BFI_BOOT_LOADER_OFF            12
-
-#define BFI_BOOT_TYPE_NORMAL           0
-#define        BFI_BOOT_TYPE_FLASH             1
-#define        BFI_BOOT_TYPE_MEMTEST           2
-
-#define BFI_BOOT_LOADER_OS             0
-#define BFI_BOOT_LOADER_BIOS           1
-#define BFI_BOOT_LOADER_UEFI           2
-
 /*
  *----------------------------------------------------------------------
  *                             IOC
  *----------------------------------------------------------------------
  */
 
+/*
+ * Different asic generations
+ */
+enum bfi_asic_gen {
+       BFI_ASIC_GEN_CB         = 1,    /* crossbow 8G FC               */
+       BFI_ASIC_GEN_CT         = 2,    /* catapult 8G FC or 10G CNA    */
+       BFI_ASIC_GEN_CT2        = 3,    /* catapult-2 16G FC or 10G CNA */
+};
+
+enum bfi_asic_mode {
+       BFI_ASIC_MODE_FC        = 1,    /* FC upto 8G speed             */
+       BFI_ASIC_MODE_FC16      = 2,    /* FC upto 16G speed            */
+       BFI_ASIC_MODE_ETH       = 3,    /* Ethernet ports               */
+       BFI_ASIC_MODE_COMBO     = 4,    /* FC 16G and Ethernet 10G port */
+};
+
 enum bfi_ioc_h2i_msgs {
        BFI_IOC_H2I_ENABLE_REQ          = 1,
        BFI_IOC_H2I_DISABLE_REQ         = 2,
@@ -204,8 +251,8 @@ enum bfi_ioc_i2h_msgs {
        BFI_IOC_I2H_ENABLE_REPLY        = BFA_I2HM(1),
        BFI_IOC_I2H_DISABLE_REPLY       = BFA_I2HM(2),
        BFI_IOC_I2H_GETATTR_REPLY       = BFA_I2HM(3),
-       BFI_IOC_I2H_READY_EVENT         = BFA_I2HM(4),
-       BFI_IOC_I2H_HBEAT               = BFA_I2HM(5),
+       BFI_IOC_I2H_HBEAT               = BFA_I2HM(4),
+       BFI_IOC_I2H_ACQ_ADDR_REPLY      = BFA_I2HM(5),
 };
 
 /*
@@ -220,7 +267,8 @@ struct bfi_ioc_attr_s {
        wwn_t           mfg_pwwn;       /*  Mfg port wwn           */
        wwn_t           mfg_nwwn;       /*  Mfg node wwn           */
        mac_t           mfg_mac;        /*  Mfg mac                */
-       u16     rsvd_a;
+       u8              port_mode;      /* bfi_port_mode           */
+       u8              rsvd_a;
        wwn_t           pwwn;
        wwn_t           nwwn;
        mac_t           mac;            /*  PBC or Mfg mac         */
@@ -272,21 +320,33 @@ struct bfi_ioc_getattr_reply_s {
 #define BFI_IOC_FW_SIGNATURE   (0xbfadbfad)
 #define BFI_IOC_MD5SUM_SZ      4
 struct bfi_ioc_image_hdr_s {
-       u32     signature;      /*  constant signature */
-       u32     rsvd_a;
-       u32     exec;           /*  exec vector */
-       u32     param;          /*  parameters          */
+       u32     signature;      /* constant signature           */
+       u8      asic_gen;       /* asic generation              */
+       u8      asic_mode;
+       u8      port0_mode;     /* device mode for port 0       */
+       u8      port1_mode;     /* device mode for port 1       */
+       u32     exec;           /* exec vector                  */
+       u32     bootenv;        /* firmware boot env            */
        u32     rsvd_b[4];
        u32     md5sum[BFI_IOC_MD5SUM_SZ];
 };
 
-/*
- *  BFI_IOC_I2H_READY_EVENT message
- */
-struct bfi_ioc_rdy_event_s {
-       struct bfi_mhdr_s       mh;             /*  common msg header */
-       u8                      init_status;    /*  init event status */
-       u8                      rsvd[3];
+#define BFI_FWBOOT_DEVMODE_OFF         4
+#define BFI_FWBOOT_TYPE_OFF            8
+#define BFI_FWBOOT_ENV_OFF             12
+#define BFI_FWBOOT_DEVMODE(__asic_gen, __asic_mode, __p0_mode, __p1_mode) \
+       (((u32)(__asic_gen)) << 24 |            \
+        ((u32)(__asic_mode)) << 16 |           \
+        ((u32)(__p0_mode)) << 8 |              \
+        ((u32)(__p1_mode)))
+
+#define BFI_FWBOOT_TYPE_NORMAL 0
+#define BFI_FWBOOT_TYPE_MEMTEST        2
+#define BFI_FWBOOT_ENV_OS       0
+
+enum bfi_port_mode {
+       BFI_PORT_MODE_FC        = 1,
+       BFI_PORT_MODE_ETH       = 2,
 };
 
 struct bfi_ioc_hbeat_s {
@@ -345,8 +405,8 @@ enum {
  */
 struct bfi_ioc_ctrl_req_s {
        struct bfi_mhdr_s       mh;
-       u8                      ioc_class;
-       u8                      rsvd[3];
+       u16                     clscode;
+       u16                     rsvd;
        u32             tv_sec;
 };
 #define bfi_ioc_enable_req_t struct bfi_ioc_ctrl_req_s;
@@ -358,7 +418,9 @@ struct bfi_ioc_ctrl_req_s {
 struct bfi_ioc_ctrl_reply_s {
        struct bfi_mhdr_s       mh;             /*  Common msg header     */
        u8                      status;         /*  enable/disable status */
-       u8                      rsvd[3];
+       u8                      port_mode;      /*  bfa_mode_s  */
+       u8                      cap_bm;         /*  capability bit mask */
+       u8                      rsvd;
 };
 #define bfi_ioc_enable_reply_t struct bfi_ioc_ctrl_reply_s;
 #define bfi_ioc_disable_reply_t struct bfi_ioc_ctrl_reply_s;
@@ -380,7 +442,7 @@ union bfi_ioc_h2i_msg_u {
  */
 union bfi_ioc_i2h_msg_u {
        struct bfi_mhdr_s               mh;
-       struct bfi_ioc_rdy_event_s      rdy_event;
+       struct bfi_ioc_ctrl_reply_s     fw_event;
        u32                     mboxmsg[BFI_IOC_MSGSZ];
 };
 
@@ -393,6 +455,7 @@ union bfi_ioc_i2h_msg_u {
 
 #define BFI_PBC_MAX_BLUNS      8
 #define BFI_PBC_MAX_VPORTS     16
+#define BFI_PBC_PORT_DISABLED  2
 
 /*
  * PBC boot lun configuration
@@ -574,6 +637,496 @@ union bfi_port_i2h_msg_u {
        struct bfi_port_generic_rsp_s   clearstats_rsp;
 };
 
+/*
+ *----------------------------------------------------------------------
+ *                             ABLK
+ *----------------------------------------------------------------------
+ */
+enum bfi_ablk_h2i_msgs_e {
+       BFI_ABLK_H2I_QUERY              = 1,
+       BFI_ABLK_H2I_ADPT_CONFIG        = 2,
+       BFI_ABLK_H2I_PORT_CONFIG        = 3,
+       BFI_ABLK_H2I_PF_CREATE          = 4,
+       BFI_ABLK_H2I_PF_DELETE          = 5,
+       BFI_ABLK_H2I_PF_UPDATE          = 6,
+       BFI_ABLK_H2I_OPTROM_ENABLE      = 7,
+       BFI_ABLK_H2I_OPTROM_DISABLE     = 8,
+};
+
+enum bfi_ablk_i2h_msgs_e {
+       BFI_ABLK_I2H_QUERY              = BFA_I2HM(BFI_ABLK_H2I_QUERY),
+       BFI_ABLK_I2H_ADPT_CONFIG        = BFA_I2HM(BFI_ABLK_H2I_ADPT_CONFIG),
+       BFI_ABLK_I2H_PORT_CONFIG        = BFA_I2HM(BFI_ABLK_H2I_PORT_CONFIG),
+       BFI_ABLK_I2H_PF_CREATE          = BFA_I2HM(BFI_ABLK_H2I_PF_CREATE),
+       BFI_ABLK_I2H_PF_DELETE          = BFA_I2HM(BFI_ABLK_H2I_PF_DELETE),
+       BFI_ABLK_I2H_PF_UPDATE          = BFA_I2HM(BFI_ABLK_H2I_PF_UPDATE),
+       BFI_ABLK_I2H_OPTROM_ENABLE      = BFA_I2HM(BFI_ABLK_H2I_OPTROM_ENABLE),
+       BFI_ABLK_I2H_OPTROM_DISABLE     = BFA_I2HM(BFI_ABLK_H2I_OPTROM_DISABLE),
+};
+
+/* BFI_ABLK_H2I_QUERY */
+struct bfi_ablk_h2i_query_s {
+       struct bfi_mhdr_s       mh;
+       union bfi_addr_u        addr;
+};
+
+/* BFI_ABLK_H2I_ADPT_CONFIG, BFI_ABLK_H2I_PORT_CONFIG */
+struct bfi_ablk_h2i_cfg_req_s {
+       struct bfi_mhdr_s       mh;
+       u8                      mode;
+       u8                      port;
+       u8                      max_pf;
+       u8                      max_vf;
+};
+
+/*
+ * BFI_ABLK_H2I_PF_CREATE, BFI_ABLK_H2I_PF_DELETE,
+ */
+struct bfi_ablk_h2i_pf_req_s {
+       struct bfi_mhdr_s       mh;
+       u8                      pcifn;
+       u8                      port;
+       u16                     pers;
+       u32                     bw;
+};
+
+/* BFI_ABLK_H2I_OPTROM_ENABLE, BFI_ABLK_H2I_OPTROM_DISABLE */
+struct bfi_ablk_h2i_optrom_s {
+       struct bfi_mhdr_s       mh;
+};
+
+/*
+ * BFI_ABLK_I2H_QUERY
+ * BFI_ABLK_I2H_PORT_CONFIG
+ * BFI_ABLK_I2H_PF_CREATE
+ * BFI_ABLK_I2H_PF_DELETE
+ * BFI_ABLK_I2H_PF_UPDATE
+ * BFI_ABLK_I2H_OPTROM_ENABLE
+ * BFI_ABLK_I2H_OPTROM_DISABLE
+ */
+struct bfi_ablk_i2h_rsp_s {
+       struct bfi_mhdr_s       mh;
+       u8                      status;
+       u8                      pcifn;
+       u8                      port_mode;
+};
+
+
+/*
+ *     CEE module specific messages
+ */
+
+/* Mailbox commands from host to firmware */
+enum bfi_cee_h2i_msgs_e {
+       BFI_CEE_H2I_GET_CFG_REQ = 1,
+       BFI_CEE_H2I_RESET_STATS = 2,
+       BFI_CEE_H2I_GET_STATS_REQ = 3,
+};
+
+enum bfi_cee_i2h_msgs_e {
+       BFI_CEE_I2H_GET_CFG_RSP = BFA_I2HM(1),
+       BFI_CEE_I2H_RESET_STATS_RSP = BFA_I2HM(2),
+       BFI_CEE_I2H_GET_STATS_RSP = BFA_I2HM(3),
+};
+
+/*
+ * H2I command structure for resetting the stats
+ */
+struct bfi_cee_reset_stats_s {
+       struct bfi_mhdr_s  mh;
+};
+
+/*
+ * Get configuration  command from host
+ */
+struct bfi_cee_get_req_s {
+       struct bfi_mhdr_s       mh;
+       union bfi_addr_u        dma_addr;
+};
+
+/*
+ * Reply message from firmware
+ */
+struct bfi_cee_get_rsp_s {
+       struct bfi_mhdr_s       mh;
+       u8                      cmd_status;
+       u8                      rsvd[3];
+};
+
+/*
+ * Reply message from firmware
+ */
+struct bfi_cee_stats_rsp_s {
+       struct bfi_mhdr_s       mh;
+       u8                      cmd_status;
+       u8                      rsvd[3];
+};
+
+/* Mailbox message structures from firmware to host    */
+union bfi_cee_i2h_msg_u {
+       struct bfi_mhdr_s               mh;
+       struct bfi_cee_get_rsp_s        get_rsp;
+       struct bfi_cee_stats_rsp_s      stats_rsp;
+};
+
+/*
+ * SFP related
+ */
+
+enum bfi_sfp_h2i_e {
+       BFI_SFP_H2I_SHOW        = 1,
+       BFI_SFP_H2I_SCN         = 2,
+};
+
+enum bfi_sfp_i2h_e {
+       BFI_SFP_I2H_SHOW = BFA_I2HM(BFI_SFP_H2I_SHOW),
+       BFI_SFP_I2H_SCN  = BFA_I2HM(BFI_SFP_H2I_SCN),
+};
+
+/*
+ *     SFP state
+ */
+enum bfa_sfp_stat_e {
+       BFA_SFP_STATE_INIT      = 0,    /* SFP state is uninit  */
+       BFA_SFP_STATE_REMOVED   = 1,    /* SFP is removed       */
+       BFA_SFP_STATE_INSERTED  = 2,    /* SFP is inserted      */
+       BFA_SFP_STATE_VALID     = 3,    /* SFP is valid         */
+       BFA_SFP_STATE_UNSUPPORT = 4,    /* SFP is unsupported   */
+       BFA_SFP_STATE_FAILED    = 5,    /* SFP i2c read fail    */
+};
+
+/*
+ *  SFP memory access type
+ */
+enum bfi_sfp_mem_e {
+       BFI_SFP_MEM_ALL         = 0x1,  /* access all data field */
+       BFI_SFP_MEM_DIAGEXT     = 0x2,  /* access diag ext data field only */
+};
+
+struct bfi_sfp_req_s {
+       struct bfi_mhdr_s       mh;
+       u8                      memtype;
+       u8                      rsvd[3];
+       struct bfi_alen_s       alen;
+};
+
+struct bfi_sfp_rsp_s {
+       struct bfi_mhdr_s       mh;
+       u8                      status;
+       u8                      state;
+       u8                      rsvd[2];
+};
+
+/*
+ *     FLASH module specific
+ */
+enum bfi_flash_h2i_msgs {
+       BFI_FLASH_H2I_QUERY_REQ = 1,
+       BFI_FLASH_H2I_ERASE_REQ = 2,
+       BFI_FLASH_H2I_WRITE_REQ = 3,
+       BFI_FLASH_H2I_READ_REQ = 4,
+       BFI_FLASH_H2I_BOOT_VER_REQ = 5,
+};
+
+enum bfi_flash_i2h_msgs {
+       BFI_FLASH_I2H_QUERY_RSP = BFA_I2HM(1),
+       BFI_FLASH_I2H_ERASE_RSP = BFA_I2HM(2),
+       BFI_FLASH_I2H_WRITE_RSP = BFA_I2HM(3),
+       BFI_FLASH_I2H_READ_RSP = BFA_I2HM(4),
+       BFI_FLASH_I2H_BOOT_VER_RSP = BFA_I2HM(5),
+       BFI_FLASH_I2H_EVENT = BFA_I2HM(127),
+};
+
+/*
+ * Flash query request
+ */
+struct bfi_flash_query_req_s {
+       struct bfi_mhdr_s mh;   /* Common msg header */
+       struct bfi_alen_s alen;
+};
+
+/*
+ * Flash erase request
+ */
+struct bfi_flash_erase_req_s {
+       struct bfi_mhdr_s       mh;     /* Common msg header */
+       u32     type;   /* partition type */
+       u8      instance; /* partition instance */
+       u8      rsv[3];
+};
+
+/*
+ * Flash write request
+ */
+struct bfi_flash_write_req_s {
+       struct bfi_mhdr_s mh;   /* Common msg header */
+       struct bfi_alen_s alen;
+       u32     type;   /* partition type */
+       u8      instance; /* partition instance */
+       u8      last;
+       u8      rsv[2];
+       u32     offset;
+       u32     length;
+};
+
+/*
+ * Flash read request
+ */
+struct bfi_flash_read_req_s {
+       struct bfi_mhdr_s mh;   /* Common msg header */
+       u32     type;           /* partition type */
+       u8      instance;       /* partition instance */
+       u8      rsv[3];
+       u32     offset;
+       u32     length;
+       struct bfi_alen_s alen;
+};
+
+/*
+ * Flash query response
+ */
+struct bfi_flash_query_rsp_s {
+       struct bfi_mhdr_s mh;   /* Common msg header */
+       u32     status;
+};
+
+/*
+ * Flash read response
+ */
+struct bfi_flash_read_rsp_s {
+       struct bfi_mhdr_s mh;   /* Common msg header */
+       u32     type;       /* partition type */
+       u8      instance;   /* partition instance */
+       u8      rsv[3];
+       u32     status;
+       u32     length;
+};
+
+/*
+ * Flash write response
+ */
+struct bfi_flash_write_rsp_s {
+       struct bfi_mhdr_s mh;   /* Common msg header */
+       u32     type;       /* partition type */
+       u8      instance;   /* partition instance */
+       u8      rsv[3];
+       u32     status;
+       u32     length;
+};
+
+/*
+ * Flash erase response
+ */
+struct bfi_flash_erase_rsp_s {
+       struct bfi_mhdr_s mh;   /* Common msg header */
+       u32     type;           /* partition type */
+       u8      instance;       /* partition instance */
+       u8      rsv[3];
+       u32     status;
+};
+
+/*
+ *----------------------------------------------------------------------
+ *                             DIAG
+ *----------------------------------------------------------------------
+ */
+enum bfi_diag_h2i {
+       BFI_DIAG_H2I_PORTBEACON = 1,
+       BFI_DIAG_H2I_LOOPBACK = 2,
+       BFI_DIAG_H2I_FWPING = 3,
+       BFI_DIAG_H2I_TEMPSENSOR = 4,
+       BFI_DIAG_H2I_LEDTEST = 5,
+       BFI_DIAG_H2I_QTEST      = 6,
+};
+
+enum bfi_diag_i2h {
+       BFI_DIAG_I2H_PORTBEACON = BFA_I2HM(BFI_DIAG_H2I_PORTBEACON),
+       BFI_DIAG_I2H_LOOPBACK = BFA_I2HM(BFI_DIAG_H2I_LOOPBACK),
+       BFI_DIAG_I2H_FWPING = BFA_I2HM(BFI_DIAG_H2I_FWPING),
+       BFI_DIAG_I2H_TEMPSENSOR = BFA_I2HM(BFI_DIAG_H2I_TEMPSENSOR),
+       BFI_DIAG_I2H_LEDTEST = BFA_I2HM(BFI_DIAG_H2I_LEDTEST),
+       BFI_DIAG_I2H_QTEST      = BFA_I2HM(BFI_DIAG_H2I_QTEST),
+};
+
+#define BFI_DIAG_MAX_SGES      2
+#define BFI_DIAG_DMA_BUF_SZ    (2 * 1024)
+#define BFI_BOOT_MEMTEST_RES_ADDR 0x900
+#define BFI_BOOT_MEMTEST_RES_SIG  0xA0A1A2A3
+
+struct bfi_diag_lb_req_s {
+       struct bfi_mhdr_s mh;
+       u32     loopcnt;
+       u32     pattern;
+       u8      lb_mode;        /*!< bfa_port_opmode_t */
+       u8      speed;          /*!< bfa_port_speed_t */
+       u8      rsvd[2];
+};
+
+struct bfi_diag_lb_rsp_s {
+       struct bfi_mhdr_s  mh;          /* 4 bytes */
+       struct bfa_diag_loopback_result_s res; /* 16 bytes */
+};
+
+struct bfi_diag_fwping_req_s {
+       struct bfi_mhdr_s mh;   /* 4 bytes */
+       struct bfi_alen_s alen; /* 12 bytes */
+       u32     data;           /* user input data pattern */
+       u32     count;          /* user input dma count */
+       u8      qtag;           /* track CPE vc */
+       u8      rsv[3];
+};
+
+struct bfi_diag_fwping_rsp_s {
+       struct bfi_mhdr_s  mh;          /* 4 bytes */
+       u32     data;           /* user input data pattern    */
+       u8      qtag;           /* track CPE vc               */
+       u8      dma_status;     /* dma status                 */
+       u8      rsv[2];
+};
+
+/*
+ * Temperature Sensor
+ */
+struct bfi_diag_ts_req_s {
+       struct bfi_mhdr_s mh;   /* 4 bytes */
+       u16     temp;           /* 10-bit A/D value */
+       u16     brd_temp;       /* 9-bit board temp */
+       u8      status;
+       u8      ts_junc;        /* show junction tempsensor   */
+       u8      ts_brd;         /* show board tempsensor      */
+       u8      rsv;
+};
+#define bfi_diag_ts_rsp_t struct bfi_diag_ts_req_s
+
+struct bfi_diag_ledtest_req_s {
+       struct bfi_mhdr_s  mh;  /* 4 bytes */
+       u8      cmd;
+       u8      color;
+       u8      portid;
+       u8      led;    /* bitmap of LEDs to be tested */
+       u16     freq;   /* no. of blinks every 10 secs */
+       u8      rsv[2];
+};
+
+/* notify host led operation is done */
+struct bfi_diag_ledtest_rsp_s {
+       struct bfi_mhdr_s  mh;  /* 4 bytes */
+};
+
+struct bfi_diag_portbeacon_req_s {
+       struct bfi_mhdr_s  mh;  /* 4 bytes */
+       u32     period; /* beaconing period */
+       u8      beacon; /* 1: beacon on */
+       u8      rsvd[3];
+};
+
+/* notify host the beacon is off */
+struct bfi_diag_portbeacon_rsp_s {
+       struct bfi_mhdr_s  mh;  /* 4 bytes */
+};
+
+struct bfi_diag_qtest_req_s {
+       struct bfi_mhdr_s       mh;             /* 4 bytes */
+       u32     data[BFI_LMSG_PL_WSZ]; /* fill up tcm prefetch area */
+};
+#define bfi_diag_qtest_rsp_t struct bfi_diag_qtest_req_s
+
+/*
+ *     PHY module specific
+ */
+enum bfi_phy_h2i_msgs_e {
+       BFI_PHY_H2I_QUERY_REQ = 1,
+       BFI_PHY_H2I_STATS_REQ = 2,
+       BFI_PHY_H2I_WRITE_REQ = 3,
+       BFI_PHY_H2I_READ_REQ = 4,
+};
+
+enum bfi_phy_i2h_msgs_e {
+       BFI_PHY_I2H_QUERY_RSP = BFA_I2HM(1),
+       BFI_PHY_I2H_STATS_RSP = BFA_I2HM(2),
+       BFI_PHY_I2H_WRITE_RSP = BFA_I2HM(3),
+       BFI_PHY_I2H_READ_RSP = BFA_I2HM(4),
+};
+
+/*
+ * External PHY query request
+ */
+struct bfi_phy_query_req_s {
+       struct bfi_mhdr_s       mh;             /* Common msg header */
+       u8                      instance;
+       u8                      rsv[3];
+       struct bfi_alen_s       alen;
+};
+
+/*
+ * External PHY stats request
+ */
+struct bfi_phy_stats_req_s {
+       struct bfi_mhdr_s       mh;             /* Common msg header */
+       u8                      instance;
+       u8                      rsv[3];
+       struct bfi_alen_s       alen;
+};
+
+/*
+ * External PHY write request
+ */
+struct bfi_phy_write_req_s {
+       struct bfi_mhdr_s       mh;             /* Common msg header */
+       u8              instance;
+       u8              last;
+       u8              rsv[2];
+       u32             offset;
+       u32             length;
+       struct bfi_alen_s       alen;
+};
+
+/*
+ * External PHY read request
+ */
+struct bfi_phy_read_req_s {
+       struct bfi_mhdr_s       mh;     /* Common msg header */
+       u8              instance;
+       u8              rsv[3];
+       u32             offset;
+       u32             length;
+       struct bfi_alen_s       alen;
+};
+
+/*
+ * External PHY query response
+ */
+struct bfi_phy_query_rsp_s {
+       struct bfi_mhdr_s       mh;     /* Common msg header */
+       u32                     status;
+};
+
+/*
+ * External PHY stats response
+ */
+struct bfi_phy_stats_rsp_s {
+       struct bfi_mhdr_s       mh;     /* Common msg header */
+       u32                     status;
+};
+
+/*
+ * External PHY read response
+ */
+struct bfi_phy_read_rsp_s {
+       struct bfi_mhdr_s       mh;     /* Common msg header */
+       u32                     status;
+       u32             length;
+};
+
+/*
+ * External PHY write response
+ */
+struct bfi_phy_write_rsp_s {
+       struct bfi_mhdr_s       mh;     /* Common msg header */
+       u32                     status;
+       u32                     length;
+};
+
 #pragma pack()
 
 #endif /* __BFI_H__ */
diff --git a/drivers/scsi/bfa/bfi_cbreg.h b/drivers/scsi/bfa/bfi_cbreg.h
deleted file mode 100644 (file)
index 39ad42b..0000000
+++ /dev/null
@@ -1,305 +0,0 @@
-
-/*
- * bfi_cbreg.h crossbow host block register definitions
- *
- * !!! Do not edit. Auto generated. !!!
- */
-
-#ifndef __BFI_CBREG_H__
-#define __BFI_CBREG_H__
-
-
-#define HOSTFN0_INT_STATUS               0x00014000
-#define __HOSTFN0_INT_STATUS_LVL_MK      0x00f00000
-#define __HOSTFN0_INT_STATUS_LVL_SH      20
-#define __HOSTFN0_INT_STATUS_LVL(_v)     ((_v) << __HOSTFN0_INT_STATUS_LVL_SH)
-#define __HOSTFN0_INT_STATUS_P           0x000fffff
-#define HOSTFN0_INT_MSK                  0x00014004
-#define HOST_PAGE_NUM_FN0                0x00014008
-#define __HOST_PAGE_NUM_FN               0x000001ff
-#define HOSTFN1_INT_STATUS               0x00014100
-#define __HOSTFN1_INT_STAT_LVL_MK        0x00f00000
-#define __HOSTFN1_INT_STAT_LVL_SH        20
-#define __HOSTFN1_INT_STAT_LVL(_v)       ((_v) << __HOSTFN1_INT_STAT_LVL_SH)
-#define __HOSTFN1_INT_STAT_P             0x000fffff
-#define HOSTFN1_INT_MSK                  0x00014104
-#define HOST_PAGE_NUM_FN1                0x00014108
-#define APP_PLL_400_CTL_REG              0x00014204
-#define __P_400_PLL_LOCK                 0x80000000
-#define __APP_PLL_400_SRAM_USE_100MHZ    0x00100000
-#define __APP_PLL_400_RESET_TIMER_MK     0x000e0000
-#define __APP_PLL_400_RESET_TIMER_SH     17
-#define __APP_PLL_400_RESET_TIMER(_v)    ((_v) << __APP_PLL_400_RESET_TIMER_SH)
-#define __APP_PLL_400_LOGIC_SOFT_RESET   0x00010000
-#define __APP_PLL_400_CNTLMT0_1_MK       0x0000c000
-#define __APP_PLL_400_CNTLMT0_1_SH       14
-#define __APP_PLL_400_CNTLMT0_1(_v)      ((_v) << __APP_PLL_400_CNTLMT0_1_SH)
-#define __APP_PLL_400_JITLMT0_1_MK       0x00003000
-#define __APP_PLL_400_JITLMT0_1_SH       12
-#define __APP_PLL_400_JITLMT0_1(_v)      ((_v) << __APP_PLL_400_JITLMT0_1_SH)
-#define __APP_PLL_400_HREF               0x00000800
-#define __APP_PLL_400_HDIV               0x00000400
-#define __APP_PLL_400_P0_1_MK            0x00000300
-#define __APP_PLL_400_P0_1_SH            8
-#define __APP_PLL_400_P0_1(_v)           ((_v) << __APP_PLL_400_P0_1_SH)
-#define __APP_PLL_400_Z0_2_MK            0x000000e0
-#define __APP_PLL_400_Z0_2_SH            5
-#define __APP_PLL_400_Z0_2(_v)           ((_v) << __APP_PLL_400_Z0_2_SH)
-#define __APP_PLL_400_RSEL200500         0x00000010
-#define __APP_PLL_400_ENARST             0x00000008
-#define __APP_PLL_400_BYPASS             0x00000004
-#define __APP_PLL_400_LRESETN            0x00000002
-#define __APP_PLL_400_ENABLE             0x00000001
-#define APP_PLL_212_CTL_REG              0x00014208
-#define __P_212_PLL_LOCK                 0x80000000
-#define __APP_PLL_212_RESET_TIMER_MK     0x000e0000
-#define __APP_PLL_212_RESET_TIMER_SH     17
-#define __APP_PLL_212_RESET_TIMER(_v)    ((_v) << __APP_PLL_212_RESET_TIMER_SH)
-#define __APP_PLL_212_LOGIC_SOFT_RESET   0x00010000
-#define __APP_PLL_212_CNTLMT0_1_MK       0x0000c000
-#define __APP_PLL_212_CNTLMT0_1_SH       14
-#define __APP_PLL_212_CNTLMT0_1(_v)      ((_v) << __APP_PLL_212_CNTLMT0_1_SH)
-#define __APP_PLL_212_JITLMT0_1_MK       0x00003000
-#define __APP_PLL_212_JITLMT0_1_SH       12
-#define __APP_PLL_212_JITLMT0_1(_v)      ((_v) << __APP_PLL_212_JITLMT0_1_SH)
-#define __APP_PLL_212_HREF               0x00000800
-#define __APP_PLL_212_HDIV               0x00000400
-#define __APP_PLL_212_P0_1_MK            0x00000300
-#define __APP_PLL_212_P0_1_SH            8
-#define __APP_PLL_212_P0_1(_v)           ((_v) << __APP_PLL_212_P0_1_SH)
-#define __APP_PLL_212_Z0_2_MK            0x000000e0
-#define __APP_PLL_212_Z0_2_SH            5
-#define __APP_PLL_212_Z0_2(_v)           ((_v) << __APP_PLL_212_Z0_2_SH)
-#define __APP_PLL_212_RSEL200500         0x00000010
-#define __APP_PLL_212_ENARST             0x00000008
-#define __APP_PLL_212_BYPASS             0x00000004
-#define __APP_PLL_212_LRESETN            0x00000002
-#define __APP_PLL_212_ENABLE             0x00000001
-#define HOST_SEM0_REG                    0x00014230
-#define __HOST_SEMAPHORE                 0x00000001
-#define HOST_SEM1_REG                    0x00014234
-#define HOST_SEM2_REG                    0x00014238
-#define HOST_SEM3_REG                    0x0001423c
-#define HOST_SEM0_INFO_REG               0x00014240
-#define HOST_SEM1_INFO_REG               0x00014244
-#define HOST_SEM2_INFO_REG               0x00014248
-#define HOST_SEM3_INFO_REG               0x0001424c
-#define HOSTFN0_LPU0_CMD_STAT            0x00019000
-#define __HOSTFN0_LPU0_MBOX_INFO_MK      0xfffffffe
-#define __HOSTFN0_LPU0_MBOX_INFO_SH      1
-#define __HOSTFN0_LPU0_MBOX_INFO(_v)     ((_v) << __HOSTFN0_LPU0_MBOX_INFO_SH)
-#define __HOSTFN0_LPU0_MBOX_CMD_STATUS   0x00000001
-#define LPU0_HOSTFN0_CMD_STAT            0x00019008
-#define __LPU0_HOSTFN0_MBOX_INFO_MK      0xfffffffe
-#define __LPU0_HOSTFN0_MBOX_INFO_SH      1
-#define __LPU0_HOSTFN0_MBOX_INFO(_v)     ((_v) << __LPU0_HOSTFN0_MBOX_INFO_SH)
-#define __LPU0_HOSTFN0_MBOX_CMD_STATUS   0x00000001
-#define HOSTFN1_LPU1_CMD_STAT            0x00019014
-#define __HOSTFN1_LPU1_MBOX_INFO_MK      0xfffffffe
-#define __HOSTFN1_LPU1_MBOX_INFO_SH      1
-#define __HOSTFN1_LPU1_MBOX_INFO(_v)     ((_v) << __HOSTFN1_LPU1_MBOX_INFO_SH)
-#define __HOSTFN1_LPU1_MBOX_CMD_STATUS   0x00000001
-#define LPU1_HOSTFN1_CMD_STAT            0x0001901c
-#define __LPU1_HOSTFN1_MBOX_INFO_MK      0xfffffffe
-#define __LPU1_HOSTFN1_MBOX_INFO_SH      1
-#define __LPU1_HOSTFN1_MBOX_INFO(_v)     ((_v) << __LPU1_HOSTFN1_MBOX_INFO_SH)
-#define __LPU1_HOSTFN1_MBOX_CMD_STATUS   0x00000001
-#define CPE_Q0_DEPTH                     0x00010014
-#define CPE_Q0_PI                        0x0001001c
-#define CPE_Q0_CI                        0x00010020
-#define CPE_Q1_DEPTH                     0x00010034
-#define CPE_Q1_PI                        0x0001003c
-#define CPE_Q1_CI                        0x00010040
-#define CPE_Q2_DEPTH                     0x00010054
-#define CPE_Q2_PI                        0x0001005c
-#define CPE_Q2_CI                        0x00010060
-#define CPE_Q3_DEPTH                     0x00010074
-#define CPE_Q3_PI                        0x0001007c
-#define CPE_Q3_CI                        0x00010080
-#define CPE_Q4_DEPTH                     0x00010094
-#define CPE_Q4_PI                        0x0001009c
-#define CPE_Q4_CI                        0x000100a0
-#define CPE_Q5_DEPTH                     0x000100b4
-#define CPE_Q5_PI                        0x000100bc
-#define CPE_Q5_CI                        0x000100c0
-#define CPE_Q6_DEPTH                     0x000100d4
-#define CPE_Q6_PI                        0x000100dc
-#define CPE_Q6_CI                        0x000100e0
-#define CPE_Q7_DEPTH                     0x000100f4
-#define CPE_Q7_PI                        0x000100fc
-#define CPE_Q7_CI                        0x00010100
-#define RME_Q0_DEPTH                     0x00011014
-#define RME_Q0_PI                        0x0001101c
-#define RME_Q0_CI                        0x00011020
-#define RME_Q1_DEPTH                     0x00011034
-#define RME_Q1_PI                        0x0001103c
-#define RME_Q1_CI                        0x00011040
-#define RME_Q2_DEPTH                     0x00011054
-#define RME_Q2_PI                        0x0001105c
-#define RME_Q2_CI                        0x00011060
-#define RME_Q3_DEPTH                     0x00011074
-#define RME_Q3_PI                        0x0001107c
-#define RME_Q3_CI                        0x00011080
-#define RME_Q4_DEPTH                     0x00011094
-#define RME_Q4_PI                        0x0001109c
-#define RME_Q4_CI                        0x000110a0
-#define RME_Q5_DEPTH                     0x000110b4
-#define RME_Q5_PI                        0x000110bc
-#define RME_Q5_CI                        0x000110c0
-#define RME_Q6_DEPTH                     0x000110d4
-#define RME_Q6_PI                        0x000110dc
-#define RME_Q6_CI                        0x000110e0
-#define RME_Q7_DEPTH                     0x000110f4
-#define RME_Q7_PI                        0x000110fc
-#define RME_Q7_CI                        0x00011100
-#define PSS_CTL_REG                      0x00018800
-#define __PSS_I2C_CLK_DIV_MK             0x00030000
-#define __PSS_I2C_CLK_DIV_SH             16
-#define __PSS_I2C_CLK_DIV(_v)            ((_v) << __PSS_I2C_CLK_DIV_SH)
-#define __PSS_LMEM_INIT_DONE             0x00001000
-#define __PSS_LMEM_RESET                 0x00000200
-#define __PSS_LMEM_INIT_EN               0x00000100
-#define __PSS_LPU1_RESET                 0x00000002
-#define __PSS_LPU0_RESET                 0x00000001
-#define PSS_ERR_STATUS_REG               0x00018810
-#define __PSS_LMEM1_CORR_ERR             0x00000800
-#define __PSS_LMEM0_CORR_ERR             0x00000400
-#define __PSS_LMEM1_UNCORR_ERR           0x00000200
-#define __PSS_LMEM0_UNCORR_ERR           0x00000100
-#define __PSS_BAL_PERR                   0x00000080
-#define __PSS_DIP_IF_ERR                 0x00000040
-#define __PSS_IOH_IF_ERR                 0x00000020
-#define __PSS_TDS_IF_ERR                 0x00000010
-#define __PSS_RDS_IF_ERR                 0x00000008
-#define __PSS_SGM_IF_ERR                 0x00000004
-#define __PSS_LPU1_RAM_ERR               0x00000002
-#define __PSS_LPU0_RAM_ERR               0x00000001
-#define ERR_SET_REG                      0x00018818
-#define __PSS_ERR_STATUS_SET             0x00000fff
-
-
-/*
- * These definitions are either in error/missing in spec. Its auto-generated
- * from hard coded values in regparse.pl.
- */
-#define __EMPHPOST_AT_4G_MK_FIX          0x0000001c
-#define __EMPHPOST_AT_4G_SH_FIX          0x00000002
-#define __EMPHPRE_AT_4G_FIX              0x00000003
-#define __SFP_TXRATE_EN_FIX              0x00000100
-#define __SFP_RXRATE_EN_FIX              0x00000080
-
-
-/*
- * These register definitions are auto-generated from hard coded values
- * in regparse.pl.
- */
-#define HOSTFN0_LPU_MBOX0_0              0x00019200
-#define HOSTFN1_LPU_MBOX0_8              0x00019260
-#define LPU_HOSTFN0_MBOX0_0              0x00019280
-#define LPU_HOSTFN1_MBOX0_8              0x000192e0
-
-
-/*
- * These register mapping definitions are auto-generated from mapping tables
- * in regparse.pl.
- */
-#define BFA_IOC0_HBEAT_REG               HOST_SEM0_INFO_REG
-#define BFA_IOC0_STATE_REG               HOST_SEM1_INFO_REG
-#define BFA_IOC1_HBEAT_REG               HOST_SEM2_INFO_REG
-#define BFA_IOC1_STATE_REG               HOST_SEM3_INFO_REG
-#define BFA_FW_USE_COUNT                 HOST_SEM4_INFO_REG
-#define BFA_IOC_FAIL_SYNC               HOST_SEM5_INFO_REG
-
-#define CPE_Q_DEPTH(__n) \
-       (CPE_Q0_DEPTH + (__n) * (CPE_Q1_DEPTH - CPE_Q0_DEPTH))
-#define CPE_Q_PI(__n) \
-       (CPE_Q0_PI + (__n) * (CPE_Q1_PI - CPE_Q0_PI))
-#define CPE_Q_CI(__n) \
-       (CPE_Q0_CI + (__n) * (CPE_Q1_CI - CPE_Q0_CI))
-#define RME_Q_DEPTH(__n) \
-       (RME_Q0_DEPTH + (__n) * (RME_Q1_DEPTH - RME_Q0_DEPTH))
-#define RME_Q_PI(__n) \
-       (RME_Q0_PI + (__n) * (RME_Q1_PI - RME_Q0_PI))
-#define RME_Q_CI(__n) \
-       (RME_Q0_CI + (__n) * (RME_Q1_CI - RME_Q0_CI))
-
-#define CPE_Q_NUM(__fn, __q)  (((__fn) << 2) + (__q))
-#define RME_Q_NUM(__fn, __q)  (((__fn) << 2) + (__q))
-#define CPE_Q_MASK(__q)  ((__q) & 0x3)
-#define RME_Q_MASK(__q)  ((__q) & 0x3)
-
-
-/*
- * PCI MSI-X vector defines
- */
-enum {
-    BFA_MSIX_CPE_Q0 = 0,
-    BFA_MSIX_CPE_Q1 = 1,
-    BFA_MSIX_CPE_Q2 = 2,
-    BFA_MSIX_CPE_Q3 = 3,
-    BFA_MSIX_CPE_Q4 = 4,
-    BFA_MSIX_CPE_Q5 = 5,
-    BFA_MSIX_CPE_Q6 = 6,
-    BFA_MSIX_CPE_Q7 = 7,
-    BFA_MSIX_RME_Q0 = 8,
-    BFA_MSIX_RME_Q1 = 9,
-    BFA_MSIX_RME_Q2 = 10,
-    BFA_MSIX_RME_Q3 = 11,
-    BFA_MSIX_RME_Q4 = 12,
-    BFA_MSIX_RME_Q5 = 13,
-    BFA_MSIX_RME_Q6 = 14,
-    BFA_MSIX_RME_Q7 = 15,
-    BFA_MSIX_ERR_EMC = 16,
-    BFA_MSIX_ERR_LPU0 = 17,
-    BFA_MSIX_ERR_LPU1 = 18,
-    BFA_MSIX_ERR_PSS = 19,
-    BFA_MSIX_MBOX_LPU0 = 20,
-    BFA_MSIX_MBOX_LPU1 = 21,
-    BFA_MSIX_CB_MAX = 22,
-};
-
-/*
- * And corresponding host interrupt status bit field defines
- */
-#define __HFN_INT_CPE_Q0                   0x00000001U
-#define __HFN_INT_CPE_Q1                   0x00000002U
-#define __HFN_INT_CPE_Q2                   0x00000004U
-#define __HFN_INT_CPE_Q3                   0x00000008U
-#define __HFN_INT_CPE_Q4                   0x00000010U
-#define __HFN_INT_CPE_Q5                   0x00000020U
-#define __HFN_INT_CPE_Q6                   0x00000040U
-#define __HFN_INT_CPE_Q7                   0x00000080U
-#define __HFN_INT_RME_Q0                   0x00000100U
-#define __HFN_INT_RME_Q1                   0x00000200U
-#define __HFN_INT_RME_Q2                   0x00000400U
-#define __HFN_INT_RME_Q3                   0x00000800U
-#define __HFN_INT_RME_Q4                   0x00001000U
-#define __HFN_INT_RME_Q5                   0x00002000U
-#define __HFN_INT_RME_Q6                   0x00004000U
-#define __HFN_INT_RME_Q7                   0x00008000U
-#define __HFN_INT_ERR_EMC                  0x00010000U
-#define __HFN_INT_ERR_LPU0                 0x00020000U
-#define __HFN_INT_ERR_LPU1                 0x00040000U
-#define __HFN_INT_ERR_PSS                  0x00080000U
-#define __HFN_INT_MBOX_LPU0                0x00100000U
-#define __HFN_INT_MBOX_LPU1                0x00200000U
-#define __HFN_INT_MBOX1_LPU0               0x00400000U
-#define __HFN_INT_MBOX1_LPU1               0x00800000U
-#define __HFN_INT_CPE_MASK                 0x000000ffU
-#define __HFN_INT_RME_MASK                 0x0000ff00U
-
-
-/*
- * crossbow memory map.
- */
-#define PSS_SMEM_PAGE_START    0x8000
-#define PSS_SMEM_PGNUM(_pg0, _ma)      ((_pg0) + ((_ma) >> 15))
-#define PSS_SMEM_PGOFF(_ma)    ((_ma) & 0x7fff)
-
-/*
- * End of crossbow memory map
- */
-
-
-#endif /* __BFI_CBREG_H__ */
-
diff --git a/drivers/scsi/bfa/bfi_ctreg.h b/drivers/scsi/bfa/bfi_ctreg.h
deleted file mode 100644 (file)
index fc4ce4a..0000000
+++ /dev/null
@@ -1,636 +0,0 @@
-
-/*
- * bfi_ctreg.h catapult host block register definitions
- *
- * !!! Do not edit. Auto generated. !!!
- */
-
-#ifndef __BFI_CTREG_H__
-#define __BFI_CTREG_H__
-
-
-#define HOSTFN0_LPU_MBOX0_0            0x00019200
-#define HOSTFN1_LPU_MBOX0_8            0x00019260
-#define LPU_HOSTFN0_MBOX0_0            0x00019280
-#define LPU_HOSTFN1_MBOX0_8            0x000192e0
-#define HOSTFN2_LPU_MBOX0_0            0x00019400
-#define HOSTFN3_LPU_MBOX0_8            0x00019460
-#define LPU_HOSTFN2_MBOX0_0            0x00019480
-#define LPU_HOSTFN3_MBOX0_8            0x000194e0
-#define HOSTFN0_INT_STATUS             0x00014000
-#define __HOSTFN0_HALT_OCCURRED                0x01000000
-#define __HOSTFN0_INT_STATUS_LVL_MK    0x00f00000
-#define __HOSTFN0_INT_STATUS_LVL_SH    20
-#define __HOSTFN0_INT_STATUS_LVL(_v)   ((_v) << __HOSTFN0_INT_STATUS_LVL_SH)
-#define __HOSTFN0_INT_STATUS_P_MK      0x000f0000
-#define __HOSTFN0_INT_STATUS_P_SH      16
-#define __HOSTFN0_INT_STATUS_P(_v)     ((_v) << __HOSTFN0_INT_STATUS_P_SH)
-#define __HOSTFN0_INT_STATUS_F         0x0000ffff
-#define HOSTFN0_INT_MSK                        0x00014004
-#define HOST_PAGE_NUM_FN0              0x00014008
-#define __HOST_PAGE_NUM_FN             0x000001ff
-#define HOST_MSIX_ERR_INDEX_FN0                0x0001400c
-#define __MSIX_ERR_INDEX_FN            0x000001ff
-#define HOSTFN1_INT_STATUS             0x00014100
-#define __HOSTFN1_HALT_OCCURRED                0x01000000
-#define __HOSTFN1_INT_STATUS_LVL_MK    0x00f00000
-#define __HOSTFN1_INT_STATUS_LVL_SH    20
-#define __HOSTFN1_INT_STATUS_LVL(_v)   ((_v) << __HOSTFN1_INT_STATUS_LVL_SH)
-#define __HOSTFN1_INT_STATUS_P_MK      0x000f0000
-#define __HOSTFN1_INT_STATUS_P_SH      16
-#define __HOSTFN1_INT_STATUS_P(_v)     ((_v) << __HOSTFN1_INT_STATUS_P_SH)
-#define __HOSTFN1_INT_STATUS_F         0x0000ffff
-#define HOSTFN1_INT_MSK                        0x00014104
-#define HOST_PAGE_NUM_FN1              0x00014108
-#define HOST_MSIX_ERR_INDEX_FN1                0x0001410c
-#define APP_PLL_425_CTL_REG            0x00014204
-#define __P_425_PLL_LOCK               0x80000000
-#define __APP_PLL_425_SRAM_USE_100MHZ  0x00100000
-#define __APP_PLL_425_RESET_TIMER_MK   0x000e0000
-#define __APP_PLL_425_RESET_TIMER_SH   17
-#define __APP_PLL_425_RESET_TIMER(_v)  ((_v) << __APP_PLL_425_RESET_TIMER_SH)
-#define __APP_PLL_425_LOGIC_SOFT_RESET 0x00010000
-#define __APP_PLL_425_CNTLMT0_1_MK     0x0000c000
-#define __APP_PLL_425_CNTLMT0_1_SH     14
-#define __APP_PLL_425_CNTLMT0_1(_v)    ((_v) << __APP_PLL_425_CNTLMT0_1_SH)
-#define __APP_PLL_425_JITLMT0_1_MK     0x00003000
-#define __APP_PLL_425_JITLMT0_1_SH     12
-#define __APP_PLL_425_JITLMT0_1(_v)    ((_v) << __APP_PLL_425_JITLMT0_1_SH)
-#define __APP_PLL_425_HREF             0x00000800
-#define __APP_PLL_425_HDIV             0x00000400
-#define __APP_PLL_425_P0_1_MK          0x00000300
-#define __APP_PLL_425_P0_1_SH          8
-#define __APP_PLL_425_P0_1(_v)         ((_v) << __APP_PLL_425_P0_1_SH)
-#define __APP_PLL_425_Z0_2_MK          0x000000e0
-#define __APP_PLL_425_Z0_2_SH          5
-#define __APP_PLL_425_Z0_2(_v)         ((_v) << __APP_PLL_425_Z0_2_SH)
-#define __APP_PLL_425_RSEL200500       0x00000010
-#define __APP_PLL_425_ENARST           0x00000008
-#define __APP_PLL_425_BYPASS           0x00000004
-#define __APP_PLL_425_LRESETN          0x00000002
-#define __APP_PLL_425_ENABLE           0x00000001
-#define APP_PLL_312_CTL_REG            0x00014208
-#define __P_312_PLL_LOCK               0x80000000
-#define __ENABLE_MAC_AHB_1             0x00800000
-#define __ENABLE_MAC_AHB_0             0x00400000
-#define __ENABLE_MAC_1                 0x00200000
-#define __ENABLE_MAC_0                 0x00100000
-#define __APP_PLL_312_RESET_TIMER_MK   0x000e0000
-#define __APP_PLL_312_RESET_TIMER_SH   17
-#define __APP_PLL_312_RESET_TIMER(_v)  ((_v) << __APP_PLL_312_RESET_TIMER_SH)
-#define __APP_PLL_312_LOGIC_SOFT_RESET 0x00010000
-#define __APP_PLL_312_CNTLMT0_1_MK     0x0000c000
-#define __APP_PLL_312_CNTLMT0_1_SH     14
-#define __APP_PLL_312_CNTLMT0_1(_v)    ((_v) << __APP_PLL_312_CNTLMT0_1_SH)
-#define __APP_PLL_312_JITLMT0_1_MK     0x00003000
-#define __APP_PLL_312_JITLMT0_1_SH     12
-#define __APP_PLL_312_JITLMT0_1(_v)    ((_v) << __APP_PLL_312_JITLMT0_1_SH)
-#define __APP_PLL_312_HREF             0x00000800
-#define __APP_PLL_312_HDIV             0x00000400
-#define __APP_PLL_312_P0_1_MK          0x00000300
-#define __APP_PLL_312_P0_1_SH          8
-#define __APP_PLL_312_P0_1(_v)         ((_v) << __APP_PLL_312_P0_1_SH)
-#define __APP_PLL_312_Z0_2_MK          0x000000e0
-#define __APP_PLL_312_Z0_2_SH          5
-#define __APP_PLL_312_Z0_2(_v)         ((_v) << __APP_PLL_312_Z0_2_SH)
-#define __APP_PLL_312_RSEL200500       0x00000010
-#define __APP_PLL_312_ENARST           0x00000008
-#define __APP_PLL_312_BYPASS           0x00000004
-#define __APP_PLL_312_LRESETN          0x00000002
-#define __APP_PLL_312_ENABLE           0x00000001
-#define MBIST_CTL_REG                  0x00014220
-#define __EDRAM_BISTR_START            0x00000004
-#define __MBIST_RESET                  0x00000002
-#define __MBIST_START                  0x00000001
-#define MBIST_STAT_REG                 0x00014224
-#define __EDRAM_BISTR_STATUS           0x00000008
-#define __EDRAM_BISTR_DONE             0x00000004
-#define __MEM_BIT_STATUS               0x00000002
-#define __MBIST_DONE                   0x00000001
-#define HOST_SEM0_REG                  0x00014230
-#define __HOST_SEMAPHORE               0x00000001
-#define HOST_SEM1_REG                  0x00014234
-#define HOST_SEM2_REG                  0x00014238
-#define HOST_SEM3_REG                  0x0001423c
-#define HOST_SEM0_INFO_REG             0x00014240
-#define HOST_SEM1_INFO_REG             0x00014244
-#define HOST_SEM2_INFO_REG             0x00014248
-#define HOST_SEM3_INFO_REG             0x0001424c
-#define ETH_MAC_SER_REG                        0x00014288
-#define __APP_EMS_CKBUFAMPIN           0x00000020
-#define __APP_EMS_REFCLKSEL            0x00000010
-#define __APP_EMS_CMLCKSEL             0x00000008
-#define __APP_EMS_REFCKBUFEN2          0x00000004
-#define __APP_EMS_REFCKBUFEN1          0x00000002
-#define __APP_EMS_CHANNEL_SEL          0x00000001
-#define HOSTFN2_INT_STATUS             0x00014300
-#define __HOSTFN2_HALT_OCCURRED                0x01000000
-#define __HOSTFN2_INT_STATUS_LVL_MK    0x00f00000
-#define __HOSTFN2_INT_STATUS_LVL_SH    20
-#define __HOSTFN2_INT_STATUS_LVL(_v)   ((_v) << __HOSTFN2_INT_STATUS_LVL_SH)
-#define __HOSTFN2_INT_STATUS_P_MK      0x000f0000
-#define __HOSTFN2_INT_STATUS_P_SH      16
-#define __HOSTFN2_INT_STATUS_P(_v)     ((_v) << __HOSTFN2_INT_STATUS_P_SH)
-#define __HOSTFN2_INT_STATUS_F         0x0000ffff
-#define HOSTFN2_INT_MSK                        0x00014304
-#define HOST_PAGE_NUM_FN2              0x00014308
-#define HOST_MSIX_ERR_INDEX_FN2                0x0001430c
-#define HOSTFN3_INT_STATUS             0x00014400
-#define __HALT_OCCURRED                        0x01000000
-#define __HOSTFN3_INT_STATUS_LVL_MK    0x00f00000
-#define __HOSTFN3_INT_STATUS_LVL_SH    20
-#define __HOSTFN3_INT_STATUS_LVL(_v)   ((_v) << __HOSTFN3_INT_STATUS_LVL_SH)
-#define __HOSTFN3_INT_STATUS_P_MK      0x000f0000
-#define __HOSTFN3_INT_STATUS_P_SH      16
-#define __HOSTFN3_INT_STATUS_P(_v)     ((_v) << __HOSTFN3_INT_STATUS_P_SH)
-#define __HOSTFN3_INT_STATUS_F         0x0000ffff
-#define HOSTFN3_INT_MSK                        0x00014404
-#define HOST_PAGE_NUM_FN3              0x00014408
-#define HOST_MSIX_ERR_INDEX_FN3                0x0001440c
-#define FNC_ID_REG                     0x00014600
-#define __FUNCTION_NUMBER              0x00000007
-#define FNC_PERS_REG                   0x00014604
-#define __F3_FUNCTION_ACTIVE           0x80000000
-#define __F3_FUNCTION_MODE             0x40000000
-#define __F3_PORT_MAP_MK               0x30000000
-#define __F3_PORT_MAP_SH               28
-#define __F3_PORT_MAP(_v)              ((_v) << __F3_PORT_MAP_SH)
-#define __F3_VM_MODE                   0x08000000
-#define __F3_INTX_STATUS_MK            0x07000000
-#define __F3_INTX_STATUS_SH            24
-#define __F3_INTX_STATUS(_v)           ((_v) << __F3_INTX_STATUS_SH)
-#define __F2_FUNCTION_ACTIVE           0x00800000
-#define __F2_FUNCTION_MODE             0x00400000
-#define __F2_PORT_MAP_MK               0x00300000
-#define __F2_PORT_MAP_SH               20
-#define __F2_PORT_MAP(_v)              ((_v) << __F2_PORT_MAP_SH)
-#define __F2_VM_MODE                   0x00080000
-#define __F2_INTX_STATUS_MK            0x00070000
-#define __F2_INTX_STATUS_SH            16
-#define __F2_INTX_STATUS(_v)           ((_v) << __F2_INTX_STATUS_SH)
-#define __F1_FUNCTION_ACTIVE           0x00008000
-#define __F1_FUNCTION_MODE             0x00004000
-#define __F1_PORT_MAP_MK               0x00003000
-#define __F1_PORT_MAP_SH               12
-#define __F1_PORT_MAP(_v)              ((_v) << __F1_PORT_MAP_SH)
-#define __F1_VM_MODE                   0x00000800
-#define __F1_INTX_STATUS_MK            0x00000700
-#define __F1_INTX_STATUS_SH            8
-#define __F1_INTX_STATUS(_v)           ((_v) << __F1_INTX_STATUS_SH)
-#define __F0_FUNCTION_ACTIVE           0x00000080
-#define __F0_FUNCTION_MODE             0x00000040
-#define __F0_PORT_MAP_MK               0x00000030
-#define __F0_PORT_MAP_SH               4
-#define __F0_PORT_MAP(_v)              ((_v) << __F0_PORT_MAP_SH)
-#define __F0_VM_MODE           0x00000008
-#define __F0_INTX_STATUS               0x00000007
-enum {
-       __F0_INTX_STATUS_MSIX           = 0x0,
-       __F0_INTX_STATUS_INTA           = 0x1,
-       __F0_INTX_STATUS_INTB           = 0x2,
-       __F0_INTX_STATUS_INTC           = 0x3,
-       __F0_INTX_STATUS_INTD           = 0x4,
-};
-#define OP_MODE                                0x0001460c
-#define __APP_ETH_CLK_LOWSPEED         0x00000004
-#define __GLOBAL_CORECLK_HALFSPEED     0x00000002
-#define __GLOBAL_FCOE_MODE             0x00000001
-#define HOST_SEM4_REG                  0x00014610
-#define HOST_SEM5_REG                  0x00014614
-#define HOST_SEM6_REG                  0x00014618
-#define HOST_SEM7_REG                  0x0001461c
-#define HOST_SEM4_INFO_REG             0x00014620
-#define HOST_SEM5_INFO_REG             0x00014624
-#define HOST_SEM6_INFO_REG             0x00014628
-#define HOST_SEM7_INFO_REG             0x0001462c
-#define HOSTFN0_LPU0_MBOX0_CMD_STAT    0x00019000
-#define __HOSTFN0_LPU0_MBOX0_INFO_MK   0xfffffffe
-#define __HOSTFN0_LPU0_MBOX0_INFO_SH   1
-#define __HOSTFN0_LPU0_MBOX0_INFO(_v)  ((_v) << __HOSTFN0_LPU0_MBOX0_INFO_SH)
-#define __HOSTFN0_LPU0_MBOX0_CMD_STATUS 0x00000001
-#define HOSTFN0_LPU1_MBOX0_CMD_STAT    0x00019004
-#define __HOSTFN0_LPU1_MBOX0_INFO_MK   0xfffffffe
-#define __HOSTFN0_LPU1_MBOX0_INFO_SH   1
-#define __HOSTFN0_LPU1_MBOX0_INFO(_v)  ((_v) << __HOSTFN0_LPU1_MBOX0_INFO_SH)
-#define __HOSTFN0_LPU1_MBOX0_CMD_STATUS 0x00000001
-#define LPU0_HOSTFN0_MBOX0_CMD_STAT    0x00019008
-#define __LPU0_HOSTFN0_MBOX0_INFO_MK   0xfffffffe
-#define __LPU0_HOSTFN0_MBOX0_INFO_SH   1
-#define __LPU0_HOSTFN0_MBOX0_INFO(_v)  ((_v) << __LPU0_HOSTFN0_MBOX0_INFO_SH)
-#define __LPU0_HOSTFN0_MBOX0_CMD_STATUS 0x00000001
-#define LPU1_HOSTFN0_MBOX0_CMD_STAT    0x0001900c
-#define __LPU1_HOSTFN0_MBOX0_INFO_MK   0xfffffffe
-#define __LPU1_HOSTFN0_MBOX0_INFO_SH   1
-#define __LPU1_HOSTFN0_MBOX0_INFO(_v)  ((_v) << __LPU1_HOSTFN0_MBOX0_INFO_SH)
-#define __LPU1_HOSTFN0_MBOX0_CMD_STATUS 0x00000001
-#define HOSTFN1_LPU0_MBOX0_CMD_STAT    0x00019010
-#define __HOSTFN1_LPU0_MBOX0_INFO_MK   0xfffffffe
-#define __HOSTFN1_LPU0_MBOX0_INFO_SH   1
-#define __HOSTFN1_LPU0_MBOX0_INFO(_v)  ((_v) << __HOSTFN1_LPU0_MBOX0_INFO_SH)
-#define __HOSTFN1_LPU0_MBOX0_CMD_STATUS 0x00000001
-#define HOSTFN1_LPU1_MBOX0_CMD_STAT    0x00019014
-#define __HOSTFN1_LPU1_MBOX0_INFO_MK   0xfffffffe
-#define __HOSTFN1_LPU1_MBOX0_INFO_SH   1
-#define __HOSTFN1_LPU1_MBOX0_INFO(_v)  ((_v) << __HOSTFN1_LPU1_MBOX0_INFO_SH)
-#define __HOSTFN1_LPU1_MBOX0_CMD_STATUS 0x00000001
-#define LPU0_HOSTFN1_MBOX0_CMD_STAT    0x00019018
-#define __LPU0_HOSTFN1_MBOX0_INFO_MK   0xfffffffe
-#define __LPU0_HOSTFN1_MBOX0_INFO_SH   1
-#define __LPU0_HOSTFN1_MBOX0_INFO(_v)  ((_v) << __LPU0_HOSTFN1_MBOX0_INFO_SH)
-#define __LPU0_HOSTFN1_MBOX0_CMD_STATUS 0x00000001
-#define LPU1_HOSTFN1_MBOX0_CMD_STAT    0x0001901c
-#define __LPU1_HOSTFN1_MBOX0_INFO_MK   0xfffffffe
-#define __LPU1_HOSTFN1_MBOX0_INFO_SH   1
-#define __LPU1_HOSTFN1_MBOX0_INFO(_v)  ((_v) << __LPU1_HOSTFN1_MBOX0_INFO_SH)
-#define __LPU1_HOSTFN1_MBOX0_CMD_STATUS 0x00000001
-#define HOSTFN2_LPU0_MBOX0_CMD_STAT    0x00019150
-#define __HOSTFN2_LPU0_MBOX0_INFO_MK   0xfffffffe
-#define __HOSTFN2_LPU0_MBOX0_INFO_SH   1
-#define __HOSTFN2_LPU0_MBOX0_INFO(_v)  ((_v) << __HOSTFN2_LPU0_MBOX0_INFO_SH)
-#define __HOSTFN2_LPU0_MBOX0_CMD_STATUS 0x00000001
-#define HOSTFN2_LPU1_MBOX0_CMD_STAT    0x00019154
-#define __HOSTFN2_LPU1_MBOX0_INFO_MK   0xfffffffe
-#define __HOSTFN2_LPU1_MBOX0_INFO_SH   1
-#define __HOSTFN2_LPU1_MBOX0_INFO(_v)  ((_v) << __HOSTFN2_LPU1_MBOX0_INFO_SH)
-#define __HOSTFN2_LPU1_MBOX0BOX0_CMD_STATUS 0x00000001
-#define LPU0_HOSTFN2_MBOX0_CMD_STAT    0x00019158
-#define __LPU0_HOSTFN2_MBOX0_INFO_MK   0xfffffffe
-#define __LPU0_HOSTFN2_MBOX0_INFO_SH   1
-#define __LPU0_HOSTFN2_MBOX0_INFO(_v)  ((_v) << __LPU0_HOSTFN2_MBOX0_INFO_SH)
-#define __LPU0_HOSTFN2_MBOX0_CMD_STATUS 0x00000001
-#define LPU1_HOSTFN2_MBOX0_CMD_STAT    0x0001915c
-#define __LPU1_HOSTFN2_MBOX0_INFO_MK   0xfffffffe
-#define __LPU1_HOSTFN2_MBOX0_INFO_SH   1
-#define __LPU1_HOSTFN2_MBOX0_INFO(_v)  ((_v) << __LPU1_HOSTFN2_MBOX0_INFO_SH)
-#define __LPU1_HOSTFN2_MBOX0_CMD_STATUS 0x00000001
-#define HOSTFN3_LPU0_MBOX0_CMD_STAT    0x00019160
-#define __HOSTFN3_LPU0_MBOX0_INFO_MK   0xfffffffe
-#define __HOSTFN3_LPU0_MBOX0_INFO_SH   1
-#define __HOSTFN3_LPU0_MBOX0_INFO(_v)  ((_v) << __HOSTFN3_LPU0_MBOX0_INFO_SH)
-#define __HOSTFN3_LPU0_MBOX0_CMD_STATUS 0x00000001
-#define HOSTFN3_LPU1_MBOX0_CMD_STAT    0x00019164
-#define __HOSTFN3_LPU1_MBOX0_INFO_MK   0xfffffffe
-#define __HOSTFN3_LPU1_MBOX0_INFO_SH   1
-#define __HOSTFN3_LPU1_MBOX0_INFO(_v)  ((_v) << __HOSTFN3_LPU1_MBOX0_INFO_SH)
-#define __HOSTFN3_LPU1_MBOX0_CMD_STATUS 0x00000001
-#define LPU0_HOSTFN3_MBOX0_CMD_STAT    0x00019168
-#define __LPU0_HOSTFN3_MBOX0_INFO_MK   0xfffffffe
-#define __LPU0_HOSTFN3_MBOX0_INFO_SH   1
-#define __LPU0_HOSTFN3_MBOX0_INFO(_v)  ((_v) << __LPU0_HOSTFN3_MBOX0_INFO_SH)
-#define __LPU0_HOSTFN3_MBOX0_CMD_STATUS 0x00000001
-#define LPU1_HOSTFN3_MBOX0_CMD_STAT    0x0001916c
-#define __LPU1_HOSTFN3_MBOX0_INFO_MK   0xfffffffe
-#define __LPU1_HOSTFN3_MBOX0_INFO_SH   1
-#define __LPU1_HOSTFN3_MBOX0_INFO(_v)  ((_v) << __LPU1_HOSTFN3_MBOX0_INFO_SH)
-#define __LPU1_HOSTFN3_MBOX0_CMD_STATUS        0x00000001
-#define FW_INIT_HALT_P0                        0x000191ac
-#define __FW_INIT_HALT_P               0x00000001
-#define FW_INIT_HALT_P1                        0x000191bc
-#define CPE_PI_PTR_Q0                  0x00038000
-#define __CPE_PI_UNUSED_MK             0xffff0000
-#define __CPE_PI_UNUSED_SH             16
-#define __CPE_PI_UNUSED(_v)            ((_v) << __CPE_PI_UNUSED_SH)
-#define __CPE_PI_PTR                   0x0000ffff
-#define CPE_PI_PTR_Q1                  0x00038040
-#define CPE_CI_PTR_Q0                  0x00038004
-#define __CPE_CI_UNUSED_MK             0xffff0000
-#define __CPE_CI_UNUSED_SH             16
-#define __CPE_CI_UNUSED(_v)            ((_v) << __CPE_CI_UNUSED_SH)
-#define __CPE_CI_PTR                   0x0000ffff
-#define CPE_CI_PTR_Q1                  0x00038044
-#define CPE_DEPTH_Q0                   0x00038008
-#define __CPE_DEPTH_UNUSED_MK          0xf8000000
-#define __CPE_DEPTH_UNUSED_SH          27
-#define __CPE_DEPTH_UNUSED(_v)         ((_v) << __CPE_DEPTH_UNUSED_SH)
-#define __CPE_MSIX_VEC_INDEX_MK                0x07ff0000
-#define __CPE_MSIX_VEC_INDEX_SH                16
-#define __CPE_MSIX_VEC_INDEX(_v)       ((_v) << __CPE_MSIX_VEC_INDEX_SH)
-#define __CPE_DEPTH                    0x0000ffff
-#define CPE_DEPTH_Q1                   0x00038048
-#define CPE_QCTRL_Q0                   0x0003800c
-#define __CPE_CTRL_UNUSED30_MK         0xfc000000
-#define __CPE_CTRL_UNUSED30_SH         26
-#define __CPE_CTRL_UNUSED30(_v)                ((_v) << __CPE_CTRL_UNUSED30_SH)
-#define __CPE_FUNC_INT_CTRL_MK         0x03000000
-#define __CPE_FUNC_INT_CTRL_SH         24
-#define __CPE_FUNC_INT_CTRL(_v)                ((_v) << __CPE_FUNC_INT_CTRL_SH)
-enum {
-       __CPE_FUNC_INT_CTRL_DISABLE             = 0x0,
-       __CPE_FUNC_INT_CTRL_F2NF                = 0x1,
-       __CPE_FUNC_INT_CTRL_3QUART              = 0x2,
-       __CPE_FUNC_INT_CTRL_HALF                = 0x3,
-};
-#define __CPE_CTRL_UNUSED20_MK         0x00f00000
-#define __CPE_CTRL_UNUSED20_SH         20
-#define __CPE_CTRL_UNUSED20(_v)                ((_v) << __CPE_CTRL_UNUSED20_SH)
-#define __CPE_SCI_TH_MK                        0x000f0000
-#define __CPE_SCI_TH_SH                        16
-#define __CPE_SCI_TH(_v)               ((_v) << __CPE_SCI_TH_SH)
-#define __CPE_CTRL_UNUSED10_MK         0x0000c000
-#define __CPE_CTRL_UNUSED10_SH         14
-#define __CPE_CTRL_UNUSED10(_v)                ((_v) << __CPE_CTRL_UNUSED10_SH)
-#define __CPE_ACK_PENDING              0x00002000
-#define __CPE_CTRL_UNUSED40_MK         0x00001c00
-#define __CPE_CTRL_UNUSED40_SH         10
-#define __CPE_CTRL_UNUSED40(_v)                ((_v) << __CPE_CTRL_UNUSED40_SH)
-#define __CPE_PCIEID_MK                        0x00000300
-#define __CPE_PCIEID_SH                        8
-#define __CPE_PCIEID(_v)               ((_v) << __CPE_PCIEID_SH)
-#define __CPE_CTRL_UNUSED00_MK         0x000000fe
-#define __CPE_CTRL_UNUSED00_SH         1
-#define __CPE_CTRL_UNUSED00(_v)                ((_v) << __CPE_CTRL_UNUSED00_SH)
-#define __CPE_ESIZE                    0x00000001
-#define CPE_QCTRL_Q1                   0x0003804c
-#define __CPE_CTRL_UNUSED31_MK         0xfc000000
-#define __CPE_CTRL_UNUSED31_SH         26
-#define __CPE_CTRL_UNUSED31(_v)                ((_v) << __CPE_CTRL_UNUSED31_SH)
-#define __CPE_CTRL_UNUSED21_MK         0x00f00000
-#define __CPE_CTRL_UNUSED21_SH         20
-#define __CPE_CTRL_UNUSED21(_v)                ((_v) << __CPE_CTRL_UNUSED21_SH)
-#define __CPE_CTRL_UNUSED11_MK         0x0000c000
-#define __CPE_CTRL_UNUSED11_SH         14
-#define __CPE_CTRL_UNUSED11(_v)                ((_v) << __CPE_CTRL_UNUSED11_SH)
-#define __CPE_CTRL_UNUSED41_MK         0x00001c00
-#define __CPE_CTRL_UNUSED41_SH         10
-#define __CPE_CTRL_UNUSED41(_v)                ((_v) << __CPE_CTRL_UNUSED41_SH)
-#define __CPE_CTRL_UNUSED01_MK         0x000000fe
-#define __CPE_CTRL_UNUSED01_SH         1
-#define __CPE_CTRL_UNUSED01(_v)                ((_v) << __CPE_CTRL_UNUSED01_SH)
-#define RME_PI_PTR_Q0                  0x00038020
-#define __LATENCY_TIME_STAMP_MK                0xffff0000
-#define __LATENCY_TIME_STAMP_SH                16
-#define __LATENCY_TIME_STAMP(_v)       ((_v) << __LATENCY_TIME_STAMP_SH)
-#define __RME_PI_PTR                   0x0000ffff
-#define RME_PI_PTR_Q1                  0x00038060
-#define RME_CI_PTR_Q0                  0x00038024
-#define __DELAY_TIME_STAMP_MK          0xffff0000
-#define __DELAY_TIME_STAMP_SH          16
-#define __DELAY_TIME_STAMP(_v)         ((_v) << __DELAY_TIME_STAMP_SH)
-#define __RME_CI_PTR                   0x0000ffff
-#define RME_CI_PTR_Q1                  0x00038064
-#define RME_DEPTH_Q0                   0x00038028
-#define __RME_DEPTH_UNUSED_MK          0xf8000000
-#define __RME_DEPTH_UNUSED_SH          27
-#define __RME_DEPTH_UNUSED(_v)         ((_v) << __RME_DEPTH_UNUSED_SH)
-#define __RME_MSIX_VEC_INDEX_MK                0x07ff0000
-#define __RME_MSIX_VEC_INDEX_SH                16
-#define __RME_MSIX_VEC_INDEX(_v)       ((_v) << __RME_MSIX_VEC_INDEX_SH)
-#define __RME_DEPTH                    0x0000ffff
-#define RME_DEPTH_Q1                   0x00038068
-#define RME_QCTRL_Q0                   0x0003802c
-#define __RME_INT_LATENCY_TIMER_MK     0xff000000
-#define __RME_INT_LATENCY_TIMER_SH     24
-#define __RME_INT_LATENCY_TIMER(_v)    ((_v) << __RME_INT_LATENCY_TIMER_SH)
-#define __RME_INT_DELAY_TIMER_MK       0x00ff0000
-#define __RME_INT_DELAY_TIMER_SH       16
-#define __RME_INT_DELAY_TIMER(_v)      ((_v) << __RME_INT_DELAY_TIMER_SH)
-#define __RME_INT_DELAY_DISABLE                0x00008000
-#define __RME_DLY_DELAY_DISABLE                0x00004000
-#define __RME_ACK_PENDING              0x00002000
-#define __RME_FULL_INTERRUPT_DISABLE   0x00001000
-#define __RME_CTRL_UNUSED10_MK         0x00000c00
-#define __RME_CTRL_UNUSED10_SH         10
-#define __RME_CTRL_UNUSED10(_v)                ((_v) << __RME_CTRL_UNUSED10_SH)
-#define __RME_PCIEID_MK                        0x00000300
-#define __RME_PCIEID_SH                        8
-#define __RME_PCIEID(_v)               ((_v) << __RME_PCIEID_SH)
-#define __RME_CTRL_UNUSED00_MK         0x000000fe
-#define __RME_CTRL_UNUSED00_SH         1
-#define __RME_CTRL_UNUSED00(_v)                ((_v) << __RME_CTRL_UNUSED00_SH)
-#define __RME_ESIZE                    0x00000001
-#define RME_QCTRL_Q1                   0x0003806c
-#define __RME_CTRL_UNUSED11_MK         0x00000c00
-#define __RME_CTRL_UNUSED11_SH         10
-#define __RME_CTRL_UNUSED11(_v)                ((_v) << __RME_CTRL_UNUSED11_SH)
-#define __RME_CTRL_UNUSED01_MK         0x000000fe
-#define __RME_CTRL_UNUSED01_SH         1
-#define __RME_CTRL_UNUSED01(_v)                ((_v) << __RME_CTRL_UNUSED01_SH)
-#define PSS_CTL_REG                    0x00018800
-#define __PSS_I2C_CLK_DIV_MK           0x007f0000
-#define __PSS_I2C_CLK_DIV_SH           16
-#define __PSS_I2C_CLK_DIV(_v)          ((_v) << __PSS_I2C_CLK_DIV_SH)
-#define __PSS_LMEM_INIT_DONE           0x00001000
-#define __PSS_LMEM_RESET               0x00000200
-#define __PSS_LMEM_INIT_EN             0x00000100
-#define __PSS_LPU1_RESET               0x00000002
-#define __PSS_LPU0_RESET               0x00000001
-#define PSS_ERR_STATUS_REG             0x00018810
-#define __PSS_LPU1_TCM_READ_ERR                0x00200000
-#define __PSS_LPU0_TCM_READ_ERR                0x00100000
-#define __PSS_LMEM5_CORR_ERR           0x00080000
-#define __PSS_LMEM4_CORR_ERR           0x00040000
-#define __PSS_LMEM3_CORR_ERR           0x00020000
-#define __PSS_LMEM2_CORR_ERR           0x00010000
-#define __PSS_LMEM1_CORR_ERR           0x00008000
-#define __PSS_LMEM0_CORR_ERR           0x00004000
-#define __PSS_LMEM5_UNCORR_ERR         0x00002000
-#define __PSS_LMEM4_UNCORR_ERR         0x00001000
-#define __PSS_LMEM3_UNCORR_ERR         0x00000800
-#define __PSS_LMEM2_UNCORR_ERR         0x00000400
-#define __PSS_LMEM1_UNCORR_ERR         0x00000200
-#define __PSS_LMEM0_UNCORR_ERR         0x00000100
-#define __PSS_BAL_PERR                 0x00000080
-#define __PSS_DIP_IF_ERR               0x00000040
-#define __PSS_IOH_IF_ERR               0x00000020
-#define __PSS_TDS_IF_ERR               0x00000010
-#define __PSS_RDS_IF_ERR               0x00000008
-#define __PSS_SGM_IF_ERR               0x00000004
-#define __PSS_LPU1_RAM_ERR             0x00000002
-#define __PSS_LPU0_RAM_ERR             0x00000001
-#define ERR_SET_REG                    0x00018818
-#define __PSS_ERR_STATUS_SET           0x003fffff
-#define PMM_1T_RESET_REG_P0            0x0002381c
-#define __PMM_1T_RESET_P               0x00000001
-#define PMM_1T_RESET_REG_P1            0x00023c1c
-#define HQM_QSET0_RXQ_DRBL_P0          0x00038000
-#define __RXQ0_ADD_VECTORS_P           0x80000000
-#define __RXQ0_STOP_P                  0x40000000
-#define __RXQ0_PRD_PTR_P               0x0000ffff
-#define HQM_QSET1_RXQ_DRBL_P0          0x00038080
-#define __RXQ1_ADD_VECTORS_P           0x80000000
-#define __RXQ1_STOP_P                  0x40000000
-#define __RXQ1_PRD_PTR_P               0x0000ffff
-#define HQM_QSET0_RXQ_DRBL_P1          0x0003c000
-#define HQM_QSET1_RXQ_DRBL_P1          0x0003c080
-#define HQM_QSET0_TXQ_DRBL_P0          0x00038020
-#define __TXQ0_ADD_VECTORS_P           0x80000000
-#define __TXQ0_STOP_P                  0x40000000
-#define __TXQ0_PRD_PTR_P               0x0000ffff
-#define HQM_QSET1_TXQ_DRBL_P0          0x000380a0
-#define __TXQ1_ADD_VECTORS_P           0x80000000
-#define __TXQ1_STOP_P                  0x40000000
-#define __TXQ1_PRD_PTR_P               0x0000ffff
-#define HQM_QSET0_TXQ_DRBL_P1          0x0003c020
-#define HQM_QSET1_TXQ_DRBL_P1          0x0003c0a0
-#define HQM_QSET0_IB_DRBL_1_P0         0x00038040
-#define __IB1_0_ACK_P                  0x80000000
-#define __IB1_0_DISABLE_P              0x40000000
-#define __IB1_0_COALESCING_CFG_P_MK    0x00ff0000
-#define __IB1_0_COALESCING_CFG_P_SH    16
-#define __IB1_0_COALESCING_CFG_P(_v)   ((_v) << __IB1_0_COALESCING_CFG_P_SH)
-#define __IB1_0_NUM_OF_ACKED_EVENTS_P  0x0000ffff
-#define HQM_QSET1_IB_DRBL_1_P0         0x000380c0
-#define __IB1_1_ACK_P                  0x80000000
-#define __IB1_1_DISABLE_P              0x40000000
-#define __IB1_1_COALESCING_CFG_P_MK    0x00ff0000
-#define __IB1_1_COALESCING_CFG_P_SH    16
-#define __IB1_1_COALESCING_CFG_P(_v)   ((_v) << __IB1_1_COALESCING_CFG_P_SH)
-#define __IB1_1_NUM_OF_ACKED_EVENTS_P  0x0000ffff
-#define HQM_QSET0_IB_DRBL_1_P1         0x0003c040
-#define HQM_QSET1_IB_DRBL_1_P1         0x0003c0c0
-#define HQM_QSET0_IB_DRBL_2_P0         0x00038060
-#define __IB2_0_ACK_P                  0x80000000
-#define __IB2_0_DISABLE_P              0x40000000
-#define __IB2_0_COALESCING_CFG_P_MK    0x00ff0000
-#define __IB2_0_COALESCING_CFG_P_SH    16
-#define __IB2_0_COALESCING_CFG_P(_v)   ((_v) << __IB2_0_COALESCING_CFG_P_SH)
-#define __IB2_0_NUM_OF_ACKED_EVENTS_P  0x0000ffff
-#define HQM_QSET1_IB_DRBL_2_P0         0x000380e0
-#define __IB2_1_ACK_P                  0x80000000
-#define __IB2_1_DISABLE_P              0x40000000
-#define __IB2_1_COALESCING_CFG_P_MK    0x00ff0000
-#define __IB2_1_COALESCING_CFG_P_SH    16
-#define __IB2_1_COALESCING_CFG_P(_v)   ((_v) << __IB2_1_COALESCING_CFG_P_SH)
-#define __IB2_1_NUM_OF_ACKED_EVENTS_P  0x0000ffff
-#define HQM_QSET0_IB_DRBL_2_P1         0x0003c060
-#define HQM_QSET1_IB_DRBL_2_P1         0x0003c0e0
-
-
-/*
- * These definitions are either in error/missing in spec. Its auto-generated
- * from hard coded values in regparse.pl.
- */
-#define __EMPHPOST_AT_4G_MK_FIX                0x0000001c
-#define __EMPHPOST_AT_4G_SH_FIX                0x00000002
-#define __EMPHPRE_AT_4G_FIX            0x00000003
-#define __SFP_TXRATE_EN_FIX            0x00000100
-#define __SFP_RXRATE_EN_FIX            0x00000080
-
-
-/*
- * These register definitions are auto-generated from hard coded values
- * in regparse.pl.
- */
-
-
-/*
- * These register mapping definitions are auto-generated from mapping tables
- * in regparse.pl.
- */
-#define BFA_IOC0_HBEAT_REG             HOST_SEM0_INFO_REG
-#define BFA_IOC0_STATE_REG             HOST_SEM1_INFO_REG
-#define BFA_IOC1_HBEAT_REG             HOST_SEM2_INFO_REG
-#define BFA_IOC1_STATE_REG             HOST_SEM3_INFO_REG
-#define BFA_FW_USE_COUNT                HOST_SEM4_INFO_REG
-#define BFA_IOC_FAIL_SYNC              HOST_SEM5_INFO_REG
-
-#define CPE_DEPTH_Q(__n) \
-       (CPE_DEPTH_Q0 + (__n) * (CPE_DEPTH_Q1 - CPE_DEPTH_Q0))
-#define CPE_QCTRL_Q(__n) \
-       (CPE_QCTRL_Q0 + (__n) * (CPE_QCTRL_Q1 - CPE_QCTRL_Q0))
-#define CPE_PI_PTR_Q(__n) \
-       (CPE_PI_PTR_Q0 + (__n) * (CPE_PI_PTR_Q1 - CPE_PI_PTR_Q0))
-#define CPE_CI_PTR_Q(__n) \
-       (CPE_CI_PTR_Q0 + (__n) * (CPE_CI_PTR_Q1 - CPE_CI_PTR_Q0))
-#define RME_DEPTH_Q(__n) \
-       (RME_DEPTH_Q0 + (__n) * (RME_DEPTH_Q1 - RME_DEPTH_Q0))
-#define RME_QCTRL_Q(__n) \
-       (RME_QCTRL_Q0 + (__n) * (RME_QCTRL_Q1 - RME_QCTRL_Q0))
-#define RME_PI_PTR_Q(__n) \
-       (RME_PI_PTR_Q0 + (__n) * (RME_PI_PTR_Q1 - RME_PI_PTR_Q0))
-#define RME_CI_PTR_Q(__n) \
-       (RME_CI_PTR_Q0 + (__n) * (RME_CI_PTR_Q1 - RME_CI_PTR_Q0))
-#define HQM_QSET_RXQ_DRBL_P0(__n) \
-       (HQM_QSET0_RXQ_DRBL_P0 + (__n) *        \
-       (HQM_QSET1_RXQ_DRBL_P0 - HQM_QSET0_RXQ_DRBL_P0))
-#define HQM_QSET_TXQ_DRBL_P0(__n) \
-       (HQM_QSET0_TXQ_DRBL_P0 + (__n) *        \
-       (HQM_QSET1_TXQ_DRBL_P0 - HQM_QSET0_TXQ_DRBL_P0))
-#define HQM_QSET_IB_DRBL_1_P0(__n) \
-       (HQM_QSET0_IB_DRBL_1_P0 + (__n) *       \
-       (HQM_QSET1_IB_DRBL_1_P0 - HQM_QSET0_IB_DRBL_1_P0))
-#define HQM_QSET_IB_DRBL_2_P0(__n) \
-       (HQM_QSET0_IB_DRBL_2_P0 + (__n) *       \
-       (HQM_QSET1_IB_DRBL_2_P0 - HQM_QSET0_IB_DRBL_2_P0))
-#define HQM_QSET_RXQ_DRBL_P1(__n) \
-       (HQM_QSET0_RXQ_DRBL_P1 + (__n) *        \
-       (HQM_QSET1_RXQ_DRBL_P1 - HQM_QSET0_RXQ_DRBL_P1))
-#define HQM_QSET_TXQ_DRBL_P1(__n) \
-       (HQM_QSET0_TXQ_DRBL_P1 + (__n) *        \
-       (HQM_QSET1_TXQ_DRBL_P1 - HQM_QSET0_TXQ_DRBL_P1))
-#define HQM_QSET_IB_DRBL_1_P1(__n) \
-       (HQM_QSET0_IB_DRBL_1_P1 + (__n) *       \
-       (HQM_QSET1_IB_DRBL_1_P1 - HQM_QSET0_IB_DRBL_1_P1))
-#define HQM_QSET_IB_DRBL_2_P1(__n) \
-       (HQM_QSET0_IB_DRBL_2_P1 + (__n) *       \
-       (HQM_QSET1_IB_DRBL_2_P1 - HQM_QSET0_IB_DRBL_2_P1))
-
-#define CPE_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))
-#define RME_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))
-#define CPE_Q_MASK(__q) ((__q) & 0x3)
-#define RME_Q_MASK(__q) ((__q) & 0x3)
-
-
-/*
- * PCI MSI-X vector defines
- */
-enum {
-       BFA_MSIX_CPE_Q0 = 0,
-       BFA_MSIX_CPE_Q1 = 1,
-       BFA_MSIX_CPE_Q2 = 2,
-       BFA_MSIX_CPE_Q3 = 3,
-       BFA_MSIX_RME_Q0 = 4,
-       BFA_MSIX_RME_Q1 = 5,
-       BFA_MSIX_RME_Q2 = 6,
-       BFA_MSIX_RME_Q3 = 7,
-       BFA_MSIX_LPU_ERR = 8,
-       BFA_MSIX_CT_MAX = 9,
-};
-
-/*
- * And corresponding host interrupt status bit field defines
- */
-#define __HFN_INT_CPE_Q0               0x00000001U
-#define __HFN_INT_CPE_Q1               0x00000002U
-#define __HFN_INT_CPE_Q2               0x00000004U
-#define __HFN_INT_CPE_Q3               0x00000008U
-#define __HFN_INT_CPE_Q4               0x00000010U
-#define __HFN_INT_CPE_Q5               0x00000020U
-#define __HFN_INT_CPE_Q6               0x00000040U
-#define __HFN_INT_CPE_Q7               0x00000080U
-#define __HFN_INT_RME_Q0               0x00000100U
-#define __HFN_INT_RME_Q1               0x00000200U
-#define __HFN_INT_RME_Q2               0x00000400U
-#define __HFN_INT_RME_Q3               0x00000800U
-#define __HFN_INT_RME_Q4               0x00001000U
-#define __HFN_INT_RME_Q5               0x00002000U
-#define __HFN_INT_RME_Q6               0x00004000U
-#define __HFN_INT_RME_Q7               0x00008000U
-#define __HFN_INT_ERR_EMC              0x00010000U
-#define __HFN_INT_ERR_LPU0             0x00020000U
-#define __HFN_INT_ERR_LPU1             0x00040000U
-#define __HFN_INT_ERR_PSS              0x00080000U
-#define __HFN_INT_MBOX_LPU0            0x00100000U
-#define __HFN_INT_MBOX_LPU1            0x00200000U
-#define __HFN_INT_MBOX1_LPU0           0x00400000U
-#define __HFN_INT_MBOX1_LPU1           0x00800000U
-#define __HFN_INT_LL_HALT              0x01000000U
-#define __HFN_INT_CPE_MASK             0x000000ffU
-#define __HFN_INT_RME_MASK             0x0000ff00U
-
-
-/*
- * catapult memory map.
- */
-#define LL_PGN_HQM0            0x0096
-#define LL_PGN_HQM1            0x0097
-#define PSS_SMEM_PAGE_START    0x8000
-#define PSS_SMEM_PGNUM(_pg0, _ma)      ((_pg0) + ((_ma) >> 15))
-#define PSS_SMEM_PGOFF(_ma)    ((_ma) & 0x7fff)
-
-/*
- * End of catapult memory map
- */
-
-
-#endif /* __BFI_CTREG_H__ */
index 19e888a..0d9f1fb 100644 (file)
@@ -28,11 +28,17 @@ enum bfi_iocfc_h2i_msgs {
        BFI_IOCFC_H2I_CFG_REQ           = 1,
        BFI_IOCFC_H2I_SET_INTR_REQ      = 2,
        BFI_IOCFC_H2I_UPDATEQ_REQ       = 3,
+       BFI_IOCFC_H2I_FAA_ENABLE_REQ    = 4,
+       BFI_IOCFC_H2I_FAA_DISABLE_REQ   = 5,
+       BFI_IOCFC_H2I_FAA_QUERY_REQ     = 6,
 };
 
 enum bfi_iocfc_i2h_msgs {
        BFI_IOCFC_I2H_CFG_REPLY         = BFA_I2HM(1),
        BFI_IOCFC_I2H_UPDATEQ_RSP       = BFA_I2HM(3),
+       BFI_IOCFC_I2H_FAA_ENABLE_RSP    = BFA_I2HM(4),
+       BFI_IOCFC_I2H_FAA_DISABLE_RSP   = BFA_I2HM(5),
+       BFI_IOCFC_I2H_FAA_QUERY_RSP     = BFA_I2HM(6),
 };
 
 struct bfi_iocfc_cfg_s {
@@ -40,6 +46,12 @@ struct bfi_iocfc_cfg_s {
        u8       sense_buf_len; /*  SCSI sense length       */
        u16     rsvd_1;
        u32     endian_sig;     /*  endian signature of host     */
+       u8      rsvd_2;
+       u8      single_msix_vec;
+       u8      rsvd[2];
+       __be16  num_ioim_reqs;
+       __be16  num_fwtio_reqs;
+
 
        /*
         * Request and response circular queue base addresses, size and
@@ -54,7 +66,8 @@ struct bfi_iocfc_cfg_s {
 
        union bfi_addr_u  stats_addr;   /*  DMA-able address for stats    */
        union bfi_addr_u  cfgrsp_addr;  /*  config response dma address  */
-       union bfi_addr_u  ioim_snsbase;  /*  IO sense buffer base address */
+       union bfi_addr_u  ioim_snsbase[BFI_IOIM_SNSBUF_SEGS];
+                                       /*  IO sense buf base addr segments */
        struct bfa_iocfc_intr_attr_s intr_attr; /*  IOC interrupt attributes */
 };
 
@@ -68,11 +81,25 @@ struct bfi_iocfc_bootwwns {
        u8              rsvd[7];
 };
 
+/**
+ * Queue configuration response from firmware
+ */
+struct bfi_iocfc_qreg_s {
+       u32     cpe_q_ci_off[BFI_IOC_MAX_CQS];
+       u32     cpe_q_pi_off[BFI_IOC_MAX_CQS];
+       u32     cpe_qctl_off[BFI_IOC_MAX_CQS];
+       u32     rme_q_ci_off[BFI_IOC_MAX_CQS];
+       u32     rme_q_pi_off[BFI_IOC_MAX_CQS];
+       u32     rme_qctl_off[BFI_IOC_MAX_CQS];
+       u8      hw_qid[BFI_IOC_MAX_CQS];
+};
+
 struct bfi_iocfc_cfgrsp_s {
        struct bfa_iocfc_fwcfg_s        fwcfg;
        struct bfa_iocfc_intr_attr_s    intr_attr;
        struct bfi_iocfc_bootwwns       bootwwns;
        struct bfi_pbc_s                pbc_cfg;
+       struct bfi_iocfc_qreg_s         qreg;
 };
 
 /*
@@ -150,6 +177,37 @@ union bfi_iocfc_i2h_msg_u {
        u32 mboxmsg[BFI_IOC_MSGSZ];
 };
 
+/*
+ * BFI_IOCFC_H2I_FAA_ENABLE_REQ BFI_IOCFC_H2I_FAA_DISABLE_REQ message
+ */
+struct bfi_faa_en_dis_s {
+       struct bfi_mhdr_s mh;   /* common msg header    */
+};
+
+/*
+ * BFI_IOCFC_H2I_FAA_QUERY_REQ message
+ */
+struct bfi_faa_query_s {
+       struct bfi_mhdr_s mh;   /* common msg header    */
+       u8      faa_status;     /* FAA status           */
+       u8      addr_source;    /* PWWN source          */
+       u8      rsvd[2];
+       wwn_t   faa;            /* Fabric acquired PWWN */
+};
+
+/*
+ * BFI_IOCFC_I2H_FAA_ENABLE_RSP, BFI_IOCFC_I2H_FAA_DISABLE_RSP message
+ */
+struct bfi_faa_en_dis_rsp_s {
+       struct bfi_mhdr_s mh;   /* common msg header    */
+       u8      status;         /* updateq  status      */
+       u8      rsvd[3];
+};
+
+/*
+ * BFI_IOCFC_I2H_FAA_QUERY_RSP message
+ */
+#define bfi_faa_query_rsp_t struct bfi_faa_query_s
 
 enum bfi_fcport_h2i {
        BFI_FCPORT_H2I_ENABLE_REQ               = (1),
@@ -213,7 +271,8 @@ struct bfi_fcport_enable_req_s {
 struct bfi_fcport_set_svc_params_req_s {
        struct bfi_mhdr_s  mh;          /*  msg header */
        __be16     tx_bbcredit; /*  Tx credits */
-       u16        rsvd;
+       u8      bb_scn;         /* BB_SC FC credit recovery */
+       u8      rsvd;
 };
 
 /*
@@ -293,12 +352,12 @@ struct bfi_fcxp_send_req_s {
        u8       class;         /*  FC class used for req/rsp       */
        u8       rsp_timeout;   /*  timeout in secs, 0-no response */
        u8       cts;           /*  continue sequence               */
-       u8       lp_tag;        /*  lport tag                       */
+       u8       lp_fwtag;      /*  lport tag                       */
        struct fchs_s   fchs;   /*  request FC header structure    */
        __be32  req_len;        /*  request payload length          */
        __be32  rsp_maxlen;     /*  max response length expected   */
-       struct bfi_sge_s   req_sge[BFA_FCXP_MAX_SGES];  /*  request buf    */
-       struct bfi_sge_s   rsp_sge[BFA_FCXP_MAX_SGES];  /*  response buf   */
+       struct bfi_alen_s req_alen;     /* request buffer       */
+       struct bfi_alen_s rsp_alen;     /* response buffer      */
 };
 
 /*
@@ -328,7 +387,7 @@ struct bfi_uf_buf_post_s {
        struct bfi_mhdr_s  mh;          /*  Common msg header           */
        u16     buf_tag;        /*  buffer tag                  */
        __be16  buf_len;        /*  total buffer length */
-       struct bfi_sge_s   sge[BFA_UF_MAX_SGES]; /*  buffer DMA SGEs    */
+       struct bfi_alen_s alen; /* buffer address/len pair      */
 };
 
 struct bfi_uf_frm_rcvd_s {
@@ -346,26 +405,27 @@ enum bfi_lps_h2i_msgs {
 };
 
 enum bfi_lps_i2h_msgs {
-       BFI_LPS_H2I_LOGIN_RSP   = BFA_I2HM(1),
-       BFI_LPS_H2I_LOGOUT_RSP  = BFA_I2HM(2),
-       BFI_LPS_H2I_CVL_EVENT   = BFA_I2HM(3),
+       BFI_LPS_I2H_LOGIN_RSP   = BFA_I2HM(1),
+       BFI_LPS_I2H_LOGOUT_RSP  = BFA_I2HM(2),
+       BFI_LPS_I2H_CVL_EVENT   = BFA_I2HM(3),
 };
 
 struct bfi_lps_login_req_s {
        struct bfi_mhdr_s  mh;          /*  common msg header           */
-       u8              lp_tag;
+       u8              bfa_tag;
        u8              alpa;
        __be16          pdu_size;
        wwn_t           pwwn;
        wwn_t           nwwn;
        u8              fdisc;
        u8              auth_en;
-       u8              rsvd[2];
+       u8              lps_role;
+       u8              bb_scn;
 };
 
 struct bfi_lps_login_rsp_s {
        struct bfi_mhdr_s  mh;          /*  common msg header           */
-       u8              lp_tag;
+       u8              fw_tag;
        u8              status;
        u8              lsrjt_rsn;
        u8              lsrjt_expl;
@@ -380,31 +440,33 @@ struct bfi_lps_login_rsp_s {
        mac_t           fcf_mac;
        u8              ext_status;
        u8              brcd_switch;    /*  attached peer is brcd switch */
+       u8              bb_scn;         /* atatched port's bb_scn */
+       u8              bfa_tag;
 };
 
 struct bfi_lps_logout_req_s {
        struct bfi_mhdr_s  mh;          /*  common msg header           */
-       u8              lp_tag;
+       u8              fw_tag;
        u8              rsvd[3];
        wwn_t           port_name;
 };
 
 struct bfi_lps_logout_rsp_s {
        struct bfi_mhdr_s  mh;          /*  common msg header           */
-       u8              lp_tag;
+       u8              bfa_tag;
        u8              status;
        u8              rsvd[2];
 };
 
 struct bfi_lps_cvl_event_s {
        struct bfi_mhdr_s  mh;          /*  common msg header           */
-       u8              lp_tag;
+       u8              bfa_tag;
        u8              rsvd[3];
 };
 
 struct bfi_lps_n2n_pid_req_s {
        struct bfi_mhdr_s       mh;     /*  common msg header           */
-       u8      lp_tag;
+       u8      fw_tag;
        u32     lp_pid:24;
 };
 
@@ -439,7 +501,7 @@ struct bfi_rport_create_req_s {
        u16     bfa_handle;     /*  host rport handle           */
        __be16  max_frmsz;      /*  max rcv pdu size            */
        u32     pid:24, /*  remote port ID              */
-               lp_tag:8;       /*  local port tag              */
+               lp_fwtag:8;     /*  local port tag              */
        u32     local_pid:24,   /*  local port ID               */
                cisc:8;
        u8      fc_class;       /*  supported FC classes        */
@@ -502,62 +564,63 @@ union bfi_rport_i2h_msg_u {
  * Initiator mode I-T nexus interface defines.
  */
 
-enum bfi_itnim_h2i {
-       BFI_ITNIM_H2I_CREATE_REQ = 1,   /*  i-t nexus creation */
-       BFI_ITNIM_H2I_DELETE_REQ = 2,   /*  i-t nexus deletion */
+enum bfi_itn_h2i {
+       BFI_ITN_H2I_CREATE_REQ = 1,     /*  i-t nexus creation */
+       BFI_ITN_H2I_DELETE_REQ = 2,     /*  i-t nexus deletion */
 };
 
-enum bfi_itnim_i2h {
-       BFI_ITNIM_I2H_CREATE_RSP = BFA_I2HM(1),
-       BFI_ITNIM_I2H_DELETE_RSP = BFA_I2HM(2),
-       BFI_ITNIM_I2H_SLER_EVENT = BFA_I2HM(3),
+enum bfi_itn_i2h {
+       BFI_ITN_I2H_CREATE_RSP = BFA_I2HM(1),
+       BFI_ITN_I2H_DELETE_RSP = BFA_I2HM(2),
+       BFI_ITN_I2H_SLER_EVENT = BFA_I2HM(3),
 };
 
-struct bfi_itnim_create_req_s {
+struct bfi_itn_create_req_s {
        struct bfi_mhdr_s  mh;          /*  common msg header            */
        u16     fw_handle;      /*  f/w handle for itnim         */
        u8      class;          /*  FC class for IO              */
        u8      seq_rec;        /*  sequence recovery support    */
        u8      msg_no;         /*  seq id of the msg            */
+       u8      role;
 };
 
-struct bfi_itnim_create_rsp_s {
+struct bfi_itn_create_rsp_s {
        struct bfi_mhdr_s  mh;          /*  common msg header            */
        u16     bfa_handle;     /*  bfa handle for itnim         */
        u8      status;         /*  fcp request status           */
        u8      seq_id;         /*  seq id of the msg            */
 };
 
-struct bfi_itnim_delete_req_s {
+struct bfi_itn_delete_req_s {
        struct bfi_mhdr_s  mh;          /*  common msg header            */
        u16     fw_handle;      /*  f/w itnim handle             */
        u8      seq_id;         /*  seq id of the msg            */
        u8      rsvd;
 };
 
-struct bfi_itnim_delete_rsp_s {
+struct bfi_itn_delete_rsp_s {
        struct bfi_mhdr_s  mh;          /*  common msg header            */
        u16     bfa_handle;     /*  bfa handle for itnim         */
        u8      status;         /*  fcp request status           */
        u8      seq_id;         /*  seq id of the msg            */
 };
 
-struct bfi_itnim_sler_event_s {
+struct bfi_itn_sler_event_s {
        struct bfi_mhdr_s  mh;          /*  common msg header            */
        u16     bfa_handle;     /*  bfa handle for itnim         */
        u16     rsvd;
 };
 
-union bfi_itnim_h2i_msg_u {
-       struct bfi_itnim_create_req_s *create_req;
-       struct bfi_itnim_delete_req_s *delete_req;
+union bfi_itn_h2i_msg_u {
+       struct bfi_itn_create_req_s *create_req;
+       struct bfi_itn_delete_req_s *delete_req;
        struct bfi_msg_s        *msg;
 };
 
-union bfi_itnim_i2h_msg_u {
-       struct bfi_itnim_create_rsp_s *create_rsp;
-       struct bfi_itnim_delete_rsp_s *delete_rsp;
-       struct bfi_itnim_sler_event_s *sler_event;
+union bfi_itn_i2h_msg_u {
+       struct bfi_itn_create_rsp_s *create_rsp;
+       struct bfi_itn_delete_rsp_s *delete_rsp;
+       struct bfi_itn_sler_event_s *sler_event;
        struct bfi_msg_s        *msg;
 };
 
@@ -693,7 +756,6 @@ enum bfi_ioim_status {
        BFI_IOIM_STS_PATHTOV = 8,
 };
 
-#define BFI_IOIM_SNSLEN        (256)
 /*
  * I/O response message
  */
@@ -772,4 +834,27 @@ struct bfi_tskim_rsp_s {
 
 #pragma pack()
 
+/*
+ * Crossbow PCI MSI-X vector defines
+ */
+enum {
+       BFI_MSIX_CPE_QMIN_CB = 0,
+       BFI_MSIX_CPE_QMAX_CB = 7,
+       BFI_MSIX_RME_QMIN_CB = 8,
+       BFI_MSIX_RME_QMAX_CB = 15,
+       BFI_MSIX_CB_MAX = 22,
+};
+
+/*
+ * Catapult FC PCI MSI-X vector defines
+ */
+enum {
+       BFI_MSIX_LPU_ERR_CT = 0,
+       BFI_MSIX_CPE_QMIN_CT = 1,
+       BFI_MSIX_CPE_QMAX_CT = 4,
+       BFI_MSIX_RME_QMIN_CT = 5,
+       BFI_MSIX_RME_QMAX_CT = 8,
+       BFI_MSIX_CT_MAX = 9,
+};
+
 #endif /* __BFI_MS_H__ */
diff --git a/drivers/scsi/bfa/bfi_reg.h b/drivers/scsi/bfa/bfi_reg.h
new file mode 100644 (file)
index 0000000..d892064
--- /dev/null
@@ -0,0 +1,450 @@
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+/*
+ * bfi_reg.h ASIC register defines for all Brocade adapter ASICs
+ */
+
+#ifndef __BFI_REG_H__
+#define __BFI_REG_H__
+
+#define HOSTFN0_INT_STATUS             0x00014000      /* cb/ct        */
+#define HOSTFN1_INT_STATUS             0x00014100      /* cb/ct        */
+#define HOSTFN2_INT_STATUS             0x00014300      /* ct           */
+#define HOSTFN3_INT_STATUS             0x00014400      /* ct           */
+#define HOSTFN0_INT_MSK                        0x00014004      /* cb/ct        */
+#define HOSTFN1_INT_MSK                        0x00014104      /* cb/ct        */
+#define HOSTFN2_INT_MSK                        0x00014304      /* ct           */
+#define HOSTFN3_INT_MSK                        0x00014404      /* ct           */
+
+#define HOST_PAGE_NUM_FN0              0x00014008      /* cb/ct        */
+#define HOST_PAGE_NUM_FN1              0x00014108      /* cb/ct        */
+#define HOST_PAGE_NUM_FN2              0x00014308      /* ct           */
+#define HOST_PAGE_NUM_FN3              0x00014408      /* ct           */
+
+#define APP_PLL_LCLK_CTL_REG           0x00014204      /* cb/ct        */
+#define __P_LCLK_PLL_LOCK              0x80000000
+#define __APP_PLL_LCLK_SRAM_USE_100MHZ 0x00100000
+#define __APP_PLL_LCLK_RESET_TIMER_MK  0x000e0000
+#define __APP_PLL_LCLK_RESET_TIMER_SH  17
+#define __APP_PLL_LCLK_RESET_TIMER(_v) ((_v) << __APP_PLL_LCLK_RESET_TIMER_SH)
+#define __APP_PLL_LCLK_LOGIC_SOFT_RESET        0x00010000
+#define __APP_PLL_LCLK_CNTLMT0_1_MK    0x0000c000
+#define __APP_PLL_LCLK_CNTLMT0_1_SH    14
+#define __APP_PLL_LCLK_CNTLMT0_1(_v)   ((_v) << __APP_PLL_LCLK_CNTLMT0_1_SH)
+#define __APP_PLL_LCLK_JITLMT0_1_MK    0x00003000
+#define __APP_PLL_LCLK_JITLMT0_1_SH    12
+#define __APP_PLL_LCLK_JITLMT0_1(_v)   ((_v) << __APP_PLL_LCLK_JITLMT0_1_SH)
+#define __APP_PLL_LCLK_HREF            0x00000800
+#define __APP_PLL_LCLK_HDIV            0x00000400
+#define __APP_PLL_LCLK_P0_1_MK         0x00000300
+#define __APP_PLL_LCLK_P0_1_SH         8
+#define __APP_PLL_LCLK_P0_1(_v)                ((_v) << __APP_PLL_LCLK_P0_1_SH)
+#define __APP_PLL_LCLK_Z0_2_MK         0x000000e0
+#define __APP_PLL_LCLK_Z0_2_SH         5
+#define __APP_PLL_LCLK_Z0_2(_v)                ((_v) << __APP_PLL_LCLK_Z0_2_SH)
+#define __APP_PLL_LCLK_RSEL200500      0x00000010
+#define __APP_PLL_LCLK_ENARST          0x00000008
+#define __APP_PLL_LCLK_BYPASS          0x00000004
+#define __APP_PLL_LCLK_LRESETN         0x00000002
+#define __APP_PLL_LCLK_ENABLE          0x00000001
+#define APP_PLL_SCLK_CTL_REG           0x00014208      /* cb/ct        */
+#define __P_SCLK_PLL_LOCK              0x80000000
+#define __APP_PLL_SCLK_RESET_TIMER_MK  0x000e0000
+#define __APP_PLL_SCLK_RESET_TIMER_SH  17
+#define __APP_PLL_SCLK_RESET_TIMER(_v) ((_v) << __APP_PLL_SCLK_RESET_TIMER_SH)
+#define __APP_PLL_SCLK_LOGIC_SOFT_RESET        0x00010000
+#define __APP_PLL_SCLK_CNTLMT0_1_MK    0x0000c000
+#define __APP_PLL_SCLK_CNTLMT0_1_SH    14
+#define __APP_PLL_SCLK_CNTLMT0_1(_v)   ((_v) << __APP_PLL_SCLK_CNTLMT0_1_SH)
+#define __APP_PLL_SCLK_JITLMT0_1_MK    0x00003000
+#define __APP_PLL_SCLK_JITLMT0_1_SH    12
+#define __APP_PLL_SCLK_JITLMT0_1(_v)   ((_v) << __APP_PLL_SCLK_JITLMT0_1_SH)
+#define __APP_PLL_SCLK_HREF            0x00000800
+#define __APP_PLL_SCLK_HDIV            0x00000400
+#define __APP_PLL_SCLK_P0_1_MK         0x00000300
+#define __APP_PLL_SCLK_P0_1_SH         8
+#define __APP_PLL_SCLK_P0_1(_v)                ((_v) << __APP_PLL_SCLK_P0_1_SH)
+#define __APP_PLL_SCLK_Z0_2_MK         0x000000e0
+#define __APP_PLL_SCLK_Z0_2_SH         5
+#define __APP_PLL_SCLK_Z0_2(_v)                ((_v) << __APP_PLL_SCLK_Z0_2_SH)
+#define __APP_PLL_SCLK_RSEL200500      0x00000010
+#define __APP_PLL_SCLK_ENARST          0x00000008
+#define __APP_PLL_SCLK_BYPASS          0x00000004
+#define __APP_PLL_SCLK_LRESETN         0x00000002
+#define __APP_PLL_SCLK_ENABLE          0x00000001
+#define __ENABLE_MAC_AHB_1             0x00800000      /* ct           */
+#define __ENABLE_MAC_AHB_0             0x00400000      /* ct           */
+#define __ENABLE_MAC_1                 0x00200000      /* ct           */
+#define __ENABLE_MAC_0                 0x00100000      /* ct           */
+
+#define HOST_SEM0_REG                  0x00014230      /* cb/ct        */
+#define HOST_SEM1_REG                  0x00014234      /* cb/ct        */
+#define HOST_SEM2_REG                  0x00014238      /* cb/ct        */
+#define HOST_SEM3_REG                  0x0001423c      /* cb/ct        */
+#define HOST_SEM4_REG                  0x00014610      /* cb/ct        */
+#define HOST_SEM5_REG                  0x00014614      /* cb/ct        */
+#define HOST_SEM6_REG                  0x00014618      /* cb/ct        */
+#define HOST_SEM7_REG                  0x0001461c      /* cb/ct        */
+#define HOST_SEM0_INFO_REG             0x00014240      /* cb/ct        */
+#define HOST_SEM1_INFO_REG             0x00014244      /* cb/ct        */
+#define HOST_SEM2_INFO_REG             0x00014248      /* cb/ct        */
+#define HOST_SEM3_INFO_REG             0x0001424c      /* cb/ct        */
+#define HOST_SEM4_INFO_REG             0x00014620      /* cb/ct        */
+#define HOST_SEM5_INFO_REG             0x00014624      /* cb/ct        */
+#define HOST_SEM6_INFO_REG             0x00014628      /* cb/ct        */
+#define HOST_SEM7_INFO_REG             0x0001462c      /* cb/ct        */
+
+#define HOSTFN0_LPU0_CMD_STAT          0x00019000      /* cb/ct        */
+#define HOSTFN0_LPU1_CMD_STAT          0x00019004      /* cb/ct        */
+#define HOSTFN1_LPU0_CMD_STAT          0x00019010      /* cb/ct        */
+#define HOSTFN1_LPU1_CMD_STAT          0x00019014      /* cb/ct        */
+#define HOSTFN2_LPU0_CMD_STAT          0x00019150      /* ct           */
+#define HOSTFN2_LPU1_CMD_STAT          0x00019154      /* ct           */
+#define HOSTFN3_LPU0_CMD_STAT          0x00019160      /* ct           */
+#define HOSTFN3_LPU1_CMD_STAT          0x00019164      /* ct           */
+#define LPU0_HOSTFN0_CMD_STAT          0x00019008      /* cb/ct        */
+#define LPU1_HOSTFN0_CMD_STAT          0x0001900c      /* cb/ct        */
+#define LPU0_HOSTFN1_CMD_STAT          0x00019018      /* cb/ct        */
+#define LPU1_HOSTFN1_CMD_STAT          0x0001901c      /* cb/ct        */
+#define LPU0_HOSTFN2_CMD_STAT          0x00019158      /* ct           */
+#define LPU1_HOSTFN2_CMD_STAT          0x0001915c      /* ct           */
+#define LPU0_HOSTFN3_CMD_STAT          0x00019168      /* ct           */
+#define LPU1_HOSTFN3_CMD_STAT          0x0001916c      /* ct           */
+
+#define PSS_CTL_REG                    0x00018800      /* cb/ct        */
+#define __PSS_I2C_CLK_DIV_MK           0x007f0000
+#define __PSS_I2C_CLK_DIV_SH           16
+#define __PSS_I2C_CLK_DIV(_v)          ((_v) << __PSS_I2C_CLK_DIV_SH)
+#define __PSS_LMEM_INIT_DONE           0x00001000
+#define __PSS_LMEM_RESET               0x00000200
+#define __PSS_LMEM_INIT_EN             0x00000100
+#define __PSS_LPU1_RESET               0x00000002
+#define __PSS_LPU0_RESET               0x00000001
+#define PSS_ERR_STATUS_REG             0x00018810      /* cb/ct        */
+#define ERR_SET_REG                    0x00018818      /* cb/ct        */
+#define PSS_GPIO_OUT_REG               0x000188c0      /* cb/ct        */
+#define __PSS_GPIO_OUT_REG             0x00000fff
+#define PSS_GPIO_OE_REG                        0x000188c8      /* cb/ct        */
+#define __PSS_GPIO_OE_REG              0x000000ff
+
+#define HOSTFN0_LPU_MBOX0_0            0x00019200      /* cb/ct        */
+#define HOSTFN1_LPU_MBOX0_8            0x00019260      /* cb/ct        */
+#define LPU_HOSTFN0_MBOX0_0            0x00019280      /* cb/ct        */
+#define LPU_HOSTFN1_MBOX0_8            0x000192e0      /* cb/ct        */
+#define HOSTFN2_LPU_MBOX0_0            0x00019400      /* ct           */
+#define HOSTFN3_LPU_MBOX0_8            0x00019460      /* ct           */
+#define LPU_HOSTFN2_MBOX0_0            0x00019480      /* ct           */
+#define LPU_HOSTFN3_MBOX0_8            0x000194e0      /* ct           */
+
+#define HOST_MSIX_ERR_INDEX_FN0                0x0001400c      /* ct           */
+#define HOST_MSIX_ERR_INDEX_FN1                0x0001410c      /* ct           */
+#define HOST_MSIX_ERR_INDEX_FN2                0x0001430c      /* ct           */
+#define HOST_MSIX_ERR_INDEX_FN3                0x0001440c      /* ct           */
+
+#define MBIST_CTL_REG                  0x00014220      /* ct           */
+#define __EDRAM_BISTR_START            0x00000004
+#define MBIST_STAT_REG                 0x00014224      /* ct           */
+#define ETH_MAC_SER_REG                        0x00014288      /* ct           */
+#define __APP_EMS_CKBUFAMPIN           0x00000020
+#define __APP_EMS_REFCLKSEL            0x00000010
+#define __APP_EMS_CMLCKSEL             0x00000008
+#define __APP_EMS_REFCKBUFEN2          0x00000004
+#define __APP_EMS_REFCKBUFEN1          0x00000002
+#define __APP_EMS_CHANNEL_SEL          0x00000001
+#define FNC_PERS_REG                   0x00014604      /* ct           */
+#define __F3_FUNCTION_ACTIVE           0x80000000
+#define __F3_FUNCTION_MODE             0x40000000
+#define __F3_PORT_MAP_MK               0x30000000
+#define __F3_PORT_MAP_SH               28
+#define __F3_PORT_MAP(_v)              ((_v) << __F3_PORT_MAP_SH)
+#define __F3_VM_MODE                   0x08000000
+#define __F3_INTX_STATUS_MK            0x07000000
+#define __F3_INTX_STATUS_SH            24
+#define __F3_INTX_STATUS(_v)           ((_v) << __F3_INTX_STATUS_SH)
+#define __F2_FUNCTION_ACTIVE           0x00800000
+#define __F2_FUNCTION_MODE             0x00400000
+#define __F2_PORT_MAP_MK               0x00300000
+#define __F2_PORT_MAP_SH               20
+#define __F2_PORT_MAP(_v)              ((_v) << __F2_PORT_MAP_SH)
+#define __F2_VM_MODE                   0x00080000
+#define __F2_INTX_STATUS_MK            0x00070000
+#define __F2_INTX_STATUS_SH            16
+#define __F2_INTX_STATUS(_v)           ((_v) << __F2_INTX_STATUS_SH)
+#define __F1_FUNCTION_ACTIVE           0x00008000
+#define __F1_FUNCTION_MODE             0x00004000
+#define __F1_PORT_MAP_MK               0x00003000
+#define __F1_PORT_MAP_SH               12
+#define __F1_PORT_MAP(_v)              ((_v) << __F1_PORT_MAP_SH)
+#define __F1_VM_MODE                   0x00000800
+#define __F1_INTX_STATUS_MK            0x00000700
+#define __F1_INTX_STATUS_SH            8
+#define __F1_INTX_STATUS(_v)           ((_v) << __F1_INTX_STATUS_SH)
+#define __F0_FUNCTION_ACTIVE           0x00000080
+#define __F0_FUNCTION_MODE             0x00000040
+#define __F0_PORT_MAP_MK               0x00000030
+#define __F0_PORT_MAP_SH               4
+#define __F0_PORT_MAP(_v)              ((_v) << __F0_PORT_MAP_SH)
+#define __F0_VM_MODE                   0x00000008
+#define __F0_INTX_STATUS               0x00000007
+enum {
+       __F0_INTX_STATUS_MSIX = 0x0,
+       __F0_INTX_STATUS_INTA = 0x1,
+       __F0_INTX_STATUS_INTB = 0x2,
+       __F0_INTX_STATUS_INTC = 0x3,
+       __F0_INTX_STATUS_INTD = 0x4,
+};
+
+#define OP_MODE                                0x0001460c      /* ct           */
+#define __APP_ETH_CLK_LOWSPEED         0x00000004
+#define __GLOBAL_CORECLK_HALFSPEED     0x00000002
+#define __GLOBAL_FCOE_MODE             0x00000001
+#define FW_INIT_HALT_P0                        0x000191ac      /* ct           */
+#define __FW_INIT_HALT_P               0x00000001
+#define FW_INIT_HALT_P1                        0x000191bc      /* ct           */
+#define PMM_1T_RESET_REG_P0            0x0002381c      /* ct           */
+#define __PMM_1T_RESET_P               0x00000001
+#define PMM_1T_RESET_REG_P1            0x00023c1c      /* ct           */
+
+/*
+ * Catapult-2 specific defines
+ */
+#define CT2_PCI_CPQ_BASE               0x00030000
+#define CT2_PCI_APP_BASE               0x00030100
+#define CT2_PCI_ETH_BASE               0x00030400
+
+/*
+ * APP block registers
+ */
+#define CT2_HOSTFN_INT_STATUS          (CT2_PCI_APP_BASE + 0x00)
+#define CT2_HOSTFN_INTR_MASK           (CT2_PCI_APP_BASE + 0x04)
+#define CT2_HOSTFN_PERSONALITY0                (CT2_PCI_APP_BASE + 0x08)
+#define __PME_STATUS_                  0x00200000
+#define __PF_VF_BAR_SIZE_MODE__MK      0x00180000
+#define __PF_VF_BAR_SIZE_MODE__SH      19
+#define __PF_VF_BAR_SIZE_MODE_(_v)     ((_v) << __PF_VF_BAR_SIZE_MODE__SH)
+#define __FC_LL_PORT_MAP__MK           0x00060000
+#define __FC_LL_PORT_MAP__SH           17
+#define __FC_LL_PORT_MAP_(_v)          ((_v) << __FC_LL_PORT_MAP__SH)
+#define __PF_VF_ACTIVE_                        0x00010000
+#define __PF_VF_CFG_RDY_               0x00008000
+#define __PF_VF_ENABLE_                        0x00004000
+#define __PF_DRIVER_ACTIVE_            0x00002000
+#define __PF_PME_SEND_ENABLE_          0x00001000
+#define __PF_EXROM_OFFSET__MK          0x00000ff0
+#define __PF_EXROM_OFFSET__SH          4
+#define __PF_EXROM_OFFSET_(_v)         ((_v) << __PF_EXROM_OFFSET__SH)
+#define __FC_LL_MODE_                  0x00000008
+#define __PF_INTX_PIN_                 0x00000007
+#define CT2_HOSTFN_PERSONALITY1                (CT2_PCI_APP_BASE + 0x0C)
+#define __PF_NUM_QUEUES1__MK           0xff000000
+#define __PF_NUM_QUEUES1__SH           24
+#define __PF_NUM_QUEUES1_(_v)          ((_v) << __PF_NUM_QUEUES1__SH)
+#define __PF_VF_QUE_OFFSET1__MK                0x00ff0000
+#define __PF_VF_QUE_OFFSET1__SH                16
+#define __PF_VF_QUE_OFFSET1_(_v)       ((_v) << __PF_VF_QUE_OFFSET1__SH)
+#define __PF_VF_NUM_QUEUES__MK         0x0000ff00
+#define __PF_VF_NUM_QUEUES__SH         8
+#define __PF_VF_NUM_QUEUES_(_v)                ((_v) << __PF_VF_NUM_QUEUES__SH)
+#define __PF_VF_QUE_OFFSET_            0x000000ff
+#define CT2_HOSTFN_PAGE_NUM            (CT2_PCI_APP_BASE + 0x18)
+#define CT2_HOSTFN_MSIX_VT_INDEX_MBOX_ERR      (CT2_PCI_APP_BASE + 0x38)
+
+/*
+ * Catapult-2 CPQ block registers
+ */
+#define CT2_HOSTFN_LPU0_MBOX0          (CT2_PCI_CPQ_BASE + 0x00)
+#define CT2_HOSTFN_LPU1_MBOX0          (CT2_PCI_CPQ_BASE + 0x20)
+#define CT2_LPU0_HOSTFN_MBOX0          (CT2_PCI_CPQ_BASE + 0x40)
+#define CT2_LPU1_HOSTFN_MBOX0          (CT2_PCI_CPQ_BASE + 0x60)
+#define CT2_HOSTFN_LPU0_CMD_STAT       (CT2_PCI_CPQ_BASE + 0x80)
+#define CT2_HOSTFN_LPU1_CMD_STAT       (CT2_PCI_CPQ_BASE + 0x84)
+#define CT2_LPU0_HOSTFN_CMD_STAT       (CT2_PCI_CPQ_BASE + 0x88)
+#define CT2_LPU1_HOSTFN_CMD_STAT       (CT2_PCI_CPQ_BASE + 0x8c)
+#define CT2_HOSTFN_LPU0_READ_STAT      (CT2_PCI_CPQ_BASE + 0x90)
+#define CT2_HOSTFN_LPU1_READ_STAT      (CT2_PCI_CPQ_BASE + 0x94)
+#define CT2_LPU0_HOSTFN_MBOX0_MSK      (CT2_PCI_CPQ_BASE + 0x98)
+#define CT2_LPU1_HOSTFN_MBOX0_MSK      (CT2_PCI_CPQ_BASE + 0x9C)
+#define CT2_HOST_SEM0_REG              0x000148f0
+#define CT2_HOST_SEM1_REG              0x000148f4
+#define CT2_HOST_SEM2_REG              0x000148f8
+#define CT2_HOST_SEM3_REG              0x000148fc
+#define CT2_HOST_SEM4_REG              0x00014900
+#define CT2_HOST_SEM5_REG              0x00014904
+#define CT2_HOST_SEM6_REG              0x00014908
+#define CT2_HOST_SEM7_REG              0x0001490c
+#define CT2_HOST_SEM0_INFO_REG         0x000148b0
+#define CT2_HOST_SEM1_INFO_REG         0x000148b4
+#define CT2_HOST_SEM2_INFO_REG         0x000148b8
+#define CT2_HOST_SEM3_INFO_REG         0x000148bc
+#define CT2_HOST_SEM4_INFO_REG         0x000148c0
+#define CT2_HOST_SEM5_INFO_REG         0x000148c4
+#define CT2_HOST_SEM6_INFO_REG         0x000148c8
+#define CT2_HOST_SEM7_INFO_REG         0x000148cc
+
+#define CT2_APP_PLL_LCLK_CTL_REG       0x00014808
+#define __APP_LPUCLK_HALFSPEED         0x40000000
+#define __APP_PLL_LCLK_LOAD            0x20000000
+#define __APP_PLL_LCLK_FBCNT_MK                0x1fe00000
+#define __APP_PLL_LCLK_FBCNT_SH                21
+#define __APP_PLL_LCLK_FBCNT(_v)       ((_v) << __APP_PLL_LCLK_FBCNT_SH)
+enum {
+       __APP_PLL_LCLK_FBCNT_425_MHZ = 6,
+       __APP_PLL_LCLK_FBCNT_468_MHZ = 4,
+};
+#define __APP_PLL_LCLK_EXTFB           0x00000800
+#define __APP_PLL_LCLK_ENOUTS          0x00000400
+#define __APP_PLL_LCLK_RATE            0x00000010
+#define CT2_APP_PLL_SCLK_CTL_REG       0x0001480c
+#define __P_SCLK_PLL_LOCK              0x80000000
+#define __APP_PLL_SCLK_REFCLK_SEL      0x40000000
+#define __APP_PLL_SCLK_CLK_DIV2                0x20000000
+#define __APP_PLL_SCLK_LOAD            0x10000000
+#define __APP_PLL_SCLK_FBCNT_MK                0x0ff00000
+#define __APP_PLL_SCLK_FBCNT_SH                20
+#define __APP_PLL_SCLK_FBCNT(_v)       ((_v) << __APP_PLL_SCLK_FBCNT_SH)
+enum {
+       __APP_PLL_SCLK_FBCNT_NORM = 6,
+       __APP_PLL_SCLK_FBCNT_10G_FC = 10,
+};
+#define __APP_PLL_SCLK_EXTFB           0x00000800
+#define __APP_PLL_SCLK_ENOUTS          0x00000400
+#define __APP_PLL_SCLK_RATE            0x00000010
+#define CT2_PCIE_MISC_REG              0x00014804
+#define __ETH_CLK_ENABLE_PORT1         0x00000010
+#define CT2_CHIP_MISC_PRG              0x000148a4
+#define __ETH_CLK_ENABLE_PORT0         0x00004000
+#define __APP_LPU_SPEED                        0x00000002
+#define CT2_MBIST_STAT_REG             0x00014818
+#define CT2_MBIST_CTL_REG              0x0001481c
+#define CT2_PMM_1T_CONTROL_REG_P0      0x0002381c
+#define __PMM_1T_PNDB_P                        0x00000002
+#define CT2_PMM_1T_CONTROL_REG_P1      0x00023c1c
+#define CT2_WGN_STATUS                 0x00014990
+#define __WGN_READY                    0x00000400
+#define __GLBL_PF_VF_CFG_RDY           0x00000200
+#define CT2_NFC_CSR_SET_REG            0x00027424
+#define __HALT_NFC_CONTROLLER          0x00000002
+#define __NFC_CONTROLLER_HALTED                0x00001000
+
+#define CT2_CSI_MAC0_CONTROL_REG       0x000270d0
+#define __CSI_MAC_RESET                        0x00000010
+#define __CSI_MAC_AHB_RESET            0x00000008
+#define CT2_CSI_MAC1_CONTROL_REG       0x000270d4
+#define CT2_CSI_MAC_CONTROL_REG(__n)   \
+       (CT2_CSI_MAC0_CONTROL_REG +     \
+       (__n) * (CT2_CSI_MAC1_CONTROL_REG - CT2_CSI_MAC0_CONTROL_REG))
+
+/*
+ * Name semaphore registers based on usage
+ */
+#define BFA_IOC0_HBEAT_REG             HOST_SEM0_INFO_REG
+#define BFA_IOC0_STATE_REG             HOST_SEM1_INFO_REG
+#define BFA_IOC1_HBEAT_REG             HOST_SEM2_INFO_REG
+#define BFA_IOC1_STATE_REG             HOST_SEM3_INFO_REG
+#define BFA_FW_USE_COUNT               HOST_SEM4_INFO_REG
+#define BFA_IOC_FAIL_SYNC              HOST_SEM5_INFO_REG
+
+/*
+ * CT2 semaphore register locations changed
+ */
+#define CT2_BFA_IOC0_HBEAT_REG         CT2_HOST_SEM0_INFO_REG
+#define CT2_BFA_IOC0_STATE_REG         CT2_HOST_SEM1_INFO_REG
+#define CT2_BFA_IOC1_HBEAT_REG         CT2_HOST_SEM2_INFO_REG
+#define CT2_BFA_IOC1_STATE_REG         CT2_HOST_SEM3_INFO_REG
+#define CT2_BFA_FW_USE_COUNT           CT2_HOST_SEM4_INFO_REG
+#define CT2_BFA_IOC_FAIL_SYNC          CT2_HOST_SEM5_INFO_REG
+
+#define CPE_Q_NUM(__fn, __q)   (((__fn) << 2) + (__q))
+#define RME_Q_NUM(__fn, __q)   (((__fn) << 2) + (__q))
+
+/*
+ * And corresponding host interrupt status bit field defines
+ */
+#define __HFN_INT_CPE_Q0       0x00000001U
+#define __HFN_INT_CPE_Q1       0x00000002U
+#define __HFN_INT_CPE_Q2       0x00000004U
+#define __HFN_INT_CPE_Q3       0x00000008U
+#define __HFN_INT_CPE_Q4       0x00000010U
+#define __HFN_INT_CPE_Q5       0x00000020U
+#define __HFN_INT_CPE_Q6       0x00000040U
+#define __HFN_INT_CPE_Q7       0x00000080U
+#define __HFN_INT_RME_Q0       0x00000100U
+#define __HFN_INT_RME_Q1       0x00000200U
+#define __HFN_INT_RME_Q2       0x00000400U
+#define __HFN_INT_RME_Q3       0x00000800U
+#define __HFN_INT_RME_Q4       0x00001000U
+#define __HFN_INT_RME_Q5       0x00002000U
+#define __HFN_INT_RME_Q6       0x00004000U
+#define __HFN_INT_RME_Q7       0x00008000U
+#define __HFN_INT_ERR_EMC      0x00010000U
+#define __HFN_INT_ERR_LPU0     0x00020000U
+#define __HFN_INT_ERR_LPU1     0x00040000U
+#define __HFN_INT_ERR_PSS      0x00080000U
+#define __HFN_INT_MBOX_LPU0    0x00100000U
+#define __HFN_INT_MBOX_LPU1    0x00200000U
+#define __HFN_INT_MBOX1_LPU0   0x00400000U
+#define __HFN_INT_MBOX1_LPU1   0x00800000U
+#define __HFN_INT_LL_HALT      0x01000000U
+#define __HFN_INT_CPE_MASK     0x000000ffU
+#define __HFN_INT_RME_MASK     0x0000ff00U
+#define __HFN_INT_ERR_MASK     \
+       (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 | __HFN_INT_ERR_LPU1 | \
+        __HFN_INT_ERR_PSS | __HFN_INT_LL_HALT)
+#define __HFN_INT_FN0_MASK     \
+       (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 | __HFN_INT_CPE_Q2 | \
+        __HFN_INT_CPE_Q3 | __HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 | \
+        __HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 | __HFN_INT_MBOX_LPU0)
+#define __HFN_INT_FN1_MASK     \
+       (__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 | __HFN_INT_CPE_Q6 | \
+        __HFN_INT_CPE_Q7 | __HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 | \
+        __HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 | __HFN_INT_MBOX_LPU1)
+
+/*
+ * Host interrupt status defines for catapult-2
+ */
+#define __HFN_INT_MBOX_LPU0_CT2        0x00010000U
+#define __HFN_INT_MBOX_LPU1_CT2        0x00020000U
+#define __HFN_INT_ERR_PSS_CT2  0x00040000U
+#define __HFN_INT_ERR_LPU0_CT2 0x00080000U
+#define __HFN_INT_ERR_LPU1_CT2 0x00100000U
+#define __HFN_INT_CPQ_HALT_CT2 0x00200000U
+#define __HFN_INT_ERR_WGN_CT2  0x00400000U
+#define __HFN_INT_ERR_LEHRX_CT2        0x00800000U
+#define __HFN_INT_ERR_LEHTX_CT2        0x01000000U
+#define __HFN_INT_ERR_MASK_CT2 \
+       (__HFN_INT_ERR_PSS_CT2 | __HFN_INT_ERR_LPU0_CT2 | \
+        __HFN_INT_ERR_LPU1_CT2 | __HFN_INT_CPQ_HALT_CT2 | \
+        __HFN_INT_ERR_WGN_CT2 | __HFN_INT_ERR_LEHRX_CT2 | \
+        __HFN_INT_ERR_LEHTX_CT2)
+#define __HFN_INT_FN0_MASK_CT2 \
+       (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 | __HFN_INT_CPE_Q2 | \
+        __HFN_INT_CPE_Q3 | __HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 | \
+        __HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 | __HFN_INT_MBOX_LPU0_CT2)
+#define __HFN_INT_FN1_MASK_CT2 \
+       (__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 | __HFN_INT_CPE_Q6 | \
+        __HFN_INT_CPE_Q7 | __HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 | \
+        __HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 | __HFN_INT_MBOX_LPU1_CT2)
+
+/*
+ * asic memory map.
+ */
+#define PSS_SMEM_PAGE_START            0x8000
+#define PSS_SMEM_PGNUM(_pg0, _ma)      ((_pg0) + ((_ma) >> 15))
+#define PSS_SMEM_PGOFF(_ma)            ((_ma) & 0x7fff)
+
+#endif /* __BFI_REG_H__ */
index 0a404bf..e3caa50 100644 (file)
@@ -62,7 +62,7 @@
 #include "bnx2fc_constants.h"
 
 #define BNX2FC_NAME            "bnx2fc"
-#define BNX2FC_VERSION         "1.0.1"
+#define BNX2FC_VERSION         "1.0.2"
 
 #define PFX                    "bnx2fc: "
 
@@ -152,7 +152,6 @@ struct bnx2fc_percpu_s {
        spinlock_t fp_work_lock;
 };
 
-
 struct bnx2fc_hba {
        struct list_head link;
        struct cnic_dev *cnic;
@@ -179,6 +178,7 @@ struct bnx2fc_hba {
                #define BNX2FC_CTLR_INIT_DONE           1
                #define BNX2FC_CREATE_DONE              2
        struct fcoe_ctlr ctlr;
+       struct list_head vports;
        u8 vlan_enabled;
        int vlan_id;
        u32 next_conn_id;
@@ -232,6 +232,11 @@ struct bnx2fc_hba {
 
 #define bnx2fc_from_ctlr(fip) container_of(fip, struct bnx2fc_hba, ctlr)
 
+struct bnx2fc_lport {
+       struct list_head list;
+       struct fc_lport *lport;
+};
+
 struct bnx2fc_cmd_mgr {
        struct bnx2fc_hba *hba;
        u16 next_idx;
@@ -423,6 +428,7 @@ struct bnx2fc_work {
 struct bnx2fc_unsol_els {
        struct fc_lport *lport;
        struct fc_frame *fp;
+       struct bnx2fc_hba *hba;
        struct work_struct unsol_els_work;
 };
 
index ab255fb..7e2b7bc 100644 (file)
@@ -21,7 +21,7 @@ DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
 
 #define DRV_MODULE_NAME                "bnx2fc"
 #define DRV_MODULE_VERSION     BNX2FC_VERSION
-#define DRV_MODULE_RELDATE     "Mar 17, 2011"
+#define DRV_MODULE_RELDATE     "May 27, 2011"
 
 
 static char version[] __devinitdata =
@@ -679,6 +679,9 @@ static void bnx2fc_link_speed_update(struct fc_lport *lport)
                case SPEED_1000:
                        lport->link_speed = FC_PORTSPEED_1GBIT;
                        break;
+               case SPEED_2500:
+                       lport->link_speed = FC_PORTSPEED_2GBIT;
+                       break;
                case SPEED_10000:
                        lport->link_speed = FC_PORTSPEED_10GBIT;
                        break;
@@ -1225,6 +1228,7 @@ static int bnx2fc_interface_setup(struct bnx2fc_hba *hba,
        hba->ctlr.get_src_addr = bnx2fc_get_src_mac;
        set_bit(BNX2FC_CTLR_INIT_DONE, &hba->init_done);
 
+       INIT_LIST_HEAD(&hba->vports);
        rc = bnx2fc_netdev_setup(hba);
        if (rc)
                goto setup_err;
@@ -1261,8 +1265,15 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba,
        struct fcoe_port        *port;
        struct Scsi_Host        *shost;
        struct fc_vport         *vport = dev_to_vport(parent);
+       struct bnx2fc_lport     *blport;
        int                     rc = 0;
 
+       blport = kzalloc(sizeof(struct bnx2fc_lport), GFP_KERNEL);
+       if (!blport) {
+               BNX2FC_HBA_DBG(hba->ctlr.lp, "Unable to alloc bnx2fc_lport\n");
+               return NULL;
+       }
+
        /* Allocate Scsi_Host structure */
        if (!npiv)
                lport = libfc_host_alloc(&bnx2fc_shost_template, sizeof(*port));
@@ -1271,7 +1282,7 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba,
 
        if (!lport) {
                printk(KERN_ERR PFX "could not allocate scsi host structure\n");
-               return NULL;
+               goto free_blport;
        }
        shost = lport->host;
        port = lport_priv(lport);
@@ -1327,12 +1338,20 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba,
        }
 
        bnx2fc_interface_get(hba);
+
+       spin_lock_bh(&hba->hba_lock);
+       blport->lport = lport;
+       list_add_tail(&blport->list, &hba->vports);
+       spin_unlock_bh(&hba->hba_lock);
+
        return lport;
 
 shost_err:
        scsi_remove_host(shost);
 lp_config_err:
        scsi_host_put(lport->host);
+free_blport:
+       kfree(blport);
        return NULL;
 }
 
@@ -1348,6 +1367,7 @@ static void bnx2fc_if_destroy(struct fc_lport *lport)
 {
        struct fcoe_port *port = lport_priv(lport);
        struct bnx2fc_hba *hba = port->priv;
+       struct bnx2fc_lport *blport, *tmp;
 
        BNX2FC_HBA_DBG(hba->ctlr.lp, "ENTERED bnx2fc_if_destroy\n");
        /* Stop the transmit retry timer */
@@ -1372,6 +1392,15 @@ static void bnx2fc_if_destroy(struct fc_lport *lport)
        /* Free memory used by statistical counters */
        fc_lport_free_stats(lport);
 
+       spin_lock_bh(&hba->hba_lock);
+       list_for_each_entry_safe(blport, tmp, &hba->vports, list) {
+               if (blport->lport == lport) {
+                       list_del(&blport->list);
+                       kfree(blport);
+               }
+       }
+       spin_unlock_bh(&hba->hba_lock);
+
        /* Release Scsi_Host */
        scsi_host_put(lport->host);
 
index f756d5f..78baa46 100644 (file)
@@ -480,16 +480,36 @@ int bnx2fc_send_session_destroy_req(struct bnx2fc_hba *hba,
        return rc;
 }
 
+static bool is_valid_lport(struct bnx2fc_hba *hba, struct fc_lport *lport)
+{
+       struct bnx2fc_lport *blport;
+
+       spin_lock_bh(&hba->hba_lock);
+       list_for_each_entry(blport, &hba->vports, list) {
+               if (blport->lport == lport) {
+                       spin_unlock_bh(&hba->hba_lock);
+                       return true;
+               }
+       }
+       spin_unlock_bh(&hba->hba_lock);
+       return false;
+
+}
+
+
 static void bnx2fc_unsol_els_work(struct work_struct *work)
 {
        struct bnx2fc_unsol_els *unsol_els;
        struct fc_lport *lport;
+       struct bnx2fc_hba *hba;
        struct fc_frame *fp;
 
        unsol_els = container_of(work, struct bnx2fc_unsol_els, unsol_els_work);
        lport = unsol_els->lport;
        fp = unsol_els->fp;
-       fc_exch_recv(lport, fp);
+       hba = unsol_els->hba;
+       if (is_valid_lport(hba, lport))
+               fc_exch_recv(lport, fp);
        kfree(unsol_els);
 }
 
@@ -499,6 +519,7 @@ void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
 {
        struct fcoe_port *port = tgt->port;
        struct fc_lport *lport = port->lport;
+       struct bnx2fc_hba *hba = port->priv;
        struct bnx2fc_unsol_els *unsol_els;
        struct fc_frame_header *fh;
        struct fc_frame *fp;
@@ -559,6 +580,7 @@ void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
                fr_eof(fp) = FC_EOF_T;
                fr_crc(fp) = cpu_to_le32(~crc);
                unsol_els->lport = lport;
+               unsol_els->hba = hba;
                unsol_els->fp = fp;
                INIT_WORK(&unsol_els->unsol_els_work, bnx2fc_unsol_els_work);
                queue_work(bnx2fc_wq, &unsol_els->unsol_els_work);
index b5b5c34..454c72c 100644 (file)
@@ -1734,7 +1734,6 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
                printk(KERN_ERR PFX "SCp.ptr is NULL\n");
                return;
        }
-       io_req->sc_cmd = NULL;
 
        if (io_req->on_active_queue) {
                list_del_init(&io_req->link);
@@ -1754,6 +1753,7 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
        }
 
        bnx2fc_unmap_sg_list(io_req);
+       io_req->sc_cmd = NULL;
 
        switch (io_req->fcp_status) {
        case FC_GOOD:
index 30e6bdb..aca593d 100644 (file)
@@ -1,6 +1,6 @@
 /* 57xx_iscsi_constants.h: Broadcom NetXtreme II iSCSI HSI
  *
- * Copyright (c) 2006 - 2010 Broadcom Corporation
+ * Copyright (c) 2006 - 2011 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
index dad6c8a..1c39177 100644 (file)
@@ -1,6 +1,6 @@
 /* 57xx_iscsi_hsi.h: Broadcom NetXtreme II iSCSI HSI.
  *
- * Copyright (c) 2006 - 2010 Broadcom Corporation
+ * Copyright (c) 2006 - 2011 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
index 6bdd25a..54e2e03 100644 (file)
@@ -1,6 +1,6 @@
 /* bnx2i.h: Broadcom NetXtreme II iSCSI driver.
  *
- * Copyright (c) 2006 - 2010 Broadcom Corporation
+ * Copyright (c) 2006 - 2011 Broadcom Corporation
  * Copyright (c) 2007, 2008 Red Hat, Inc.  All rights reserved.
  * Copyright (c) 2007, 2008 Mike Christie
  *
 #include <linux/pci.h>
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
+#include <linux/delay.h>
 #include <linux/sched.h>
 #include <linux/in.h>
 #include <linux/kfifo.h>
 #include <linux/netdevice.h>
 #include <linux/completion.h>
+#include <linux/kthread.h>
+#include <linux/cpu.h>
 
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_device.h>
@@ -202,10 +205,13 @@ struct io_bdt {
 /**
  * bnx2i_cmd - iscsi command structure
  *
+ * @hdr:                iSCSI header
+ * @conn:               iscsi_conn pointer
  * @scsi_cmd:           SCSI-ML task pointer corresponding to this iscsi cmd
  * @sg:                 SG list
  * @io_tbl:             buffer descriptor (BD) table
  * @bd_tbl_dma:         buffer descriptor (BD) table's dma address
+ * @req:                bnx2i specific command request struct
  */
 struct bnx2i_cmd {
        struct iscsi_hdr hdr;
@@ -229,6 +235,7 @@ struct bnx2i_cmd {
  * @gen_pdu:               login/nopout/logout pdu resources
  * @violation_notified:    bit mask used to track iscsi error/warning messages
  *                         already printed out
+ * @work_cnt:              keeps track of the number of outstanding work
  *
  * iSCSI connection structure
  */
@@ -252,6 +259,8 @@ struct bnx2i_conn {
         */
        struct generic_pdu_resc gen_pdu;
        u64 violation_notified;
+
+       atomic_t work_cnt;
 };
 
 
@@ -661,7 +670,6 @@ enum {
  * @hba:                adapter to which this connection belongs
  * @conn:               iscsi connection this EP is linked to
  * @cls_ep:             associated iSCSI endpoint pointer
- * @sess:               iscsi session this EP is linked to
  * @cm_sk:              cnic sock struct
  * @hba_age:            age to detect if 'iscsid' issues ep_disconnect()
  *                      after HBA reset is completed by bnx2i/cnic/bnx2
@@ -687,7 +695,7 @@ struct bnx2i_endpoint {
        u32 hba_age;
        u32 state;
        unsigned long timestamp;
-       int num_active_cmds;
+       atomic_t num_active_cmds;
        u32 ec_shift;
 
        struct qp_info qp;
@@ -700,6 +708,19 @@ struct bnx2i_endpoint {
 };
 
 
+struct bnx2i_work {
+       struct list_head list;
+       struct iscsi_session *session;
+       struct bnx2i_conn *bnx2i_conn;
+       struct cqe cqe;
+};
+
+struct bnx2i_percpu_s {
+       struct task_struct *iothread;
+       struct list_head work_list;
+       spinlock_t p_work_lock;
+};
+
 
 /* Global variables */
 extern unsigned int error_mask1, error_mask2;
@@ -783,7 +804,7 @@ extern struct bnx2i_endpoint *bnx2i_find_ep_in_destroy_list(
                struct bnx2i_hba *hba, u32 iscsi_cid);
 
 extern int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep);
-extern void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action);
+extern int bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action);
 
 extern int bnx2i_hw_ep_disconnect(struct bnx2i_endpoint *bnx2i_ep);
 
@@ -793,4 +814,8 @@ extern void bnx2i_print_active_cmd_queue(struct bnx2i_conn *conn);
 extern void bnx2i_print_xmit_pdu_queue(struct bnx2i_conn *conn);
 extern void bnx2i_print_recv_state(struct bnx2i_conn *conn);
 
+extern int bnx2i_percpu_io_thread(void *arg);
+extern int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session,
+                                      struct bnx2i_conn *bnx2i_conn,
+                                      struct cqe *cqe);
 #endif
index 5c54a2d..28c6693 100644 (file)
@@ -1,6 +1,6 @@
 /* bnx2i_hwi.c: Broadcom NetXtreme II iSCSI driver.
  *
- * Copyright (c) 2006 - 2010 Broadcom Corporation
+ * Copyright (c) 2006 - 2011 Broadcom Corporation
  * Copyright (c) 2007, 2008 Red Hat, Inc.  All rights reserved.
  * Copyright (c) 2007, 2008 Mike Christie
  *
@@ -17,6 +17,8 @@
 #include <scsi/libiscsi.h>
 #include "bnx2i.h"
 
+DECLARE_PER_CPU(struct bnx2i_percpu_s, bnx2i_percpu);
+
 /**
  * bnx2i_get_cid_num - get cid from ep
  * @ep:        endpoint pointer
@@ -131,16 +133,16 @@ static void bnx2i_iscsi_license_error(struct bnx2i_hba *hba, u32 error_code)
  *     the driver. EQ event is generated CQ index is hit or at least 1 CQ is
  *     outstanding and on chip timer expires
  */
-void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action)
+int bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action)
 {
        struct bnx2i_5771x_cq_db *cq_db;
        u16 cq_index;
-       u16 next_index;
+       u16 next_index = 0;
        u32 num_active_cmds;
 
        /* Coalesce CQ entries only on 10G devices */
        if (!test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
-               return;
+               return 0;
 
        /* Do not update CQ DB multiple times before firmware writes
         * '0xFFFF' to CQDB->SQN field. Deviation may cause spurious
@@ -150,16 +152,17 @@ void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action)
 
        if (action != CNIC_ARM_CQE_FP)
                if (cq_db->sqn[0] && cq_db->sqn[0] != 0xFFFF)
-                       return;
+                       return 0;
 
        if (action == CNIC_ARM_CQE || action == CNIC_ARM_CQE_FP) {
-               num_active_cmds = ep->num_active_cmds;
+               num_active_cmds = atomic_read(&ep->num_active_cmds);
                if (num_active_cmds <= event_coal_min)
                        next_index = 1;
-               else
-                       next_index = event_coal_min +
-                                    ((num_active_cmds - event_coal_min) >>
-                                    ep->ec_shift);
+               else {
+                       next_index = num_active_cmds >> ep->ec_shift;
+                       if (next_index > num_active_cmds - event_coal_min)
+                               next_index = num_active_cmds - event_coal_min;
+               }
                if (!next_index)
                        next_index = 1;
                cq_index = ep->qp.cqe_exp_seq_sn + next_index - 1;
@@ -170,6 +173,7 @@ void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action)
 
                cq_db->sqn[0] = cq_index;
        }
+       return next_index;
 }
 
 
@@ -265,7 +269,7 @@ static void bnx2i_ring_sq_dbell(struct bnx2i_conn *bnx2i_conn, int count)
        struct bnx2i_5771x_sq_rq_db *sq_db;
        struct bnx2i_endpoint *ep = bnx2i_conn->ep;
 
-       ep->num_active_cmds++;
+       atomic_inc(&ep->num_active_cmds);
        wmb();  /* flush SQ WQE memory before the doorbell is rung */
        if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
                sq_db = (struct bnx2i_5771x_sq_rq_db *) ep->qp.sq_pgtbl_virt;
@@ -430,7 +434,7 @@ int bnx2i_send_iscsi_tmf(struct bnx2i_conn *bnx2i_conn,
        default:
                tmfabort_wqe->ref_itt = RESERVED_ITT;
        }
-       memcpy(scsi_lun, tmfabort_hdr->lun, sizeof(struct scsi_lun));
+       memcpy(scsi_lun, &tmfabort_hdr->lun, sizeof(struct scsi_lun));
        tmfabort_wqe->lun[0] = be32_to_cpu(scsi_lun[0]);
        tmfabort_wqe->lun[1] = be32_to_cpu(scsi_lun[1]);
 
@@ -547,7 +551,7 @@ int bnx2i_send_iscsi_nopout(struct bnx2i_conn *bnx2i_conn,
 
        nopout_wqe->op_code = nopout_hdr->opcode;
        nopout_wqe->op_attr = ISCSI_FLAG_CMD_FINAL;
-       memcpy(nopout_wqe->lun, nopout_hdr->lun, 8);
+       memcpy(nopout_wqe->lun, &nopout_hdr->lun, 8);
 
        if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
                u32 tmp = nopout_wqe->lun[0];
@@ -1331,14 +1335,15 @@ int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba)
 
 /**
  * bnx2i_process_scsi_cmd_resp - this function handles scsi cmd completion.
- * @conn:      iscsi connection
+ * @session:   iscsi session
+ * @bnx2i_conn:        bnx2i connection
  * @cqe:       pointer to newly DMA'ed CQE entry for processing
  *
  * process SCSI CMD Response CQE & complete the request to SCSI-ML
  */
-static int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session,
-                                      struct bnx2i_conn *bnx2i_conn,
-                                      struct cqe *cqe)
+int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session,
+                               struct bnx2i_conn *bnx2i_conn,
+                               struct cqe *cqe)
 {
        struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
        struct bnx2i_cmd_response *resp_cqe;
@@ -1348,7 +1353,7 @@ static int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session,
        u32 datalen = 0;
 
        resp_cqe = (struct bnx2i_cmd_response *)cqe;
-       spin_lock(&session->lock);
+       spin_lock_bh(&session->lock);
        task = iscsi_itt_to_task(conn,
                                 resp_cqe->itt & ISCSI_CMD_RESPONSE_INDEX);
        if (!task)
@@ -1409,7 +1414,7 @@ done:
        __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr,
                             conn->data, datalen);
 fail:
-       spin_unlock(&session->lock);
+       spin_unlock_bh(&session->lock);
        return 0;
 }
 
@@ -1711,7 +1716,7 @@ static int bnx2i_process_nopin_mesg(struct iscsi_session *session,
                hdr->flags = ISCSI_FLAG_CMD_FINAL;
                hdr->itt = task->hdr->itt;
                hdr->ttt = cpu_to_be32(nop_in->ttt);
-               memcpy(hdr->lun, nop_in->lun, 8);
+               memcpy(&hdr->lun, nop_in->lun, 8);
        }
 done:
        __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
@@ -1754,7 +1759,7 @@ static void bnx2i_process_async_mesg(struct iscsi_session *session,
        resp_hdr->opcode = async_cqe->op_code;
        resp_hdr->flags = 0x80;
 
-       memcpy(resp_hdr->lun, async_cqe->lun, 8);
+       memcpy(&resp_hdr->lun, async_cqe->lun, 8);
        resp_hdr->exp_cmdsn = cpu_to_be32(async_cqe->exp_cmd_sn);
        resp_hdr->max_cmdsn = cpu_to_be32(async_cqe->max_cmd_sn);
 
@@ -1836,21 +1841,136 @@ static void bnx2i_process_cmd_cleanup_resp(struct iscsi_session *session,
 }
 
 
+/**
+ * bnx2i_percpu_io_thread - thread per cpu for ios
+ *
+ * @arg:       ptr to bnx2i_percpu_info structure
+ */
+int bnx2i_percpu_io_thread(void *arg)
+{
+       struct bnx2i_percpu_s *p = arg;
+       struct bnx2i_work *work, *tmp;
+       LIST_HEAD(work_list);
+
+       set_user_nice(current, -20);
+
+       while (!kthread_should_stop()) {
+               spin_lock_bh(&p->p_work_lock);
+               while (!list_empty(&p->work_list)) {
+                       list_splice_init(&p->work_list, &work_list);
+                       spin_unlock_bh(&p->p_work_lock);
+
+                       list_for_each_entry_safe(work, tmp, &work_list, list) {
+                               list_del_init(&work->list);
+                               /* work allocated in the bh, freed here */
+                               bnx2i_process_scsi_cmd_resp(work->session,
+                                                           work->bnx2i_conn,
+                                                           &work->cqe);
+                               atomic_dec(&work->bnx2i_conn->work_cnt);
+                               kfree(work);
+                       }
+                       spin_lock_bh(&p->p_work_lock);
+               }
+               set_current_state(TASK_INTERRUPTIBLE);
+               spin_unlock_bh(&p->p_work_lock);
+               schedule();
+       }
+       __set_current_state(TASK_RUNNING);
+
+       return 0;
+}
+
+
+/**
+ * bnx2i_queue_scsi_cmd_resp - queue cmd completion to the percpu thread
+ * @bnx2i_conn:                bnx2i connection
+ *
+ * this function is called by generic KCQ handler to queue all pending cmd
+ * completion CQEs
+ *
+ * The implementation is to queue the cmd response based on the
+ * last recorded command for the given connection.  The
+ * cpu_id gets recorded upon task_xmit.  No out-of-order completion!
+ */
+static int bnx2i_queue_scsi_cmd_resp(struct iscsi_session *session,
+                                    struct bnx2i_conn *bnx2i_conn,
+                                    struct bnx2i_nop_in_msg *cqe)
+{
+       struct bnx2i_work *bnx2i_work = NULL;
+       struct bnx2i_percpu_s *p = NULL;
+       struct iscsi_task *task;
+       struct scsi_cmnd *sc;
+       int rc = 0;
+       int cpu;
+
+       spin_lock(&session->lock);
+       task = iscsi_itt_to_task(bnx2i_conn->cls_conn->dd_data,
+                                cqe->itt & ISCSI_CMD_RESPONSE_INDEX);
+       if (!task) {
+               spin_unlock(&session->lock);
+               return -EINVAL;
+       }
+       sc = task->sc;
+       spin_unlock(&session->lock);
+
+       if (!blk_rq_cpu_valid(sc->request))
+               cpu = smp_processor_id();
+       else
+               cpu = sc->request->cpu;
+
+       p = &per_cpu(bnx2i_percpu, cpu);
+       spin_lock(&p->p_work_lock);
+       if (unlikely(!p->iothread)) {
+               rc = -EINVAL;
+               goto err;
+       }
+       /* Alloc and copy to the cqe */
+       bnx2i_work = kzalloc(sizeof(struct bnx2i_work), GFP_ATOMIC);
+       if (bnx2i_work) {
+               INIT_LIST_HEAD(&bnx2i_work->list);
+               bnx2i_work->session = session;
+               bnx2i_work->bnx2i_conn = bnx2i_conn;
+               memcpy(&bnx2i_work->cqe, cqe, sizeof(struct cqe));
+               list_add_tail(&bnx2i_work->list, &p->work_list);
+               atomic_inc(&bnx2i_conn->work_cnt);
+               wake_up_process(p->iothread);
+               spin_unlock(&p->p_work_lock);
+               goto done;
+       } else
+               rc = -ENOMEM;
+err:
+       spin_unlock(&p->p_work_lock);
+       bnx2i_process_scsi_cmd_resp(session, bnx2i_conn, (struct cqe *)cqe);
+done:
+       return rc;
+}
+
 
 /**
  * bnx2i_process_new_cqes - process newly DMA'ed CQE's
- * @bnx2i_conn:                iscsi connection
+ * @bnx2i_conn:                bnx2i connection
  *
  * this function is called by generic KCQ handler to process all pending CQE's
  */
-static void bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
+static int bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
 {
        struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
        struct iscsi_session *session = conn->session;
-       struct qp_info *qp = &bnx2i_conn->ep->qp;
+       struct qp_info *qp;
        struct bnx2i_nop_in_msg *nopin;
        int tgt_async_msg;
+       int cqe_cnt = 0;
 
+       if (bnx2i_conn->ep == NULL)
+               return 0;
+
+       qp = &bnx2i_conn->ep->qp;
+
+       if (!qp->cq_virt) {
+               printk(KERN_ALERT "bnx2i (%s): cq resr freed in bh execution!",
+                       bnx2i_conn->hba->netdev->name);
+               goto out;
+       }
        while (1) {
                nopin = (struct bnx2i_nop_in_msg *) qp->cq_cons_qe;
                if (nopin->cq_req_sn != qp->cqe_exp_seq_sn)
@@ -1873,8 +1993,9 @@ static void bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
                switch (nopin->op_code) {
                case ISCSI_OP_SCSI_CMD_RSP:
                case ISCSI_OP_SCSI_DATA_IN:
-                       bnx2i_process_scsi_cmd_resp(session, bnx2i_conn,
-                                                   qp->cq_cons_qe);
+                       /* Run the kthread engine only for data cmds
+                          All other cmds will be completed in this bh! */
+                       bnx2i_queue_scsi_cmd_resp(session, bnx2i_conn, nopin);
                        break;
                case ISCSI_OP_LOGIN_RSP:
                        bnx2i_process_login_resp(session, bnx2i_conn,
@@ -1918,13 +2039,21 @@ static void bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
                        printk(KERN_ALERT "bnx2i: unknown opcode 0x%x\n",
                                          nopin->op_code);
                }
-               if (!tgt_async_msg)
-                       bnx2i_conn->ep->num_active_cmds--;
+               if (!tgt_async_msg) {
+                       if (!atomic_read(&bnx2i_conn->ep->num_active_cmds))
+                               printk(KERN_ALERT "bnx2i (%s): no active cmd! "
+                                      "op 0x%x\n",
+                                      bnx2i_conn->hba->netdev->name,
+                                      nopin->op_code);
+                       else
+                               atomic_dec(&bnx2i_conn->ep->num_active_cmds);
+               }
 cqe_out:
                /* clear out in production version only, till beta keep opcode
                 * field intact, will be helpful in debugging (context dump)
                 * nopin->op_code = 0;
                 */
+               cqe_cnt++;
                qp->cqe_exp_seq_sn++;
                if (qp->cqe_exp_seq_sn == (qp->cqe_size * 2 + 1))
                        qp->cqe_exp_seq_sn = ISCSI_INITIAL_SN;
@@ -1937,6 +2066,8 @@ cqe_out:
                        qp->cq_cons_idx++;
                }
        }
+out:
+       return cqe_cnt;
 }
 
 /**
@@ -1952,6 +2083,7 @@ static void bnx2i_fastpath_notification(struct bnx2i_hba *hba,
 {
        struct bnx2i_conn *bnx2i_conn;
        u32 iscsi_cid;
+       int nxt_idx;
 
        iscsi_cid = new_cqe_kcqe->iscsi_conn_id;
        bnx2i_conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
@@ -1964,9 +2096,12 @@ static void bnx2i_fastpath_notification(struct bnx2i_hba *hba,
                printk(KERN_ALERT "cid #%x - ep not bound\n", iscsi_cid);
                return;
        }
+
        bnx2i_process_new_cqes(bnx2i_conn);
-       bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE_FP);
-       bnx2i_process_new_cqes(bnx2i_conn);
+       nxt_idx = bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep,
+                                               CNIC_ARM_CQE_FP);
+       if (nxt_idx && nxt_idx == bnx2i_process_new_cqes(bnx2i_conn))
+               bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE_FP);
 }
 
 
@@ -2312,7 +2447,7 @@ static void bnx2i_process_ofld_cmpl(struct bnx2i_hba *hba,
                        printk(KERN_ALERT "bnx2i (%s): ofld1 cmpl - invalid "
                                "opcode\n", hba->netdev->name);
                else if (ofld_kcqe->completion_status ==
-                       ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY)
+                        ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY)
                        /* error status code valid only for 5771x chipset */
                        ep->state = EP_STATE_OFLD_FAILED_CID_BUSY;
                else
@@ -2511,7 +2646,7 @@ static void bnx2i_cm_remote_abort(struct cnic_sock *cm_sk)
 
 
 static int bnx2i_send_nl_mesg(void *context, u32 msg_type,
-                              char *buf, u16 buflen)
+                             char *buf, u16 buflen)
 {
        struct bnx2i_hba *hba = context;
        int rc;
index 6adbdc3..4f252a9 100644 (file)
@@ -1,6 +1,6 @@
 /* bnx2i.c: Broadcom NetXtreme II iSCSI driver.
  *
- * Copyright (c) 2006 - 2010 Broadcom Corporation
+ * Copyright (c) 2006 - 2011 Broadcom Corporation
  * Copyright (c) 2007, 2008 Red Hat, Inc.  All rights reserved.
  * Copyright (c) 2007, 2008 Mike Christie
  *
@@ -18,8 +18,8 @@ static struct list_head adapter_list = LIST_HEAD_INIT(adapter_list);
 static u32 adapter_count;
 
 #define DRV_MODULE_NAME                "bnx2i"
-#define DRV_MODULE_VERSION     "2.6.2.3"
-#define DRV_MODULE_RELDATE     "Dec 31, 2010"
+#define DRV_MODULE_VERSION     "2.7.0.3"
+#define DRV_MODULE_RELDATE     "Jun 15, 2011"
 
 static char version[] __devinitdata =
                "Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \
@@ -40,7 +40,7 @@ unsigned int event_coal_min = 24;
 module_param(event_coal_min, int, 0664);
 MODULE_PARM_DESC(event_coal_min, "Event Coalescing Minimum Commands");
 
-unsigned int event_coal_div = 1;
+unsigned int event_coal_div = 2;
 module_param(event_coal_div, int, 0664);
 MODULE_PARM_DESC(event_coal_div, "Event Coalescing Divide Factor");
 
@@ -66,6 +66,15 @@ MODULE_PARM_DESC(rq_size, "Configure RQ size");
 
 u64 iscsi_error_mask = 0x00;
 
+DEFINE_PER_CPU(struct bnx2i_percpu_s, bnx2i_percpu);
+
+static int bnx2i_cpu_callback(struct notifier_block *nfb,
+                             unsigned long action, void *hcpu);
+/* notification function for CPU hotplug events */
+static struct notifier_block bnx2i_cpu_notifier = {
+       .notifier_call = bnx2i_cpu_callback,
+};
+
 
 /**
  * bnx2i_identify_device - identifies NetXtreme II device type
@@ -163,21 +172,14 @@ void bnx2i_start(void *handle)
        struct bnx2i_hba *hba = handle;
        int i = HZ;
 
-       if (!hba->cnic->max_iscsi_conn) {
-               printk(KERN_ALERT "bnx2i: dev %s does not support "
-                       "iSCSI\n", hba->netdev->name);
+       /*
+        * We should never register devices that don't support iSCSI
+        * (see bnx2i_init_one), so something is wrong if we try to
+        * start a iSCSI adapter on hardware with 0 supported iSCSI
+        * connections
+        */
+       BUG_ON(!hba->cnic->max_iscsi_conn);
 
-               if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
-                       mutex_lock(&bnx2i_dev_lock);
-                       list_del_init(&hba->link);
-                       adapter_count--;
-                       hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI);
-                       clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
-                       mutex_unlock(&bnx2i_dev_lock);
-                       bnx2i_free_hba(hba);
-               }
-               return;
-       }
        bnx2i_send_fw_iscsi_init_msg(hba);
        while (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) && i--)
                msleep(BNX2I_INIT_POLL_TIME);
@@ -281,6 +283,13 @@ static int bnx2i_init_one(struct bnx2i_hba *hba, struct cnic_dev *cnic)
        int rc;
 
        mutex_lock(&bnx2i_dev_lock);
+       if (!cnic->max_iscsi_conn) {
+               printk(KERN_ALERT "bnx2i: dev %s does not support "
+                       "iSCSI\n", hba->netdev->name);
+               rc = -EOPNOTSUPP;
+               goto out;
+       }
+
        hba->cnic = cnic;
        rc = cnic->register_device(cnic, CNIC_ULP_ISCSI, hba);
        if (!rc) {
@@ -298,6 +307,7 @@ static int bnx2i_init_one(struct bnx2i_hba *hba, struct cnic_dev *cnic)
        else
                printk(KERN_ERR "bnx2i dev reg, unknown error, %d\n", rc);
 
+out:
        mutex_unlock(&bnx2i_dev_lock);
 
        return rc;
@@ -361,6 +371,91 @@ void bnx2i_ulp_exit(struct cnic_dev *dev)
 }
 
 
+/**
+ * bnx2i_percpu_thread_create - Create a receive thread for an
+ *                             online CPU
+ *
+ * @cpu:       cpu index for the online cpu
+ */
+static void bnx2i_percpu_thread_create(unsigned int cpu)
+{
+       struct bnx2i_percpu_s *p;
+       struct task_struct *thread;
+
+       p = &per_cpu(bnx2i_percpu, cpu);
+
+       thread = kthread_create(bnx2i_percpu_io_thread, (void *)p,
+                               "bnx2i_thread/%d", cpu);
+       /* bind thread to the cpu */
+       if (likely(!IS_ERR(thread))) {
+               kthread_bind(thread, cpu);
+               p->iothread = thread;
+               wake_up_process(thread);
+       }
+}
+
+
+static void bnx2i_percpu_thread_destroy(unsigned int cpu)
+{
+       struct bnx2i_percpu_s *p;
+       struct task_struct *thread;
+       struct bnx2i_work *work, *tmp;
+
+       /* Prevent any new work from being queued for this CPU */
+       p = &per_cpu(bnx2i_percpu, cpu);
+       spin_lock_bh(&p->p_work_lock);
+       thread = p->iothread;
+       p->iothread = NULL;
+
+       /* Free all work in the list */
+       list_for_each_entry_safe(work, tmp, &p->work_list, list) {
+               list_del_init(&work->list);
+               bnx2i_process_scsi_cmd_resp(work->session,
+                                           work->bnx2i_conn, &work->cqe);
+               kfree(work);
+       }
+
+       spin_unlock_bh(&p->p_work_lock);
+       if (thread)
+               kthread_stop(thread);
+}
+
+
+/**
+ * bnx2i_cpu_callback - Handler for CPU hotplug events
+ *
+ * @nfb:       The callback data block
+ * @action:    The event triggering the callback
+ * @hcpu:      The index of the CPU that the event is for
+ *
+ * This creates or destroys per-CPU data for iSCSI
+ *
+ * Returns NOTIFY_OK always.
+ */
+static int bnx2i_cpu_callback(struct notifier_block *nfb,
+                             unsigned long action, void *hcpu)
+{
+       unsigned cpu = (unsigned long)hcpu;
+
+       switch (action) {
+       case CPU_ONLINE:
+       case CPU_ONLINE_FROZEN:
+               printk(KERN_INFO "bnx2i: CPU %x online: Create Rx thread\n",
+                       cpu);
+               bnx2i_percpu_thread_create(cpu);
+               break;
+       case CPU_DEAD:
+       case CPU_DEAD_FROZEN:
+               printk(KERN_INFO "CPU %x offline: Remove Rx thread\n", cpu);
+               bnx2i_percpu_thread_destroy(cpu);
+               break;
+       default:
+               break;
+       }
+       return NOTIFY_OK;
+}
+
+
 /**
  * bnx2i_mod_init - module init entry point
  *
@@ -371,6 +466,8 @@ void bnx2i_ulp_exit(struct cnic_dev *dev)
 static int __init bnx2i_mod_init(void)
 {
        int err;
+       unsigned cpu = 0;
+       struct bnx2i_percpu_s *p;
 
        printk(KERN_INFO "%s", version);
 
@@ -393,6 +490,20 @@ static int __init bnx2i_mod_init(void)
                goto unreg_xport;
        }
 
+       /* Create percpu kernel threads to handle iSCSI I/O completions */
+       for_each_possible_cpu(cpu) {
+               p = &per_cpu(bnx2i_percpu, cpu);
+               INIT_LIST_HEAD(&p->work_list);
+               spin_lock_init(&p->p_work_lock);
+               p->iothread = NULL;
+       }
+
+       for_each_online_cpu(cpu)
+               bnx2i_percpu_thread_create(cpu);
+
+       /* Initialize per CPU interrupt thread */
+       register_hotcpu_notifier(&bnx2i_cpu_notifier);
+
        return 0;
 
 unreg_xport:
@@ -413,6 +524,7 @@ out:
 static void __exit bnx2i_mod_exit(void)
 {
        struct bnx2i_hba *hba;
+       unsigned cpu = 0;
 
        mutex_lock(&bnx2i_dev_lock);
        while (!list_empty(&adapter_list)) {
@@ -430,6 +542,11 @@ static void __exit bnx2i_mod_exit(void)
        }
        mutex_unlock(&bnx2i_dev_lock);
 
+       unregister_hotcpu_notifier(&bnx2i_cpu_notifier);
+
+       for_each_online_cpu(cpu)
+               bnx2i_percpu_thread_destroy(cpu);
+
        iscsi_unregister_transport(&bnx2i_iscsi_transport);
        cnic_unregister_driver(CNIC_ULP_ISCSI);
 }
index 041928b..5c55a75 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * bnx2i_iscsi.c: Broadcom NetXtreme II iSCSI driver.
  *
- * Copyright (c) 2006 - 2010 Broadcom Corporation
+ * Copyright (c) 2006 - 2011 Broadcom Corporation
  * Copyright (c) 2007, 2008 Red Hat, Inc.  All rights reserved.
  * Copyright (c) 2007, 2008 Mike Christie
  *
@@ -27,6 +27,7 @@ static struct scsi_host_template bnx2i_host_template;
  */
 static DEFINE_SPINLOCK(bnx2i_resc_lock); /* protects global resources */
 
+DECLARE_PER_CPU(struct bnx2i_percpu_s, bnx2i_percpu);
 
 static int bnx2i_adapter_ready(struct bnx2i_hba *hba)
 {
@@ -1214,7 +1215,8 @@ static int bnx2i_task_xmit(struct iscsi_task *task)
        struct bnx2i_cmd *cmd = task->dd_data;
        struct iscsi_cmd *hdr = (struct iscsi_cmd *) task->hdr;
 
-       if (bnx2i_conn->ep->num_active_cmds + 1 > hba->max_sqes)
+       if (atomic_read(&bnx2i_conn->ep->num_active_cmds) + 1  >
+           hba->max_sqes)
                return -ENOMEM;
 
        /*
@@ -1354,6 +1356,9 @@ bnx2i_conn_create(struct iscsi_cls_session *cls_session, uint32_t cid)
        bnx2i_conn = conn->dd_data;
        bnx2i_conn->cls_conn = cls_conn;
        bnx2i_conn->hba = hba;
+
+       atomic_set(&bnx2i_conn->work_cnt, 0);
+
        /* 'ep' ptr will be assigned in bind() call */
        bnx2i_conn->ep = NULL;
        init_completion(&bnx2i_conn->cmd_cleanup_cmpl);
@@ -1457,11 +1462,34 @@ static void bnx2i_conn_destroy(struct iscsi_cls_conn *cls_conn)
        struct bnx2i_conn *bnx2i_conn = conn->dd_data;
        struct Scsi_Host *shost;
        struct bnx2i_hba *hba;
+       struct bnx2i_work *work, *tmp;
+       unsigned cpu = 0;
+       struct bnx2i_percpu_s *p;
 
        shost = iscsi_session_to_shost(iscsi_conn_to_session(cls_conn));
        hba = iscsi_host_priv(shost);
 
        bnx2i_conn_free_login_resources(hba, bnx2i_conn);
+
+       if (atomic_read(&bnx2i_conn->work_cnt)) {
+               for_each_online_cpu(cpu) {
+                       p = &per_cpu(bnx2i_percpu, cpu);
+                       spin_lock_bh(&p->p_work_lock);
+                       list_for_each_entry_safe(work, tmp,
+                                                &p->work_list, list) {
+                               if (work->session == conn->session &&
+                                   work->bnx2i_conn == bnx2i_conn) {
+                                       list_del_init(&work->list);
+                                       kfree(work);
+                                       if (!atomic_dec_and_test(
+                                                       &bnx2i_conn->work_cnt))
+                                               break;
+                               }
+                       }
+                       spin_unlock_bh(&p->p_work_lock);
+               }
+       }
+
        iscsi_conn_teardown(cls_conn);
 }
 
@@ -1769,7 +1797,7 @@ static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost,
        }
        bnx2i_ep = ep->dd_data;
 
-       bnx2i_ep->num_active_cmds = 0;
+       atomic_set(&bnx2i_ep->num_active_cmds, 0);
        iscsi_cid = bnx2i_alloc_iscsi_cid(hba);
        if (iscsi_cid == -1) {
                printk(KERN_ALERT "bnx2i (%s): alloc_ep - unable to allocate "
@@ -2163,9 +2191,9 @@ static struct scsi_host_template bnx2i_host_template = {
        .eh_device_reset_handler = iscsi_eh_device_reset,
        .eh_target_reset_handler = iscsi_eh_recover_target,
        .change_queue_depth     = iscsi_change_queue_depth,
-       .can_queue              = 1024,
+       .can_queue              = 2048,
        .max_sectors            = 127,
-       .cmd_per_lun            = 24,
+       .cmd_per_lun            = 128,
        .this_id                = -1,
        .use_clustering         = ENABLE_CLUSTERING,
        .sg_tablesize           = ISCSI_MAX_BDS_PER_CMD,
index 9174196..83a77f7 100644 (file)
@@ -1,6 +1,6 @@
 /* bnx2i_sysfs.c: Broadcom NetXtreme II iSCSI driver.
  *
- * Copyright (c) 2004 - 2010 Broadcom Corporation
+ * Copyright (c) 2004 - 2011 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
index fc2cdb6..ffb9eea 100644 (file)
@@ -1245,7 +1245,7 @@ static int cxgb3i_ddp_init(struct cxgbi_device *cdev)
        struct cxgbi_ddp_info *ddp = tdev->ulp_iscsi;
        struct ulp_iscsi_info uinfo;
        unsigned int pgsz_factor[4];
-       int err;
+       int i, err;
 
        if (ddp) {
                kref_get(&ddp->refcnt);
@@ -1271,6 +1271,8 @@ static int cxgb3i_ddp_init(struct cxgbi_device *cdev)
 
        uinfo.tagmask = ddp->idx_mask << PPOD_IDX_SHIFT;
        cxgbi_ddp_page_size_factor(pgsz_factor);
+       for (i = 0; i < 4; i++)
+               uinfo.pgsz_factor[i] = pgsz_factor[i];
        uinfo.ulimit = uinfo.llimit + (ddp->nppods << PPOD_SIZE_SHIFT);
 
        err = tdev->ctl(tdev, ULP_ISCSI_SET_PARAMS, &uinfo);
index 155d7b9..204fa8d 100644 (file)
@@ -99,7 +99,8 @@ static void fcoe_destroy_work(struct work_struct *);
 static int fcoe_ddp_setup(struct fc_lport *, u16, struct scatterlist *,
                          unsigned int);
 static int fcoe_ddp_done(struct fc_lport *, u16);
-
+static int fcoe_ddp_target(struct fc_lport *, u16, struct scatterlist *,
+                          unsigned int);
 static int fcoe_cpu_callback(struct notifier_block *, unsigned long, void *);
 
 static bool fcoe_match(struct net_device *netdev);
@@ -143,6 +144,7 @@ static struct libfc_function_template fcoe_libfc_fcn_templ = {
        .frame_send = fcoe_xmit,
        .ddp_setup = fcoe_ddp_setup,
        .ddp_done = fcoe_ddp_done,
+       .ddp_target = fcoe_ddp_target,
        .elsct_send = fcoe_elsct_send,
        .get_lesb = fcoe_get_lesb,
        .lport_set_port_id = fcoe_set_port_id,
@@ -429,21 +431,6 @@ void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
        struct fcoe_ctlr *fip = &fcoe->ctlr;
        u8 flogi_maddr[ETH_ALEN];
        const struct net_device_ops *ops;
-       struct fcoe_port *port = lport_priv(fcoe->ctlr.lp);
-
-       FCOE_NETDEV_DBG(netdev, "Destroying interface\n");
-
-       /* Logout of the fabric */
-       fc_fabric_logoff(fcoe->ctlr.lp);
-
-       /* Cleanup the fc_lport */
-       fc_lport_destroy(fcoe->ctlr.lp);
-
-       /* Stop the transmit retry timer */
-       del_timer_sync(&port->timer);
-
-       /* Free existing transmit skbs */
-       fcoe_clean_pending_queue(fcoe->ctlr.lp);
 
        /*
         * Don't listen for Ethernet packets anymore.
@@ -466,9 +453,6 @@ void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
        } else
                dev_mc_del(netdev, FIP_ALL_ENODE_MACS);
 
-       if (!is_zero_ether_addr(port->data_src_addr))
-               dev_uc_del(netdev, port->data_src_addr);
-
        /* Tell the LLD we are done w/ FCoE */
        ops = netdev->netdev_ops;
        if (ops->ndo_fcoe_disable) {
@@ -476,6 +460,8 @@ void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
                        FCOE_NETDEV_DBG(netdev, "Failed to disable FCoE"
                                        " specific feature for LLD.\n");
        }
+
+       /* Release the self-reference taken during fcoe_interface_create() */
        fcoe_interface_put(fcoe);
 }
 
@@ -749,12 +735,27 @@ static int fcoe_shost_config(struct fc_lport *lport, struct device *dev)
  * The offload EM that this routine is associated with will handle any
  * packets that are for SCSI read requests.
  *
+ * This has been enhanced to work when FCoE stack is operating in target
+ * mode.
+ *
  * Returns: True for read types I/O, otherwise returns false.
  */
 bool fcoe_oem_match(struct fc_frame *fp)
 {
-       return fc_fcp_is_read(fr_fsp(fp)) &&
-               (fr_fsp(fp)->data_len > fcoe_ddp_min);
+       struct fc_frame_header *fh = fc_frame_header_get(fp);
+       struct fcp_cmnd *fcp;
+
+       if (fc_fcp_is_read(fr_fsp(fp)) &&
+           (fr_fsp(fp)->data_len > fcoe_ddp_min))
+               return true;
+       else if (!(ntoh24(fh->fh_f_ctl) & FC_FC_EX_CTX)) {
+               fcp = fc_frame_payload_get(fp, sizeof(*fcp));
+               if (ntohs(fh->fh_rx_id) == FC_XID_UNKNOWN &&
+                   fcp && (ntohl(fcp->fc_dl) > fcoe_ddp_min) &&
+                   (fcp->fc_flags & FCP_CFL_WRDATA))
+                       return true;
+       }
+       return false;
 }
 
 /**
@@ -844,6 +845,32 @@ skip_oem:
  */
 static void fcoe_if_destroy(struct fc_lport *lport)
 {
+       struct fcoe_port *port = lport_priv(lport);
+       struct fcoe_interface *fcoe = port->priv;
+       struct net_device *netdev = fcoe->netdev;
+
+       FCOE_NETDEV_DBG(netdev, "Destroying interface\n");
+
+       /* Logout of the fabric */
+       fc_fabric_logoff(lport);
+
+       /* Cleanup the fc_lport */
+       fc_lport_destroy(lport);
+
+       /* Stop the transmit retry timer */
+       del_timer_sync(&port->timer);
+
+       /* Free existing transmit skbs */
+       fcoe_clean_pending_queue(lport);
+
+       rtnl_lock();
+       if (!is_zero_ether_addr(port->data_src_addr))
+               dev_uc_del(netdev, port->data_src_addr);
+       rtnl_unlock();
+
+       /* Release reference held in fcoe_if_create() */
+       fcoe_interface_put(fcoe);
+
        /* Free queued packets for the per-CPU receive threads */
        fcoe_percpu_clean(lport);
 
@@ -886,6 +913,28 @@ static int fcoe_ddp_setup(struct fc_lport *lport, u16 xid,
        return 0;
 }
 
+/**
+ * fcoe_ddp_target() - Call a LLD's ddp_target through the net device
+ * @lport: The local port to setup DDP for
+ * @xid:   The exchange ID for this DDP transfer
+ * @sgl:   The scatterlist describing this transfer
+ * @sgc:   The number of sg items
+ *
+ * Returns: 0 if the DDP context was not configured
+ */
+static int fcoe_ddp_target(struct fc_lport *lport, u16 xid,
+                          struct scatterlist *sgl, unsigned int sgc)
+{
+       struct net_device *netdev = fcoe_netdev(lport);
+
+       if (netdev->netdev_ops->ndo_fcoe_ddp_target)
+               return netdev->netdev_ops->ndo_fcoe_ddp_target(netdev, xid,
+                                                              sgl, sgc);
+
+       return 0;
+}
+
+
 /**
  * fcoe_ddp_done() - Call a LLD's ddp_done through the net device
  * @lport: The local port to complete DDP on
@@ -1205,6 +1254,36 @@ static int fcoe_cpu_callback(struct notifier_block *nfb,
        return NOTIFY_OK;
 }
 
+/**
+ * fcoe_select_cpu() - Selects CPU to handle post-processing of incoming
+ *                     command.
+ * @curr_cpu:   CPU which received request
+ *
+ * This routine selects next CPU based on cpumask.
+ *
+ * Returns: int (CPU number). Caller to verify if returned CPU is online or not.
+ */
+static unsigned int fcoe_select_cpu(unsigned int curr_cpu)
+{
+       static unsigned int selected_cpu;
+
+       if (num_online_cpus() == 1)
+               return curr_cpu;
+       /*
+        * Doing following check, to skip "curr_cpu (smp_processor_id)"
+        * from selection of CPU is intentional. This is to avoid same CPU
+        * doing post-processing of command. "curr_cpu" to just receive
+        * incoming request in case where rx_id is UNKNOWN and all other
+        * CPU to actually process the command(s)
+        */
+       do {
+               selected_cpu = cpumask_next(selected_cpu, cpu_online_mask);
+               if (selected_cpu >= nr_cpu_ids)
+                       selected_cpu = cpumask_first(cpu_online_mask);
+       } while (selected_cpu == curr_cpu);
+       return selected_cpu;
+}
+
 /**
  * fcoe_rcv() - Receive packets from a net device
  * @skb:    The received packet
@@ -1281,9 +1360,20 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
         */
        if (ntoh24(fh->fh_f_ctl) & FC_FC_EX_CTX)
                cpu = ntohs(fh->fh_ox_id) & fc_cpu_mask;
-       else
+       else {
                cpu = smp_processor_id();
 
+               if ((fh->fh_type == FC_TYPE_FCP) &&
+                   (ntohs(fh->fh_rx_id) == FC_XID_UNKNOWN)) {
+                       do {
+                               cpu = fcoe_select_cpu(cpu);
+                       } while (!cpu_online(cpu));
+               } else  if ((fh->fh_type == FC_TYPE_FCP) &&
+                           (ntohs(fh->fh_rx_id) != FC_XID_UNKNOWN)) {
+                       cpu = ntohs(fh->fh_rx_id) & fc_cpu_mask;
+               } else
+                       cpu = smp_processor_id();
+       }
        fps = &per_cpu(fcoe_percpu, cpu);
        spin_lock_bh(&fps->fcoe_rx_list.lock);
        if (unlikely(!fps->thread)) {
@@ -1733,7 +1823,6 @@ static int fcoe_device_notification(struct notifier_block *notifier,
        case NETDEV_UNREGISTER:
                list_del(&fcoe->list);
                port = lport_priv(fcoe->ctlr.lp);
-               fcoe_interface_cleanup(fcoe);
                queue_work(fcoe_wq, &port->destroy_work);
                goto out;
                break;
@@ -1827,22 +1916,22 @@ static int fcoe_destroy(struct net_device *netdev)
 {
        struct fcoe_interface *fcoe;
        struct fc_lport *lport;
+       struct fcoe_port *port;
        int rc = 0;
 
        mutex_lock(&fcoe_config_mutex);
        rtnl_lock();
        fcoe = fcoe_hostlist_lookup_port(netdev);
        if (!fcoe) {
-               rtnl_unlock();
                rc = -ENODEV;
                goto out_nodev;
        }
        lport = fcoe->ctlr.lp;
+       port = lport_priv(lport);
        list_del(&fcoe->list);
-       fcoe_interface_cleanup(fcoe);
-       rtnl_unlock();
-       fcoe_if_destroy(lport);
+       queue_work(fcoe_wq, &port->destroy_work);
 out_nodev:
+       rtnl_unlock();
        mutex_unlock(&fcoe_config_mutex);
        return rc;
 }
@@ -1854,10 +1943,25 @@ out_nodev:
 static void fcoe_destroy_work(struct work_struct *work)
 {
        struct fcoe_port *port;
+       struct fcoe_interface *fcoe;
+       int npiv = 0;
 
        port = container_of(work, struct fcoe_port, destroy_work);
        mutex_lock(&fcoe_config_mutex);
+
+       /* set if this is an NPIV port */
+       npiv = port->lport->vport ? 1 : 0;
+
+       fcoe = port->priv;
        fcoe_if_destroy(port->lport);
+
+       /* Do not tear down the fcoe interface for NPIV port */
+       if (!npiv) {
+               rtnl_lock();
+               fcoe_interface_cleanup(fcoe);
+               rtnl_unlock();
+       }
+
        mutex_unlock(&fcoe_config_mutex);
 }
 
@@ -1886,7 +1990,7 @@ static bool fcoe_match(struct net_device *netdev)
  */
 static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
 {
-       int rc;
+       int rc = 0;
        struct fcoe_interface *fcoe;
        struct fc_lport *lport;
 
@@ -1911,7 +2015,7 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
                       netdev->name);
                rc = -EIO;
                fcoe_interface_cleanup(fcoe);
-               goto out_free;
+               goto out_nodev;
        }
 
        /* Make this the "master" N_Port */
@@ -1926,17 +2030,6 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
        if (!fcoe_link_ok(lport))
                fcoe_ctlr_link_up(&fcoe->ctlr);
 
-       /*
-        * Release from init in fcoe_interface_create(), on success lport
-        * should be holding a reference taken in fcoe_if_create().
-        */
-       fcoe_interface_put(fcoe);
-       rtnl_unlock();
-       mutex_unlock(&fcoe_config_mutex);
-
-       return 0;
-out_free:
-       fcoe_interface_put(fcoe);
 out_nodev:
        rtnl_unlock();
        mutex_unlock(&fcoe_config_mutex);
@@ -2218,7 +2311,6 @@ static void __exit fcoe_exit(void)
        list_for_each_entry_safe(fcoe, tmp, &fcoe_hostlist, list) {
                list_del(&fcoe->list);
                port = lport_priv(fcoe->ctlr.lp);
-               fcoe_interface_cleanup(fcoe);
                queue_work(fcoe_wq, &port->destroy_work);
        }
        rtnl_unlock();
index 671cde9..95a5ba2 100644 (file)
@@ -37,7 +37,7 @@
 
 #define DRV_NAME               "fnic"
 #define DRV_DESCRIPTION                "Cisco FCoE HBA Driver"
-#define DRV_VERSION            "1.5.0.1"
+#define DRV_VERSION            "1.5.0.2"
 #define PFX                    DRV_NAME ": "
 #define DFX                     DRV_NAME "%d: "
 
index bb63f1a..fc98eb6 100644 (file)
@@ -388,17 +388,6 @@ static void fnic_iounmap(struct fnic *fnic)
                iounmap(fnic->bar0.vaddr);
 }
 
-/*
- * Allocate element for mempools requiring GFP_DMA flag.
- * Otherwise, checks in kmem_flagcheck() hit BUG_ON().
- */
-static void *fnic_alloc_slab_dma(gfp_t gfp_mask, void *pool_data)
-{
-       struct kmem_cache *mem = pool_data;
-
-       return kmem_cache_alloc(mem, gfp_mask | GFP_ATOMIC | GFP_DMA);
-}
-
 /**
  * fnic_get_mac() - get assigned data MAC address for FIP code.
  * @lport:     local port.
@@ -603,14 +592,12 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
        if (!fnic->io_req_pool)
                goto err_out_free_resources;
 
-       pool = mempool_create(2, fnic_alloc_slab_dma, mempool_free_slab,
-                             fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
+       pool = mempool_create_slab_pool(2, fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
        if (!pool)
                goto err_out_free_ioreq_pool;
        fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT] = pool;
 
-       pool = mempool_create(2, fnic_alloc_slab_dma, mempool_free_slab,
-                             fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
+       pool = mempool_create_slab_pool(2, fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
        if (!pool)
                goto err_out_free_dflt_pool;
        fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX] = pool;
@@ -876,7 +863,7 @@ static int __init fnic_init_module(void)
        len = sizeof(struct fnic_dflt_sgl_list);
        fnic_sgl_cache[FNIC_SGL_CACHE_DFLT] = kmem_cache_create
                ("fnic_sgl_dflt", len + FNIC_SG_DESC_ALIGN, FNIC_SG_DESC_ALIGN,
-                SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA,
+                SLAB_HWCACHE_ALIGN,
                 NULL);
        if (!fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]) {
                printk(KERN_ERR PFX "failed to create fnic dflt sgl slab\n");
@@ -888,7 +875,7 @@ static int __init fnic_init_module(void)
        len = sizeof(struct fnic_sgl_list);
        fnic_sgl_cache[FNIC_SGL_CACHE_MAX] = kmem_cache_create
                ("fnic_sgl_max", len + FNIC_SG_DESC_ALIGN, FNIC_SG_DESC_ALIGN,
-                SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA,
+                SLAB_HWCACHE_ALIGN,
                 NULL);
        if (!fnic_sgl_cache[FNIC_SGL_CACHE_MAX]) {
                printk(KERN_ERR PFX "failed to create fnic max sgl slab\n");
index 538b31c..c40ce52 100644 (file)
@@ -406,7 +406,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
        if (sg_count) {
                io_req->sgl_list =
                        mempool_alloc(fnic->io_sgl_pool[io_req->sgl_type],
-                                     GFP_ATOMIC | GFP_DMA);
+                                     GFP_ATOMIC);
                if (!io_req->sgl_list) {
                        ret = SCSI_MLQUEUE_HOST_BUSY;
                        scsi_dma_unmap(sc);
index df6bff7..89700cb 100644 (file)
@@ -64,7 +64,8 @@ static void iscsi_boot_kobj_release(struct kobject *kobj)
        struct iscsi_boot_kobj *boot_kobj =
                        container_of(kobj, struct iscsi_boot_kobj, kobj);
 
-       kfree(boot_kobj->data);
+       if (boot_kobj->release)
+               boot_kobj->release(boot_kobj->data);
        kfree(boot_kobj);
 }
 
@@ -305,7 +306,8 @@ iscsi_boot_create_kobj(struct iscsi_boot_kset *boot_kset,
                       struct attribute_group *attr_group,
                       const char *name, int index, void *data,
                       ssize_t (*show) (void *data, int type, char *buf),
-                      mode_t (*is_visible) (void *data, int type))
+                      mode_t (*is_visible) (void *data, int type),
+                      void (*release) (void *data))
 {
        struct iscsi_boot_kobj *boot_kobj;
 
@@ -323,6 +325,7 @@ iscsi_boot_create_kobj(struct iscsi_boot_kset *boot_kset,
        boot_kobj->data = data;
        boot_kobj->show = show;
        boot_kobj->is_visible = is_visible;
+       boot_kobj->release = release;
 
        if (sysfs_create_group(&boot_kobj->kobj, attr_group)) {
                /*
@@ -331,7 +334,7 @@ iscsi_boot_create_kobj(struct iscsi_boot_kset *boot_kset,
                 * the boot kobj was not setup and the normal release
                 * path is not being run.
                 */
-               boot_kobj->data = NULL;
+               boot_kobj->release = NULL;
                kobject_put(&boot_kobj->kobj);
                return NULL;
        }
@@ -357,6 +360,7 @@ static void iscsi_boot_remove_kobj(struct iscsi_boot_kobj *boot_kobj)
  * @data: driver specific data for target
  * @show: attr show function
  * @is_visible: attr visibility function
+ * @release: release function
  *
  * Note: The boot sysfs lib will free the data passed in for the caller
  * when all refs to the target kobject have been released.
@@ -365,10 +369,12 @@ struct iscsi_boot_kobj *
 iscsi_boot_create_target(struct iscsi_boot_kset *boot_kset, int index,
                         void *data,
                         ssize_t (*show) (void *data, int type, char *buf),
-                        mode_t (*is_visible) (void *data, int type))
+                        mode_t (*is_visible) (void *data, int type),
+                        void (*release) (void *data))
 {
        return iscsi_boot_create_kobj(boot_kset, &iscsi_boot_target_attr_group,
-                                     "target%d", index, data, show, is_visible);
+                                     "target%d", index, data, show, is_visible,
+                                     release);
 }
 EXPORT_SYMBOL_GPL(iscsi_boot_create_target);
 
@@ -379,6 +385,7 @@ EXPORT_SYMBOL_GPL(iscsi_boot_create_target);
  * @data: driver specific data
  * @show: attr show function
  * @is_visible: attr visibility function
+ * @release: release function
  *
  * Note: The boot sysfs lib will free the data passed in for the caller
  * when all refs to the initiator kobject have been released.
@@ -387,12 +394,13 @@ struct iscsi_boot_kobj *
 iscsi_boot_create_initiator(struct iscsi_boot_kset *boot_kset, int index,
                            void *data,
                            ssize_t (*show) (void *data, int type, char *buf),
-                           mode_t (*is_visible) (void *data, int type))
+                           mode_t (*is_visible) (void *data, int type),
+                           void (*release) (void *data))
 {
        return iscsi_boot_create_kobj(boot_kset,
                                      &iscsi_boot_initiator_attr_group,
                                      "initiator", index, data, show,
-                                     is_visible);
+                                     is_visible, release);
 }
 EXPORT_SYMBOL_GPL(iscsi_boot_create_initiator);
 
@@ -403,6 +411,7 @@ EXPORT_SYMBOL_GPL(iscsi_boot_create_initiator);
  * @data: driver specific data
  * @show: attr show function
  * @is_visible: attr visibility function
+ * @release: release function
  *
  * Note: The boot sysfs lib will free the data passed in for the caller
  * when all refs to the ethernet kobject have been released.
@@ -411,12 +420,13 @@ struct iscsi_boot_kobj *
 iscsi_boot_create_ethernet(struct iscsi_boot_kset *boot_kset, int index,
                           void *data,
                           ssize_t (*show) (void *data, int type, char *buf),
-                          mode_t (*is_visible) (void *data, int type))
+                          mode_t (*is_visible) (void *data, int type),
+                          void (*release) (void *data))
 {
        return iscsi_boot_create_kobj(boot_kset,
                                      &iscsi_boot_ethernet_attr_group,
                                      "ethernet%d", index, data, show,
-                                     is_visible);
+                                     is_visible, release);
 }
 EXPORT_SYMBOL_GPL(iscsi_boot_create_ethernet);
 
@@ -472,6 +482,9 @@ void iscsi_boot_destroy_kset(struct iscsi_boot_kset *boot_kset)
 {
        struct iscsi_boot_kobj *boot_kobj, *tmp_kobj;
 
+       if (!boot_kset)
+               return;
+
        list_for_each_entry_safe(boot_kobj, tmp_kobj,
                                 &boot_kset->kobj_list, list)
                iscsi_boot_remove_kobj(boot_kobj);
index 3df9853..7724414 100644 (file)
@@ -107,10 +107,12 @@ static int iscsi_sw_tcp_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
  * If the socket is in CLOSE or CLOSE_WAIT we should
  * not close the connection if there is still some
  * data pending.
+ *
+ * Must be called with sk_callback_lock.
  */
 static inline int iscsi_sw_sk_state_check(struct sock *sk)
 {
-       struct iscsi_conn *conn = (struct iscsi_conn*)sk->sk_user_data;
+       struct iscsi_conn *conn = sk->sk_user_data;
 
        if ((sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) &&
            !atomic_read(&sk->sk_rmem_alloc)) {
@@ -123,11 +125,17 @@ static inline int iscsi_sw_sk_state_check(struct sock *sk)
 
 static void iscsi_sw_tcp_data_ready(struct sock *sk, int flag)
 {
-       struct iscsi_conn *conn = sk->sk_user_data;
-       struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+       struct iscsi_conn *conn;
+       struct iscsi_tcp_conn *tcp_conn;
        read_descriptor_t rd_desc;
 
        read_lock(&sk->sk_callback_lock);
+       conn = sk->sk_user_data;
+       if (!conn) {
+               read_unlock(&sk->sk_callback_lock);
+               return;
+       }
+       tcp_conn = conn->dd_data;
 
        /*
         * Use rd_desc to pass 'conn' to iscsi_tcp_recv.
@@ -141,11 +149,10 @@ static void iscsi_sw_tcp_data_ready(struct sock *sk, int flag)
 
        iscsi_sw_sk_state_check(sk);
 
-       read_unlock(&sk->sk_callback_lock);
-
        /* If we had to (atomically) map a highmem page,
         * unmap it now. */
        iscsi_tcp_segment_unmap(&tcp_conn->in.segment);
+       read_unlock(&sk->sk_callback_lock);
 }
 
 static void iscsi_sw_tcp_state_change(struct sock *sk)
@@ -157,8 +164,11 @@ static void iscsi_sw_tcp_state_change(struct sock *sk)
        void (*old_state_change)(struct sock *);
 
        read_lock(&sk->sk_callback_lock);
-
-       conn = (struct iscsi_conn*)sk->sk_user_data;
+       conn = sk->sk_user_data;
+       if (!conn) {
+               read_unlock(&sk->sk_callback_lock);
+               return;
+       }
        session = conn->session;
 
        iscsi_sw_sk_state_check(sk);
@@ -178,11 +188,25 @@ static void iscsi_sw_tcp_state_change(struct sock *sk)
  **/
 static void iscsi_sw_tcp_write_space(struct sock *sk)
 {
-       struct iscsi_conn *conn = (struct iscsi_conn*)sk->sk_user_data;
-       struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
-       struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
+       struct iscsi_conn *conn;
+       struct iscsi_tcp_conn *tcp_conn;
+       struct iscsi_sw_tcp_conn *tcp_sw_conn;
+       void (*old_write_space)(struct sock *);
+
+       read_lock_bh(&sk->sk_callback_lock);
+       conn = sk->sk_user_data;
+       if (!conn) {
+               read_unlock_bh(&sk->sk_callback_lock);
+               return;
+       }
+
+       tcp_conn = conn->dd_data;
+       tcp_sw_conn = tcp_conn->dd_data;
+       old_write_space = tcp_sw_conn->old_write_space;
+       read_unlock_bh(&sk->sk_callback_lock);
+
+       old_write_space(sk);
 
-       tcp_sw_conn->old_write_space(sk);
        ISCSI_SW_TCP_DBG(conn, "iscsi_write_space\n");
        iscsi_conn_queue_work(conn);
 }
@@ -592,20 +616,17 @@ static void iscsi_sw_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
        /* userspace may have goofed up and not bound us */
        if (!sock)
                return;
-       /*
-        * Make sure our recv side is stopped.
-        * Older tools called conn stop before ep_disconnect
-        * so IO could still be coming in.
-        */
-       write_lock_bh(&tcp_sw_conn->sock->sk->sk_callback_lock);
-       set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
-       write_unlock_bh(&tcp_sw_conn->sock->sk->sk_callback_lock);
 
        sock->sk->sk_err = EIO;
        wake_up_interruptible(sk_sleep(sock->sk));
 
-       iscsi_conn_stop(cls_conn, flag);
+       /* stop xmit side */
+       iscsi_suspend_tx(conn);
+
+       /* stop recv side and release socket */
        iscsi_sw_tcp_release_conn(conn);
+
+       iscsi_conn_stop(cls_conn, flag);
 }
 
 static int
index 3b8a645..f5a0665 100644 (file)
@@ -965,8 +965,30 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
                sp = &ep->seq;
                if (sp->id != fh->fh_seq_id) {
                        atomic_inc(&mp->stats.seq_not_found);
-                       reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
-                       goto rel;
+                       if (f_ctl & FC_FC_END_SEQ) {
+                               /*
+                                * Update sequence_id based on incoming last
+                                * frame of sequence exchange. This is needed
+                                * for FCoE target where DDP has been used
+                                * on target where, stack is indicated only
+                                * about last frame's (payload _header) header.
+                                * Whereas "seq_id" which is part of
+                                * frame_header is allocated by initiator
+                                * which is totally different from "seq_id"
+                                * allocated when XFER_RDY was sent by target.
+                                * To avoid false -ve which results into not
+                                * sending RSP, hence write request on other
+                                * end never finishes.
+                                */
+                               spin_lock_bh(&ep->ex_lock);
+                               sp->ssb_stat |= SSB_ST_RESP;
+                               sp->id = fh->fh_seq_id;
+                               spin_unlock_bh(&ep->ex_lock);
+                       } else {
+                               /* sequence/exch should exist */
+                               reject = FC_RJT_SEQ_ID;
+                               goto rel;
+                       }
                }
        }
        WARN_ON(ep != fc_seq_exch(sp));
index 389ab80..e008b16 100644 (file)
@@ -1025,6 +1025,8 @@ static void fc_lport_enter_reset(struct fc_lport *lport)
                        fc_vport_set_state(lport->vport, FC_VPORT_LINKDOWN);
        }
        fc_lport_state_enter(lport, LPORT_ST_RESET);
+       fc_host_post_event(lport->host, fc_get_event_number(),
+                          FCH_EVT_LIPRESET, 0);
        fc_vports_linkchange(lport);
        fc_lport_reset_locked(lport);
        if (lport->link_up)
index 49e1ccc..3b66937 100644 (file)
@@ -801,6 +801,20 @@ static void fc_rport_recv_flogi_req(struct fc_lport *lport,
 
        switch (rdata->rp_state) {
        case RPORT_ST_INIT:
+               /*
+                * If received the FLOGI request on RPORT which is INIT state
+                * (means not transition to FLOGI either fc_rport timeout
+                * function didn;t trigger or this end hasn;t received
+                * beacon yet from other end. In that case only, allow RPORT
+                * state machine to continue, otherwise fall through which
+                * causes the code to send reject response.
+                * NOTE; Not checking for FIP->state such as VNMP_UP or
+                * VNMP_CLAIM because if FIP state is not one of those,
+                * RPORT wouldn;t have created and 'rport_lookup' would have
+                * failed anyway in that case.
+                */
+               if (lport->point_to_multipoint)
+                       break;
        case RPORT_ST_DELETE:
                mutex_unlock(&rdata->rp_mutex);
                rjt_data.reason = ELS_RJT_FIP;
index 0c550d5..d7a4120 100644 (file)
@@ -169,7 +169,7 @@ void iscsi_prep_data_out_pdu(struct iscsi_task *task, struct iscsi_r2t_info *r2t
        hdr->datasn = cpu_to_be32(r2t->datasn);
        r2t->datasn++;
        hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
-       memcpy(hdr->lun, task->lun, sizeof(hdr->lun));
+       hdr->lun = task->lun;
        hdr->itt = task->hdr_itt;
        hdr->exp_statsn = r2t->exp_statsn;
        hdr->offset = cpu_to_be32(r2t->data_offset + r2t->sent);
@@ -296,7 +296,7 @@ static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode)
                /*
                 * Allow PDUs for unrelated LUNs
                 */
-               hdr_lun = scsilun_to_int((struct scsi_lun *)tmf->lun);
+               hdr_lun = scsilun_to_int(&tmf->lun);
                if (hdr_lun != task->sc->device->lun)
                        return 0;
                /* fall through */
@@ -389,8 +389,8 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
                return rc;
        hdr->opcode = ISCSI_OP_SCSI_CMD;
        hdr->flags = ISCSI_ATTR_SIMPLE;
-       int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun);
-       memcpy(task->lun, hdr->lun, sizeof(task->lun));
+       int_to_scsilun(sc->device->lun, &hdr->lun);
+       task->lun = hdr->lun;
        hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
        cmd_len = sc->cmd_len;
        if (cmd_len < ISCSI_CDB_SIZE)
@@ -968,7 +968,7 @@ static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
        hdr.flags = ISCSI_FLAG_CMD_FINAL;
 
        if (rhdr) {
-               memcpy(hdr.lun, rhdr->lun, 8);
+               hdr.lun = rhdr->lun;
                hdr.ttt = rhdr->ttt;
                hdr.itt = RESERVED_ITT;
        } else
@@ -2092,7 +2092,7 @@ static void iscsi_prep_abort_task_pdu(struct iscsi_task *task,
        hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
        hdr->flags = ISCSI_TM_FUNC_ABORT_TASK & ISCSI_FLAG_TM_FUNC_MASK;
        hdr->flags |= ISCSI_FLAG_CMD_FINAL;
-       memcpy(hdr->lun, task->lun, sizeof(hdr->lun));
+       hdr->lun = task->lun;
        hdr->rtt = task->hdr_itt;
        hdr->refcmdsn = task->cmdsn;
 }
@@ -2233,7 +2233,7 @@ static void iscsi_prep_lun_reset_pdu(struct scsi_cmnd *sc, struct iscsi_tm *hdr)
        hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
        hdr->flags = ISCSI_TM_FUNC_LOGICAL_UNIT_RESET & ISCSI_FLAG_TM_FUNC_MASK;
        hdr->flags |= ISCSI_FLAG_CMD_FINAL;
-       int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun);
+       int_to_scsilun(sc->device->lun, &hdr->lun);
        hdr->rtt = RESERVED_ITT;
 }
 
index e98ae33..09b232f 100644 (file)
@@ -1084,7 +1084,8 @@ iscsi_tcp_conn_setup(struct iscsi_cls_session *cls_session, int dd_data_size,
        struct iscsi_cls_conn *cls_conn;
        struct iscsi_tcp_conn *tcp_conn;
 
-       cls_conn = iscsi_conn_setup(cls_session, sizeof(*tcp_conn), conn_idx);
+       cls_conn = iscsi_conn_setup(cls_session,
+                                   sizeof(*tcp_conn) + dd_data_size, conn_idx);
        if (!cls_conn)
                return NULL;
        conn = cls_conn->dd_data;
@@ -1096,22 +1097,13 @@ iscsi_tcp_conn_setup(struct iscsi_cls_session *cls_session, int dd_data_size,
 
        tcp_conn = conn->dd_data;
        tcp_conn->iscsi_conn = conn;
-
-       tcp_conn->dd_data = kzalloc(dd_data_size, GFP_KERNEL);
-       if (!tcp_conn->dd_data) {
-               iscsi_conn_teardown(cls_conn);
-               return NULL;
-       }
+       tcp_conn->dd_data = conn->dd_data + sizeof(*tcp_conn);
        return cls_conn;
 }
 EXPORT_SYMBOL_GPL(iscsi_tcp_conn_setup);
 
 void iscsi_tcp_conn_teardown(struct iscsi_cls_conn *cls_conn)
 {
-       struct iscsi_conn *conn = cls_conn->dd_data;
-       struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
-
-       kfree(tcp_conn->dd_data);
        iscsi_conn_teardown(cls_conn);
 }
 EXPORT_SYMBOL_GPL(iscsi_tcp_conn_teardown);
index ffe82d1..30b25c5 100644 (file)
@@ -1147,7 +1147,8 @@ static int lpfc_idiag_cmd_get(const char __user *buf, size_t nbytes,
 {
        char mybuf[64];
        char *pbuf, *step_str;
-       int bsize, i;
+       int i;
+       size_t bsize;
 
        /* Protect copy from user */
        if (!access_ok(VERIFY_READ, buf, nbytes))
index bf2a1c5..af3a6af 100644 (file)
@@ -214,13 +214,6 @@ static int __init mac_scsi_setup(char *str) {
 
 __setup("mac5380=", mac_scsi_setup);
 
-/*
- * If you want to find the instance with (k)gdb ...
- */
-#if NDEBUG
-static struct Scsi_Host *default_instance;
-#endif
-
 /*
  * Function : int macscsi_detect(struct scsi_host_template * tpnt)
  *
@@ -233,7 +226,7 @@ static struct Scsi_Host *default_instance;
  *
  */
  
-int macscsi_detect(struct scsi_host_template * tpnt)
+int __init macscsi_detect(struct scsi_host_template * tpnt)
 {
     static int called = 0;
     int flags = 0;
@@ -268,10 +261,7 @@ int macscsi_detect(struct scsi_host_template * tpnt)
     /* Once we support multiple 5380s (e.g. DuoDock) we'll do
        something different here */
     instance = scsi_register (tpnt, sizeof(struct NCR5380_hostdata));
-#if NDEBUG
-    default_instance = instance;
-#endif
-    
+
     if (macintosh_config->ident == MAC_MODEL_IIFX) {
        mac_scsi_regp  = via1+0x8000;
        mac_scsi_drq   = via1+0xE000;
index a3e6038..3105d5e 100644 (file)
@@ -8,7 +8,7 @@
  *                  scatter/gather formats.
  *  Creation Date:  June 21, 2006
  *
- *  mpi2.h Version:  02.00.17
+ *  mpi2.h Version:  02.00.18
  *
  *  Version History
  *  ---------------
@@ -64,6 +64,8 @@
  *  05-12-10  02.00.16  Bumped MPI2_HEADER_VERSION_UNIT.
  *                      Added alternative defines for the SGE Direction bit.
  *  08-11-10  02.00.17  Bumped MPI2_HEADER_VERSION_UNIT.
+ *  11-10-10  02.00.18  Bumped MPI2_HEADER_VERSION_UNIT.
+ *                      Added MPI2_IEEE_SGE_FLAGS_SYSTEMPLBCPI_ADDR define.
  *  --------------------------------------------------------------------------
  */
 
@@ -89,7 +91,7 @@
 #define MPI2_VERSION_02_00                  (0x0200)
 
 /* versioning for this MPI header set */
-#define MPI2_HEADER_VERSION_UNIT            (0x11)
+#define MPI2_HEADER_VERSION_UNIT            (0x12)
 #define MPI2_HEADER_VERSION_DEV             (0x00)
 #define MPI2_HEADER_VERSION_UNIT_MASK       (0xFF00)
 #define MPI2_HEADER_VERSION_UNIT_SHIFT      (8)
@@ -1060,10 +1062,14 @@ typedef struct _MPI2_IEEE_SGE_UNION
 
 #define MPI2_IEEE_SGE_FLAGS_ADDR_MASK           (0x03)
 #define MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR         (0x00)
+                                               /* IEEE Simple Element only */
 #define MPI2_IEEE_SGE_FLAGS_IOCDDR_ADDR         (0x01)
+                                               /* IEEE Simple Element only */
 #define MPI2_IEEE_SGE_FLAGS_IOCPLB_ADDR         (0x02)
 #define MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR      (0x03)
-
+                                               /* IEEE Simple Element only */
+#define MPI2_IEEE_SGE_FLAGS_SYSTEMPLBCPI_ADDR   (0x03)
+                                               /* IEEE Chain Element only */
 
 /****************************************************************************
 *  IEEE SGE operation Macros
index f5b9c76..61475a6 100644 (file)
@@ -6,7 +6,7 @@
  *          Title:  MPI Configuration messages and pages
  *  Creation Date:  November 10, 2006
  *
- *    mpi2_cnfg.h Version:  02.00.16
+ *    mpi2_cnfg.h Version:  02.00.17
  *
  *  Version History
  *  ---------------
  *                      Added MPI2_SAS_NEG_LINK_RATE_UNSUPPORTED_PHY define.
  *  08-11-10  02.00.16  Removed IO Unit Page 1 device path (multi-pathing)
  *                      defines.
+ *  11-10-10  02.00.17  Added ReceptacleID field (replacing Reserved1) to
+ *                      MPI2_MANPAGE7_CONNECTOR_INFO and reworked defines for
+ *                      the Pinout field.
+ *                      Added BoardTemperature and BoardTemperatureUnits fields
+ *                      to MPI2_CONFIG_PAGE_IO_UNIT_7.
+ *                      Added MPI2_CONFIG_EXTPAGETYPE_EXT_MANUFACTURING define
+ *                      and MPI2_CONFIG_PAGE_EXT_MAN_PS structure.
  *  --------------------------------------------------------------------------
  */
 
@@ -210,6 +217,7 @@ typedef union _MPI2_CONFIG_EXT_PAGE_HEADER_UNION
 #define MPI2_CONFIG_EXTPAGETYPE_DRIVER_MAPPING      (0x17)
 #define MPI2_CONFIG_EXTPAGETYPE_SAS_PORT            (0x18)
 #define MPI2_CONFIG_EXTPAGETYPE_ETHERNET            (0x19)
+#define MPI2_CONFIG_EXTPAGETYPE_EXT_MANUFACTURING   (0x1A)
 
 
 /*****************************************************************************
@@ -612,23 +620,31 @@ typedef struct _MPI2_MANPAGE7_CONNECTOR_INFO
     U32                         Pinout;                 /* 0x00 */
     U8                          Connector[16];          /* 0x04 */
     U8                          Location;               /* 0x14 */
-    U8                          Reserved1;              /* 0x15 */
+       U8                          ReceptacleID;           /* 0x15 */
     U16                         Slot;                   /* 0x16 */
     U32                         Reserved2;              /* 0x18 */
 } MPI2_MANPAGE7_CONNECTOR_INFO, MPI2_POINTER PTR_MPI2_MANPAGE7_CONNECTOR_INFO,
   Mpi2ManPage7ConnectorInfo_t, MPI2_POINTER pMpi2ManPage7ConnectorInfo_t;
 
 /* defines for the Pinout field */
-#define MPI2_MANPAGE7_PINOUT_SFF_8484_L4                (0x00080000)
-#define MPI2_MANPAGE7_PINOUT_SFF_8484_L3                (0x00040000)
-#define MPI2_MANPAGE7_PINOUT_SFF_8484_L2                (0x00020000)
-#define MPI2_MANPAGE7_PINOUT_SFF_8484_L1                (0x00010000)
-#define MPI2_MANPAGE7_PINOUT_SFF_8470_L4                (0x00000800)
-#define MPI2_MANPAGE7_PINOUT_SFF_8470_L3                (0x00000400)
-#define MPI2_MANPAGE7_PINOUT_SFF_8470_L2                (0x00000200)
-#define MPI2_MANPAGE7_PINOUT_SFF_8470_L1                (0x00000100)
-#define MPI2_MANPAGE7_PINOUT_SFF_8482                   (0x00000002)
-#define MPI2_MANPAGE7_PINOUT_CONNECTION_UNKNOWN         (0x00000001)
+#define MPI2_MANPAGE7_PINOUT_LANE_MASK                  (0x0000FF00)
+#define MPI2_MANPAGE7_PINOUT_LANE_SHIFT                 (8)
+
+#define MPI2_MANPAGE7_PINOUT_TYPE_MASK                  (0x000000FF)
+#define MPI2_MANPAGE7_PINOUT_TYPE_UNKNOWN               (0x00)
+#define MPI2_MANPAGE7_PINOUT_SATA_SINGLE                (0x01)
+#define MPI2_MANPAGE7_PINOUT_SFF_8482                   (0x02)
+#define MPI2_MANPAGE7_PINOUT_SFF_8486                   (0x03)
+#define MPI2_MANPAGE7_PINOUT_SFF_8484                   (0x04)
+#define MPI2_MANPAGE7_PINOUT_SFF_8087                   (0x05)
+#define MPI2_MANPAGE7_PINOUT_SFF_8643_4I                (0x06)
+#define MPI2_MANPAGE7_PINOUT_SFF_8643_8I                (0x07)
+#define MPI2_MANPAGE7_PINOUT_SFF_8470                   (0x08)
+#define MPI2_MANPAGE7_PINOUT_SFF_8088                   (0x09)
+#define MPI2_MANPAGE7_PINOUT_SFF_8644_4X                (0x0A)
+#define MPI2_MANPAGE7_PINOUT_SFF_8644_8X                (0x0B)
+#define MPI2_MANPAGE7_PINOUT_SFF_8644_16X               (0x0C)
+#define MPI2_MANPAGE7_PINOUT_SFF_8436                   (0x0D)
 
 /* defines for the Location field */
 #define MPI2_MANPAGE7_LOCATION_UNKNOWN                  (0x01)
@@ -662,7 +678,7 @@ typedef struct _MPI2_CONFIG_PAGE_MAN_7
   MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_7,
   Mpi2ManufacturingPage7_t, MPI2_POINTER pMpi2ManufacturingPage7_t;
 
-#define MPI2_MANUFACTURING7_PAGEVERSION                 (0x00)
+#define MPI2_MANUFACTURING7_PAGEVERSION                 (0x01)
 
 /* defines for the Flags field */
 #define MPI2_MANPAGE7_FLAG_USE_SLOT_INFO                (0x00000001)
@@ -849,11 +865,13 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_7 {
     U16                     IOCTemperature;                         /* 0x10 */
     U8                      IOCTemperatureUnits;                    /* 0x12 */
     U8                      IOCSpeed;                               /* 0x13 */
-    U32                     Reserved3;                              /* 0x14 */
+       U16                     BoardTemperature;              /* 0x14 */
+       U8                      BoardTemperatureUnits;         /* 0x16 */
+       U8                      Reserved3;                     /* 0x17 */
 } MPI2_CONFIG_PAGE_IO_UNIT_7, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_7,
   Mpi2IOUnitPage7_t, MPI2_POINTER pMpi2IOUnitPage7_t;
 
-#define MPI2_IOUNITPAGE7_PAGEVERSION                    (0x01)
+#define MPI2_IOUNITPAGE7_PAGEVERSION                    (0x02)
 
 /* defines for IO Unit Page 7 PCIeWidth field */
 #define MPI2_IOUNITPAGE7_PCIE_WIDTH_X1              (0x01)
@@ -881,7 +899,6 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_7 {
 #define MPI2_IOUNITPAGE7_PMCAP_PCIE_WIDTH_CHANGE    (0x00000008)
 #define MPI2_IOUNITPAGE7_PMCAP_PCIE_SPEED_CHANGE    (0x00000004)
 
-
 /* defines for IO Unit Page 7 IOCTemperatureUnits field */
 #define MPI2_IOUNITPAGE7_IOC_TEMP_NOT_PRESENT       (0x00)
 #define MPI2_IOUNITPAGE7_IOC_TEMP_FAHRENHEIT        (0x01)
@@ -893,6 +910,11 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_7 {
 #define MPI2_IOUNITPAGE7_IOC_SPEED_QUARTER          (0x04)
 #define MPI2_IOUNITPAGE7_IOC_SPEED_EIGHTH           (0x08)
 
+/* defines for IO Unit Page 7 BoardTemperatureUnits field */
+#define MPI2_IOUNITPAGE7_BOARD_TEMP_NOT_PRESENT     (0x00)
+#define MPI2_IOUNITPAGE7_BOARD_TEMP_FAHRENHEIT      (0x01)
+#define MPI2_IOUNITPAGE7_BOARD_TEMP_CELSIUS         (0x02)
+
 
 
 /****************************************************************************
@@ -2799,5 +2821,25 @@ typedef struct _MPI2_CONFIG_PAGE_ETHERNET_1 {
 #define MPI2_ETHPG1_MS_DATA_RATE_1GBIT              (0x03)
 
 
+/****************************************************************************
+*   Extended Manufacturing Config Pages
+****************************************************************************/
+
+/*
+ * Generic structure to use for product-specific extended manufacturing pages
+ * (currently Extended Manufacturing Page 40 through Extended Manufacturing
+ * Page 60).
+ */
+
+typedef struct _MPI2_CONFIG_PAGE_EXT_MAN_PS {
+       MPI2_CONFIG_EXTENDED_PAGE_HEADER    Header;                 /* 0x00 */
+       U32                                 ProductSpecificInfo;    /* 0x08 */
+}      MPI2_CONFIG_PAGE_EXT_MAN_PS,
+       MPI2_POINTER PTR_MPI2_CONFIG_PAGE_EXT_MAN_PS,
+       Mpi2ExtManufacturingPagePS_t,
+       MPI2_POINTER pMpi2ExtManufacturingPagePS_t;
+
+/* PageVersion should be provided by product-specific code */
+
 #endif
 
index 165454d..de90162 100644 (file)
@@ -6,7 +6,7 @@
  *          Title:  MPI SCSI initiator mode messages and structures
  *  Creation Date:  June 23, 2006
  *
- *    mpi2_init.h Version:  02.00.10
+ *    mpi2_init.h Version:  02.00.11
  *
  *  Version History
  *  ---------------
@@ -33,6 +33,7 @@
  *                      Added MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG define.
  *  02-10-10  02.00.09  Removed unused structure that had "#if 0" around it.
  *  05-12-10  02.00.10  Added optional vendor-unique region to SCSI IO Request.
+ *  11-10-10  02.00.11  Added MPI2_SCSIIO_NUM_SGLOFFSETS define.
  *  --------------------------------------------------------------------------
  */
 
@@ -139,6 +140,9 @@ typedef struct _MPI2_SCSI_IO_REQUEST
 #define MPI2_SCSIIO_SGLFLAGS_SGL1_SHIFT             (4)
 #define MPI2_SCSIIO_SGLFLAGS_SGL0_SHIFT             (0)
 
+/* number of SGLOffset fields */
+#define MPI2_SCSIIO_NUM_SGLOFFSETS                  (4)
+
 /* SCSI IO IoFlags bits */
 
 /* Large CDB Address Space */
index 761cbdb..1f0c190 100644 (file)
@@ -6,7 +6,7 @@
  *          Title:  MPI IOC, Port, Event, FW Download, and FW Upload messages
  *  Creation Date:  October 11, 2006
  *
- *  mpi2_ioc.h Version:  02.00.15
+ *  mpi2_ioc.h Version:  02.00.16
  *
  *  Version History
  *  ---------------
  *                      defines.
  *  05-12-10  02.00.15  Marked Task Set Full Event as obsolete.
  *                      Added MPI2_EVENT_SAS_TOPO_LR_UNSUPPORTED_PHY define.
+ *  11-10-10  02.00.16  Added MPI2_FW_DOWNLOAD_ITYPE_MIN_PRODUCT_SPECIFIC.
  *  --------------------------------------------------------------------------
  */
 
@@ -1032,6 +1033,7 @@ typedef struct _MPI2_FW_DOWNLOAD_REQUEST
 #define MPI2_FW_DOWNLOAD_ITYPE_MEGARAID             (0x09)
 #define MPI2_FW_DOWNLOAD_ITYPE_COMPLETE             (0x0A)
 #define MPI2_FW_DOWNLOAD_ITYPE_COMMON_BOOT_BLOCK    (0x0B)
+#define MPI2_FW_DOWNLOAD_ITYPE_MIN_PRODUCT_SPECIFIC (0xF0)
 
 /* FWDownload TransactionContext Element */
 typedef struct _MPI2_FW_DOWNLOAD_TCSGE
index efa0255..83035bd 100644 (file)
@@ -94,7 +94,7 @@ module_param(diag_buffer_enable, int, 0);
 MODULE_PARM_DESC(diag_buffer_enable, " post diag buffers "
     "(TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)");
 
-int mpt2sas_fwfault_debug;
+static int mpt2sas_fwfault_debug;
 MODULE_PARM_DESC(mpt2sas_fwfault_debug, " enable detection of firmware fault "
     "and halt firmware - (default=0)");
 
@@ -857,7 +857,7 @@ _base_interrupt(int irq, void *bus_id)
        completed_cmds = 0;
        cb_idx = 0xFF;
        do {
-               rd.word = rpf->Words;
+               rd.word = le64_to_cpu(rpf->Words);
                if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX)
                        goto out;
                reply = 0;
@@ -906,7 +906,7 @@ _base_interrupt(int irq, void *bus_id)
 
  next:
 
-               rpf->Words = ULLONG_MAX;
+               rpf->Words = cpu_to_le64(ULLONG_MAX);
                ioc->reply_post_host_index = (ioc->reply_post_host_index ==
                    (ioc->reply_post_queue_depth - 1)) ? 0 :
                    ioc->reply_post_host_index + 1;
@@ -1740,9 +1740,11 @@ _base_display_dell_branding(struct MPT2SAS_ADAPTER *ioc)
 static void
 _base_display_intel_branding(struct MPT2SAS_ADAPTER *ioc)
 {
-       if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_INTEL &&
-           ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008) {
+       if (ioc->pdev->subsystem_vendor != PCI_VENDOR_ID_INTEL)
+               return;
 
+       switch (ioc->pdev->device) {
+       case MPI2_MFGPAGE_DEVID_SAS2008:
                switch (ioc->pdev->subsystem_device) {
                case MPT2SAS_INTEL_RMS2LL080_SSDID:
                        printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
@@ -1752,7 +1754,20 @@ _base_display_intel_branding(struct MPT2SAS_ADAPTER *ioc)
                        printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
                            MPT2SAS_INTEL_RMS2LL040_BRANDING);
                        break;
+               default:
+                       break;
+               }
+       case MPI2_MFGPAGE_DEVID_SAS2308_2:
+               switch (ioc->pdev->subsystem_device) {
+               case MPT2SAS_INTEL_RS25GB008_SSDID:
+                       printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+                           MPT2SAS_INTEL_RS25GB008_BRANDING);
+                       break;
+               default:
+                       break;
                }
+       default:
+               break;
        }
 }
 
@@ -1817,7 +1832,9 @@ _base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc)
        char desc[16];
        u8 revision;
        u32 iounit_pg1_flags;
+       u32 bios_version;
 
+       bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
        pci_read_config_byte(ioc->pdev, PCI_CLASS_REVISION, &revision);
        strncpy(desc, ioc->manu_pg0.ChipName, 16);
        printk(MPT2SAS_INFO_FMT "%s: FWVersion(%02d.%02d.%02d.%02d), "
@@ -1828,10 +1845,10 @@ _base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc)
           (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
           ioc->facts.FWVersion.Word & 0x000000FF,
           revision,
-          (ioc->bios_pg3.BiosVersion & 0xFF000000) >> 24,
-          (ioc->bios_pg3.BiosVersion & 0x00FF0000) >> 16,
-          (ioc->bios_pg3.BiosVersion & 0x0000FF00) >> 8,
-           ioc->bios_pg3.BiosVersion & 0x000000FF);
+          (bios_version & 0xFF000000) >> 24,
+          (bios_version & 0x00FF0000) >> 16,
+          (bios_version & 0x0000FF00) >> 8,
+           bios_version & 0x000000FF);
 
        _base_display_dell_branding(ioc);
        _base_display_intel_branding(ioc);
@@ -2150,7 +2167,7 @@ _base_release_memory_pools(struct MPT2SAS_ADAPTER *ioc)
 static int
 _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc,  int sleep_flag)
 {
-       Mpi2IOCFactsReply_t *facts;
+       struct mpt2sas_facts *facts;
        u32 queue_size, queue_diff;
        u16 max_sge_elements;
        u16 num_of_reply_frames;
@@ -2783,7 +2800,7 @@ _base_handshake_req_reply_wait(struct MPT2SAS_ADAPTER *ioc, int request_bytes,
        int i;
        u8 failed;
        u16 dummy;
-       u32 *mfp;
+       __le32 *mfp;
 
        /* make sure doorbell is not in use */
        if ((readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
@@ -2871,7 +2888,7 @@ _base_handshake_req_reply_wait(struct MPT2SAS_ADAPTER *ioc, int request_bytes,
        writel(0, &ioc->chip->HostInterruptStatus);
 
        if (ioc->logging_level & MPT_DEBUG_INIT) {
-               mfp = (u32 *)reply;
+               mfp = (__le32 *)reply;
                printk(KERN_INFO "\toffset:data\n");
                for (i = 0; i < reply_bytes/4; i++)
                        printk(KERN_INFO "\t[0x%02x]:%08x\n", i*4,
@@ -3097,7 +3114,8 @@ static int
 _base_get_port_facts(struct MPT2SAS_ADAPTER *ioc, int port, int sleep_flag)
 {
        Mpi2PortFactsRequest_t mpi_request;
-       Mpi2PortFactsReply_t mpi_reply, *pfacts;
+       Mpi2PortFactsReply_t mpi_reply;
+       struct mpt2sas_port_facts *pfacts;
        int mpi_reply_sz, mpi_request_sz, r;
 
        dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
@@ -3139,7 +3157,8 @@ static int
 _base_get_ioc_facts(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
 {
        Mpi2IOCFactsRequest_t mpi_request;
-       Mpi2IOCFactsReply_t mpi_reply, *facts;
+       Mpi2IOCFactsReply_t mpi_reply;
+       struct mpt2sas_facts *facts;
        int mpi_reply_sz, mpi_request_sz, r;
 
        dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
@@ -3225,17 +3244,6 @@ _base_send_ioc_init(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
        mpi_request.MsgVersion = cpu_to_le16(MPI2_VERSION);
        mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
 
-       /* In MPI Revision I (0xA), the SystemReplyFrameSize(offset 0x18) was
-        * removed and made reserved.  For those with older firmware will need
-        * this fix. It was decided that the Reply and Request frame sizes are
-        * the same.
-        */
-       if ((ioc->facts.HeaderVersion >> 8) < 0xA) {
-               mpi_request.Reserved7 = cpu_to_le16(ioc->reply_sz);
-/*             mpi_request.SystemReplyFrameSize =
- *              cpu_to_le16(ioc->reply_sz);
- */
-       }
 
        mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz/4);
        mpi_request.ReplyDescriptorPostQueueDepth =
@@ -3243,25 +3251,17 @@ _base_send_ioc_init(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
        mpi_request.ReplyFreeQueueDepth =
            cpu_to_le16(ioc->reply_free_queue_depth);
 
-#if BITS_PER_LONG > 32
        mpi_request.SenseBufferAddressHigh =
-           cpu_to_le32(ioc->sense_dma >> 32);
+           cpu_to_le32((u64)ioc->sense_dma >> 32);
        mpi_request.SystemReplyAddressHigh =
-           cpu_to_le32(ioc->reply_dma >> 32);
+           cpu_to_le32((u64)ioc->reply_dma >> 32);
        mpi_request.SystemRequestFrameBaseAddress =
-           cpu_to_le64(ioc->request_dma);
+           cpu_to_le64((u64)ioc->request_dma);
        mpi_request.ReplyFreeQueueAddress =
-           cpu_to_le64(ioc->reply_free_dma);
+           cpu_to_le64((u64)ioc->reply_free_dma);
        mpi_request.ReplyDescriptorPostQueueAddress =
-           cpu_to_le64(ioc->reply_post_free_dma);
-#else
-       mpi_request.SystemRequestFrameBaseAddress =
-           cpu_to_le32(ioc->request_dma);
-       mpi_request.ReplyFreeQueueAddress =
-           cpu_to_le32(ioc->reply_free_dma);
-       mpi_request.ReplyDescriptorPostQueueAddress =
-           cpu_to_le32(ioc->reply_post_free_dma);
-#endif
+           cpu_to_le64((u64)ioc->reply_post_free_dma);
+
 
        /* This time stamp specifies number of milliseconds
         * since epoch ~ midnight January 1, 1970.
@@ -3271,10 +3271,10 @@ _base_send_ioc_init(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
            (current_time.tv_usec / 1000));
 
        if (ioc->logging_level & MPT_DEBUG_INIT) {
-               u32 *mfp;
+               __le32 *mfp;
                int i;
 
-               mfp = (u32 *)&mpi_request;
+               mfp = (__le32 *)&mpi_request;
                printk(KERN_INFO "\toffset:data\n");
                for (i = 0; i < sizeof(Mpi2IOCInitRequest_t)/4; i++)
                        printk(KERN_INFO "\t[0x%02x]:%08x\n", i*4,
@@ -3759,7 +3759,7 @@ _base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
 
        /* initialize Reply Post Free Queue */
        for (i = 0; i < ioc->reply_post_queue_depth; i++)
-               ioc->reply_post_free[i].Words = ULLONG_MAX;
+               ioc->reply_post_free[i].Words = cpu_to_le64(ULLONG_MAX);
 
        r = _base_send_ioc_init(ioc, sleep_flag);
        if (r)
index dcc289c..8d5be21 100644 (file)
 #define MPT2SAS_DRIVER_NAME            "mpt2sas"
 #define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>"
 #define MPT2SAS_DESCRIPTION    "LSI MPT Fusion SAS 2.0 Device Driver"
-#define MPT2SAS_DRIVER_VERSION         "08.100.00.02"
-#define MPT2SAS_MAJOR_VERSION          08
+#define MPT2SAS_DRIVER_VERSION         "09.100.00.00"
+#define MPT2SAS_MAJOR_VERSION          09
 #define MPT2SAS_MINOR_VERSION          100
 #define MPT2SAS_BUILD_VERSION          00
-#define MPT2SAS_RELEASE_VERSION                02
+#define MPT2SAS_RELEASE_VERSION                00
 
 /*
  * Set MPT2SAS_SG_DEPTH value based on user input.
                                "Intel Integrated RAID Module RMS2LL080"
 #define MPT2SAS_INTEL_RMS2LL040_BRANDING       \
                                "Intel Integrated RAID Module RMS2LL040"
+#define MPT2SAS_INTEL_RS25GB008_BRANDING       \
+                               "Intel(R) RAID Controller RS25GB008"
 
 /*
  * Intel HBA SSDIDs
  */
 #define MPT2SAS_INTEL_RMS2LL080_SSDID          0x350E
 #define MPT2SAS_INTEL_RMS2LL040_SSDID          0x350F
+#define MPT2SAS_INTEL_RS25GB008_SSDID          0x3000
 
 
 /*
@@ -541,6 +544,63 @@ struct _tr_list {
 
 typedef void (*MPT_ADD_SGE)(void *paddr, u32 flags_length, dma_addr_t dma_addr);
 
+/* IOC Facts and Port Facts converted from little endian to cpu */
+union mpi2_version_union {
+       MPI2_VERSION_STRUCT             Struct;
+       u32                             Word;
+};
+
+struct mpt2sas_facts {
+       u16                     MsgVersion;
+       u16                     HeaderVersion;
+       u8                      IOCNumber;
+       u8                      VP_ID;
+       u8                      VF_ID;
+       u16                     IOCExceptions;
+       u16                     IOCStatus;
+       u32                     IOCLogInfo;
+       u8                      MaxChainDepth;
+       u8                      WhoInit;
+       u8                      NumberOfPorts;
+       u8                      MaxMSIxVectors;
+       u16                     RequestCredit;
+       u16                     ProductID;
+       u32                     IOCCapabilities;
+       union mpi2_version_union        FWVersion;
+       u16                     IOCRequestFrameSize;
+       u16                     Reserved3;
+       u16                     MaxInitiators;
+       u16                     MaxTargets;
+       u16                     MaxSasExpanders;
+       u16                     MaxEnclosures;
+       u16                     ProtocolFlags;
+       u16                     HighPriorityCredit;
+       u16                     MaxReplyDescriptorPostQueueDepth;
+       u8                      ReplyFrameSize;
+       u8                      MaxVolumes;
+       u16                     MaxDevHandle;
+       u16                     MaxPersistentEntries;
+       u16                     MinDevHandle;
+};
+
+struct mpt2sas_port_facts {
+       u8                      PortNumber;
+       u8                      VP_ID;
+       u8                      VF_ID;
+       u8                      PortType;
+       u16                     MaxPostedCmdBuffers;
+};
+
+/**
+ * enum mutex_type - task management mutex type
+ * @TM_MUTEX_OFF: mutex is not required because calling function is acquiring it
+ * @TM_MUTEX_ON: mutex is required
+ */
+enum mutex_type {
+       TM_MUTEX_OFF = 0,
+       TM_MUTEX_ON = 1,
+};
+
 /**
  * struct MPT2SAS_ADAPTER - per adapter struct
  * @list: ioc_list
@@ -703,6 +763,7 @@ struct MPT2SAS_ADAPTER {
         /* misc flags */
        int             aen_event_read_flag;
        u8              broadcast_aen_busy;
+       u16             broadcast_aen_pending;
        u8              shost_recovery;
 
        struct mutex    reset_in_progress_mutex;
@@ -749,8 +810,8 @@ struct MPT2SAS_ADAPTER {
        u32             event_masks[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
 
        /* static config pages */
-       Mpi2IOCFactsReply_t facts;
-       Mpi2PortFactsReply_t *pfacts;
+       struct mpt2sas_facts facts;
+       struct mpt2sas_port_facts *pfacts;
        Mpi2ManufacturingPage0_t manu_pg0;
        Mpi2BiosPage2_t bios_pg2;
        Mpi2BiosPage3_t bios_pg3;
@@ -840,7 +901,7 @@ struct MPT2SAS_ADAPTER {
 
        /* reply free queue */
        u16             reply_free_queue_depth;
-       u32             *reply_free;
+       __le32          *reply_free;
        dma_addr_t      reply_free_dma;
        struct dma_pool *reply_free_dma_pool;
        u32             reply_free_host_index;
@@ -932,8 +993,8 @@ void mpt2sas_halt_firmware(struct MPT2SAS_ADAPTER *ioc);
 u8 mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
     u32 reply);
 int mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle,
-    uint channel, uint id, uint lun, u8 type, u16 smid_task,
-    ulong timeout, struct scsi_cmnd *scmd);
+       uint channel, uint id, uint lun, u8 type, u16 smid_task,
+       ulong timeout, unsigned long serial_number, enum mutex_type m_type);
 void mpt2sas_scsih_set_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle);
 void mpt2sas_scsih_clear_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle);
 void mpt2sas_expander_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address);
index 437c2d9..38ed026 100644 (file)
@@ -994,7 +994,7 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
                        mpt2sas_scsih_issue_tm(ioc,
                            le16_to_cpu(mpi_request->FunctionDependent1), 0, 0,
                            0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 10,
-                           NULL);
+                           0, TM_MUTEX_ON);
                        ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
                } else
                        mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
@@ -2706,13 +2706,13 @@ static DEVICE_ATTR(ioc_reset_count, S_IRUGO,
     _ctl_ioc_reset_count_show, NULL);
 
 struct DIAG_BUFFER_START {
-       u32 Size;
-       u32 DiagVersion;
+       __le32 Size;
+       __le32 DiagVersion;
        u8 BufferType;
        u8 Reserved[3];
-       u32 Reserved1;
-       u32 Reserved2;
-       u32 Reserved3;
+       __le32 Reserved1;
+       __le32 Reserved2;
+       __le32 Reserved3;
 };
 /**
  * _ctl_host_trace_buffer_size_show - host buffer size (trace only)
index 3dcddfe..9731f8e 100644 (file)
@@ -164,7 +164,7 @@ static inline void
 _debug_dump_mf(void *mpi_request, int sz)
 {
        int i;
-       u32 *mfp = (u32 *)mpi_request;
+       __le32 *mfp = (__le32 *)mpi_request;
 
        printk(KERN_INFO "mf:\n\t");
        for (i = 0; i < sz; i++) {
index a7dbc68..939f283 100644 (file)
@@ -94,6 +94,10 @@ static u32 logging_level;
 MODULE_PARM_DESC(logging_level, " bits for enabling additional logging info "
     "(default=0)");
 
+static ushort max_sectors = 0xFFFF;
+module_param(max_sectors, ushort, 0);
+MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 8192  default=8192");
+
 /* scsi-mid layer global parmeter is max_report_luns, which is 511 */
 #define MPT2SAS_MAX_LUN (16895)
 static int max_lun = MPT2SAS_MAX_LUN;
@@ -1956,7 +1960,7 @@ _scsih_slave_configure(struct scsi_device *sdev)
                case MPI2_RAID_VOL_TYPE_RAID1E:
                        qdepth = MPT2SAS_RAID_QUEUE_DEPTH;
                        if (ioc->manu_pg10.OEMIdentifier &&
-                           (ioc->manu_pg10.GenericFlags0 &
+                           (le32_to_cpu(ioc->manu_pg10.GenericFlags0) &
                            MFG10_GF0_R10_DISPLAY) &&
                            !(raid_device->num_pds % 2))
                                r_level = "RAID10";
@@ -2236,6 +2240,8 @@ mpt2sas_scsih_clear_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle)
  * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
  * @smid_task: smid assigned to the task
  * @timeout: timeout in seconds
+ * @serial_number: the serial_number from scmd
+ * @m_type: TM_MUTEX_ON or TM_MUTEX_OFF
  * Context: user
  *
  * A generic API for sending task management requests to firmware.
@@ -2247,17 +2253,18 @@ mpt2sas_scsih_clear_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle)
 int
 mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint channel,
     uint id, uint lun, u8 type, u16 smid_task, ulong timeout,
-    struct scsi_cmnd *scmd)
+       unsigned long serial_number, enum mutex_type m_type)
 {
        Mpi2SCSITaskManagementRequest_t *mpi_request;
        Mpi2SCSITaskManagementReply_t *mpi_reply;
        u16 smid = 0;
        u32 ioc_state;
        unsigned long timeleft;
-       struct scsi_cmnd *scmd_lookup;
+       struct scsiio_tracker *scsi_lookup = NULL;
        int rc;
 
-       mutex_lock(&ioc->tm_cmds.mutex);
+       if (m_type == TM_MUTEX_ON)
+               mutex_lock(&ioc->tm_cmds.mutex);
        if (ioc->tm_cmds.status != MPT2_CMD_NOT_USED) {
                printk(MPT2SAS_INFO_FMT "%s: tm_cmd busy!!!\n",
                    __func__, ioc->name);
@@ -2277,18 +2284,18 @@ mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint channel,
        if (ioc_state & MPI2_DOORBELL_USED) {
                dhsprintk(ioc, printk(MPT2SAS_INFO_FMT "unexpected doorbell "
                    "active!\n", ioc->name));
-               mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+               rc = mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
                    FORCE_BIG_HAMMER);
-               rc = SUCCESS;
+               rc = (!rc) ? SUCCESS : FAILED;
                goto err_out;
        }
 
        if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
                mpt2sas_base_fault_info(ioc, ioc_state &
                    MPI2_DOORBELL_DATA_MASK);
-               mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+               rc = mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
                    FORCE_BIG_HAMMER);
-               rc = SUCCESS;
+               rc = (!rc) ? SUCCESS : FAILED;
                goto err_out;
        }
 
@@ -2300,6 +2307,9 @@ mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint channel,
                goto err_out;
        }
 
+       if (type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK)
+               scsi_lookup = &ioc->scsi_lookup[smid_task - 1];
+
        dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "sending tm: handle(0x%04x),"
            " task_type(0x%02x), smid(%d)\n", ioc->name, handle, type,
            smid_task));
@@ -2307,6 +2317,7 @@ mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint channel,
        mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
        ioc->tm_cmds.smid = smid;
        memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
+       memset(ioc->tm_cmds.reply, 0, sizeof(Mpi2SCSITaskManagementReply_t));
        mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
        mpi_request->DevHandle = cpu_to_le16(handle);
        mpi_request->TaskType = type;
@@ -2322,9 +2333,9 @@ mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint channel,
                _debug_dump_mf(mpi_request,
                    sizeof(Mpi2SCSITaskManagementRequest_t)/4);
                if (!(ioc->tm_cmds.status & MPT2_CMD_RESET)) {
-                       mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+                       rc = mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
                            FORCE_BIG_HAMMER);
-                       rc = SUCCESS;
+                       rc = (!rc) ? SUCCESS : FAILED;
                        ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
                        mpt2sas_scsih_clear_tm_flag(ioc, handle);
                        goto err_out;
@@ -2346,20 +2357,12 @@ mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint channel,
                }
        }
 
-       /* sanity check:
-        * Check to see the commands were terminated.
-        * This is only needed for eh callbacks, hence the scmd check.
-        */
-       rc = FAILED;
-       if (scmd == NULL)
-               goto bypass_sanity_checks;
        switch (type) {
        case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
-               scmd_lookup = _scsih_scsi_lookup_get(ioc, smid_task);
-               if (scmd_lookup)
-                       rc = FAILED;
-               else
-                       rc = SUCCESS;
+               rc = SUCCESS;
+               if (scsi_lookup->scmd == NULL)
+                       break;
+               rc = FAILED;
                break;
 
        case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
@@ -2369,24 +2372,31 @@ mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint channel,
                        rc = SUCCESS;
                break;
 
+       case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
        case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
                if (_scsih_scsi_lookup_find_by_lun(ioc, id, lun, channel))
                        rc = FAILED;
                else
                        rc = SUCCESS;
                break;
+       case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
+               rc = SUCCESS;
+               break;
+       default:
+               rc = FAILED;
+               break;
        }
 
- bypass_sanity_checks:
-
        mpt2sas_scsih_clear_tm_flag(ioc, handle);
        ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
-       mutex_unlock(&ioc->tm_cmds.mutex);
+       if (m_type == TM_MUTEX_ON)
+               mutex_unlock(&ioc->tm_cmds.mutex);
 
        return rc;
 
  err_out:
-       mutex_unlock(&ioc->tm_cmds.mutex);
+       if (m_type == TM_MUTEX_ON)
+               mutex_unlock(&ioc->tm_cmds.mutex);
        return rc;
 }
 
@@ -2496,7 +2506,8 @@ _scsih_abort(struct scsi_cmnd *scmd)
        handle = sas_device_priv_data->sas_target->handle;
        r = mpt2sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
            scmd->device->id, scmd->device->lun,
-           MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30, scmd);
+           MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30,
+           scmd->serial_number, TM_MUTEX_ON);
 
  out:
        sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n",
@@ -2557,7 +2568,8 @@ _scsih_dev_reset(struct scsi_cmnd *scmd)
 
        r = mpt2sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
            scmd->device->id, scmd->device->lun,
-           MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30, scmd);
+           MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30, 0,
+           TM_MUTEX_ON);
 
  out:
        sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(%p)\n",
@@ -2617,7 +2629,7 @@ _scsih_target_reset(struct scsi_cmnd *scmd)
 
        r = mpt2sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
            scmd->device->id, 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
-           30, scmd);
+           30, 0, TM_MUTEX_ON);
 
  out:
        starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n",
@@ -2749,6 +2761,31 @@ _scsih_fw_event_cleanup_queue(struct MPT2SAS_ADAPTER *ioc)
        }
 }
 
+/**
+ * _scsih_ublock_io_all_device - unblock every device
+ * @ioc: per adapter object
+ *
+ * change the device state from block to running
+ */
+static void
+_scsih_ublock_io_all_device(struct MPT2SAS_ADAPTER *ioc)
+{
+       struct MPT2SAS_DEVICE *sas_device_priv_data;
+       struct scsi_device *sdev;
+
+       shost_for_each_device(sdev, ioc->shost) {
+               sas_device_priv_data = sdev->hostdata;
+               if (!sas_device_priv_data)
+                       continue;
+               if (!sas_device_priv_data->block)
+                       continue;
+               sas_device_priv_data->block = 0;
+               dewtprintk(ioc, sdev_printk(KERN_INFO, sdev, "device_running, "
+                   "handle(0x%04x)\n",
+                   sas_device_priv_data->sas_target->handle));
+               scsi_internal_device_unblock(sdev);
+       }
+}
 /**
  * _scsih_ublock_io_device - set the device state to SDEV_RUNNING
  * @ioc: per adapter object
@@ -2778,6 +2815,34 @@ _scsih_ublock_io_device(struct MPT2SAS_ADAPTER *ioc, u16 handle)
        }
 }
 
+/**
+ * _scsih_block_io_all_device - set the device state to SDEV_BLOCK
+ * @ioc: per adapter object
+ *
+ * During device pull we need to appropriately set the sdev state.
+ */
+static void
+_scsih_block_io_all_device(struct MPT2SAS_ADAPTER *ioc)
+{
+       struct MPT2SAS_DEVICE *sas_device_priv_data;
+       struct scsi_device *sdev;
+
+       shost_for_each_device(sdev, ioc->shost) {
+               sas_device_priv_data = sdev->hostdata;
+               if (!sas_device_priv_data)
+                       continue;
+               if (sas_device_priv_data->block)
+                       continue;
+               sas_device_priv_data->block = 1;
+               dewtprintk(ioc, sdev_printk(KERN_INFO, sdev, "device_blocked, "
+                   "handle(0x%04x)\n",
+                   sas_device_priv_data->sas_target->handle));
+               scsi_internal_device_block(sdev);
+       }
+}
+
+
 /**
  * _scsih_block_io_device - set the device state to SDEV_BLOCK
  * @ioc: per adapter object
@@ -3698,7 +3763,7 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
                return 0;
        }
 
-       if (ioc->pci_error_recovery) {
+       if (ioc->pci_error_recovery || ioc->remove_host) {
                scmd->result = DID_NO_CONNECT << 16;
                scmd->scsi_done(scmd);
                return 0;
@@ -4598,7 +4663,7 @@ _scsih_expander_add(struct MPT2SAS_ADAPTER *ioc, u16 handle)
        Mpi2SasEnclosurePage0_t enclosure_pg0;
        u32 ioc_status;
        u16 parent_handle;
-       __le64 sas_address, sas_address_parent = 0;
+       u64 sas_address, sas_address_parent = 0;
        int i;
        unsigned long flags;
        struct _sas_port *mpt2sas_port = NULL;
@@ -5380,9 +5445,10 @@ _scsih_sas_device_status_change_event_debug(struct MPT2SAS_ADAPTER *ioc,
                break;
        }
        printk(MPT2SAS_INFO_FMT "device status change: (%s)\n"
-           "\thandle(0x%04x), sas address(0x%016llx)", ioc->name,
-           reason_str, le16_to_cpu(event_data->DevHandle),
-           (unsigned long long)le64_to_cpu(event_data->SASAddress));
+           "\thandle(0x%04x), sas address(0x%016llx), tag(%d)",
+           ioc->name, reason_str, le16_to_cpu(event_data->DevHandle),
+           (unsigned long long)le64_to_cpu(event_data->SASAddress),
+           le16_to_cpu(event_data->TaskTag));
        if (event_data->ReasonCode == MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA)
                printk(MPT2SAS_INFO_FMT ", ASC(0x%x), ASCQ(0x%x)\n", ioc->name,
                    event_data->ASC, event_data->ASCQ);
@@ -5404,7 +5470,7 @@ _scsih_sas_device_status_change_event(struct MPT2SAS_ADAPTER *ioc,
 {
        struct MPT2SAS_TARGET *target_priv_data;
        struct _sas_device *sas_device;
-       __le64 sas_address;
+       u64 sas_address;
        unsigned long flags;
        Mpi2EventDataSasDeviceStatusChange_t *event_data =
            fw_event->event_data;
@@ -5522,25 +5588,38 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc,
        u32 termination_count;
        u32 query_count;
        Mpi2SCSITaskManagementReply_t *mpi_reply;
-#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
        Mpi2EventDataSasBroadcastPrimitive_t *event_data = fw_event->event_data;
-#endif
        u16 ioc_status;
        unsigned long flags;
        int r;
+       u8 max_retries = 0;
+       u8 task_abort_retries;
 
-       dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "broadcast primitive: "
-           "phy number(%d), width(%d)\n", ioc->name, event_data->PhyNum,
-           event_data->PortWidth));
-       dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name,
-           __func__));
+       mutex_lock(&ioc->tm_cmds.mutex);
+       dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter: phy number(%d), "
+           "width(%d)\n", ioc->name, __func__, event_data->PhyNum,
+            event_data->PortWidth));
+
+       _scsih_block_io_all_device(ioc);
 
        spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
-       ioc->broadcast_aen_busy = 0;
+       mpi_reply = ioc->tm_cmds.reply;
+broadcast_aen_retry:
+
+       /* sanity checks for retrying this loop */
+       if (max_retries++ == 5) {
+               dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: giving up\n",
+                   ioc->name, __func__));
+               goto out;
+       } else if (max_retries > 1)
+               dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: %d retry\n",
+                   ioc->name, __func__, max_retries - 1));
+
        termination_count = 0;
        query_count = 0;
-       mpi_reply = ioc->tm_cmds.reply;
        for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
+               if (ioc->ioc_reset_in_progress_status)
+                       goto out;
                scmd = _scsih_scsi_lookup_get(ioc, smid);
                if (!scmd)
                        continue;
@@ -5561,34 +5640,90 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc,
                lun = sas_device_priv_data->lun;
                query_count++;
 
+               if (ioc->ioc_reset_in_progress_status)
+                       goto out;
+
                spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
-               mpt2sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
-                   MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30, NULL);
-               ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
+               r = mpt2sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
+                   MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30, 0,
+                   TM_MUTEX_OFF);
+               if (r == FAILED) {
+                       sdev_printk(KERN_WARNING, sdev,
+                           "mpt2sas_scsih_issue_tm: FAILED when sending "
+                           "QUERY_TASK: scmd(%p)\n", scmd);
+                       spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+                       goto broadcast_aen_retry;
+               }
                ioc_status = le16_to_cpu(mpi_reply->IOCStatus)
                    & MPI2_IOCSTATUS_MASK;
-               if ((ioc_status == MPI2_IOCSTATUS_SUCCESS) &&
-                   (mpi_reply->ResponseCode ==
+               if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+                       sdev_printk(KERN_WARNING, sdev, "query task: FAILED "
+                           "with IOCSTATUS(0x%04x), scmd(%p)\n", ioc_status,
+                           scmd);
+                       spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+                       goto broadcast_aen_retry;
+               }
+
+               /* see if IO is still owned by IOC and target */
+               if (mpi_reply->ResponseCode ==
                     MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED ||
                     mpi_reply->ResponseCode ==
-                    MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC)) {
+                    MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC) {
                        spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
                        continue;
                }
+               task_abort_retries = 0;
+ tm_retry:
+               if (task_abort_retries++ == 60) {
+                       dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
+                           "%s: ABORT_TASK: giving up\n", ioc->name,
+                           __func__));
+                       spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+                       goto broadcast_aen_retry;
+               }
+
+               if (ioc->ioc_reset_in_progress_status)
+                       goto out_no_lock;
+
                r = mpt2sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id,
                    sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30,
-                   scmd);
-               if (r == FAILED)
-                       sdev_printk(KERN_WARNING, sdev, "task abort: FAILED "
+                   scmd->serial_number, TM_MUTEX_OFF);
+               if (r == FAILED) {
+                       sdev_printk(KERN_WARNING, sdev,
+                           "mpt2sas_scsih_issue_tm: ABORT_TASK: FAILED : "
                            "scmd(%p)\n", scmd);
+                       goto tm_retry;
+               }
+
+               if (task_abort_retries > 1)
+                       sdev_printk(KERN_WARNING, sdev,
+                           "mpt2sas_scsih_issue_tm: ABORT_TASK: RETRIES (%d):"
+                           " scmd(%p)\n",
+                           task_abort_retries - 1, scmd);
+
                termination_count += le32_to_cpu(mpi_reply->TerminationCount);
                spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
        }
+
+       if (ioc->broadcast_aen_pending) {
+               dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: loop back due to"
+                    " pending AEN\n", ioc->name, __func__));
+                ioc->broadcast_aen_pending = 0;
+                goto broadcast_aen_retry;
+       }
+
+ out:
        spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ out_no_lock:
 
-       dtmprintk(ioc, printk(MPT2SAS_INFO_FMT
+       dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
            "%s - exit, query_count = %d termination_count = %d\n",
            ioc->name, __func__, query_count, termination_count));
+
+       ioc->broadcast_aen_busy = 0;
+       if (!ioc->ioc_reset_in_progress_status)
+               _scsih_ublock_io_all_device(ioc);
+       mutex_unlock(&ioc->tm_cmds.mutex);
 }
 
 /**
@@ -6566,7 +6701,7 @@ _scsih_search_responding_expanders(struct MPT2SAS_ADAPTER *ioc)
        Mpi2ExpanderPage0_t expander_pg0;
        Mpi2ConfigReply_t mpi_reply;
        u16 ioc_status;
-       __le64 sas_address;
+       u64 sas_address;
        u16 handle;
 
        printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, __func__);
@@ -6862,10 +6997,14 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
                    mpi_reply->EventData;
 
                if (baen_data->Primitive !=
-                   MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT ||
-                   ioc->broadcast_aen_busy)
+                   MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT)
                        return 1;
-               ioc->broadcast_aen_busy = 1;
+
+               if (ioc->broadcast_aen_busy) {
+                       ioc->broadcast_aen_pending++;
+                       return 1;
+               } else
+                       ioc->broadcast_aen_busy = 1;
                break;
        }
 
@@ -7211,7 +7350,6 @@ _scsih_remove(struct pci_dev *pdev)
        }
 
        sas_remove_host(shost);
-       _scsih_shutdown(pdev);
        list_del(&ioc->list);
        scsi_remove_host(shost);
        scsi_host_put(shost);
@@ -7436,6 +7574,25 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        shost->transportt = mpt2sas_transport_template;
        shost->unique_id = ioc->id;
 
+       if (max_sectors != 0xFFFF) {
+               if (max_sectors < 64) {
+                       shost->max_sectors = 64;
+                       printk(MPT2SAS_WARN_FMT "Invalid value %d passed "
+                           "for max_sectors, range is 64 to 8192. Assigning "
+                           "value of 64.\n", ioc->name, max_sectors);
+               } else if (max_sectors > 8192) {
+                       shost->max_sectors = 8192;
+                       printk(MPT2SAS_WARN_FMT "Invalid value %d passed "
+                           "for max_sectors, range is 64 to 8192. Assigning "
+                           "default value of 8192.\n", ioc->name,
+                           max_sectors);
+               } else {
+                       shost->max_sectors = max_sectors & 0xFFFE;
+                       printk(MPT2SAS_INFO_FMT "The max_sectors value is "
+                           "set to %d\n", ioc->name, shost->max_sectors);
+               }
+       }
+
        if ((scsi_add_host(shost, &pdev->dev))) {
                printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
                    ioc->name, __FILE__, __LINE__, __func__);
@@ -7505,7 +7662,7 @@ _scsih_suspend(struct pci_dev *pdev, pm_message_t state)
 {
        struct Scsi_Host *shost = pci_get_drvdata(pdev);
        struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
-       u32 device_state;
+       pci_power_t device_state;
 
        mpt2sas_base_stop_watchdog(ioc);
        scsi_block_requests(shost);
@@ -7532,7 +7689,7 @@ _scsih_resume(struct pci_dev *pdev)
 {
        struct Scsi_Host *shost = pci_get_drvdata(pdev);
        struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
-       u32 device_state = pdev->current_state;
+       pci_power_t device_state = pdev->current_state;
        int r;
 
        printk(MPT2SAS_INFO_FMT "pdev=0x%p, slot=%s, previous "
index cb1cdec..15c7980 100644 (file)
@@ -299,7 +299,6 @@ _transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc,
        void *data_out = NULL;
        dma_addr_t data_out_dma;
        u32 sz;
-       u64 *sas_address_le;
        u16 wait_state_count;
 
        if (ioc->shost_recovery || ioc->pci_error_recovery) {
@@ -372,8 +371,7 @@ _transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc,
        mpi_request->PhysicalPort = 0xFF;
        mpi_request->VF_ID = 0; /* TODO */
        mpi_request->VP_ID = 0;
-       sas_address_le = (u64 *)&mpi_request->SASAddress;
-       *sas_address_le = cpu_to_le64(sas_address);
+       mpi_request->SASAddress = cpu_to_le64(sas_address);
        mpi_request->RequestDataLength =
            cpu_to_le16(sizeof(struct rep_manu_request));
        psge = &mpi_request->SGL;
@@ -1049,14 +1047,14 @@ struct phy_error_log_reply{
        u8 function; /* 0x11 */
        u8 function_result;
        u8 response_length;
-       u16 expander_change_count;
+       __be16 expander_change_count;
        u8 reserved_1[3];
        u8 phy_identifier;
        u8 reserved_2[2];
-       u32 invalid_dword;
-       u32 running_disparity_error;
-       u32 loss_of_dword_sync;
-       u32 phy_reset_problem;
+       __be32 invalid_dword;
+       __be32 running_disparity_error;
+       __be32 loss_of_dword_sync;
+       __be32 phy_reset_problem;
 };
 
 /**
@@ -1085,7 +1083,6 @@ _transport_get_expander_phy_error_log(struct MPT2SAS_ADAPTER *ioc,
        void *data_out = NULL;
        dma_addr_t data_out_dma;
        u32 sz;
-       u64 *sas_address_le;
        u16 wait_state_count;
 
        if (ioc->shost_recovery || ioc->pci_error_recovery) {
@@ -1160,8 +1157,7 @@ _transport_get_expander_phy_error_log(struct MPT2SAS_ADAPTER *ioc,
        mpi_request->PhysicalPort = 0xFF;
        mpi_request->VF_ID = 0; /* TODO */
        mpi_request->VP_ID = 0;
-       sas_address_le = (u64 *)&mpi_request->SASAddress;
-       *sas_address_le = cpu_to_le64(phy->identify.sas_address);
+       mpi_request->SASAddress = cpu_to_le64(phy->identify.sas_address);
        mpi_request->RequestDataLength =
            cpu_to_le16(sizeof(struct phy_error_log_request));
        psge = &mpi_request->SGL;
@@ -1406,7 +1402,6 @@ _transport_expander_phy_control(struct MPT2SAS_ADAPTER *ioc,
        void *data_out = NULL;
        dma_addr_t data_out_dma;
        u32 sz;
-       u64 *sas_address_le;
        u16 wait_state_count;
 
        if (ioc->shost_recovery) {
@@ -1486,8 +1481,7 @@ _transport_expander_phy_control(struct MPT2SAS_ADAPTER *ioc,
        mpi_request->PhysicalPort = 0xFF;
        mpi_request->VF_ID = 0; /* TODO */
        mpi_request->VP_ID = 0;
-       sas_address_le = (u64 *)&mpi_request->SASAddress;
-       *sas_address_le = cpu_to_le64(phy->identify.sas_address);
+       mpi_request->SASAddress = cpu_to_le64(phy->identify.sas_address);
        mpi_request->RequestDataLength =
            cpu_to_le16(sizeof(struct phy_error_log_request));
        psge = &mpi_request->SGL;
@@ -1914,7 +1908,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
        mpi_request->PhysicalPort = 0xFF;
        mpi_request->VF_ID = 0; /* TODO */
        mpi_request->VP_ID = 0;
-       *((u64 *)&mpi_request->SASAddress) = (rphy) ?
+       mpi_request->SASAddress = (rphy) ?
            cpu_to_le64(rphy->identify.sas_address) :
            cpu_to_le64(ioc->sas_hba.sas_address);
        mpi_request->RequestDataLength = cpu_to_le16(blk_rq_bytes(req) - 4);
index 82e9e5c..cf8dfab 100644 (file)
@@ -197,6 +197,7 @@ static struct {
        {"IBM", "ProFibre 4000R", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
        {"IBM", "2105", NULL, BLIST_RETRY_HWERROR},
        {"iomega", "jaz 1GB", "J.86", BLIST_NOTQ | BLIST_NOLUN},
+       {"IOMEGA", "ZIP", NULL, BLIST_NOTQ | BLIST_NOLUN},
        {"IOMEGA", "Io20S         *F", NULL, BLIST_KEY},
        {"INSITE", "Floptical   F*8I", NULL, BLIST_KEY},
        {"INSITE", "I325VM", NULL, BLIST_KEY},
@@ -243,6 +244,7 @@ static struct {
        {"Tornado-", "F4", "*", BLIST_NOREPORTLUN},
        {"TOSHIBA", "CDROM", NULL, BLIST_ISROM},
        {"TOSHIBA", "CD-ROM", NULL, BLIST_ISROM},
+       {"Traxdata", "CDR4120", NULL, BLIST_NOLUN},     /* locks up */
        {"USB2.0", "SMARTMEDIA/XD", NULL, BLIST_FORCELUN | BLIST_INQUIRY_36},
        {"WangDAT", "Model 2600", "01.7", BLIST_SELECT_NO_ATN},
        {"WangDAT", "Model 3200", "02.2", BLIST_SELECT_NO_ATN},
index eb7a3e8..eba183c 100644 (file)
@@ -160,6 +160,10 @@ static unsigned char *ses_get_page2_descriptor(struct enclosure_device *edev,
        return NULL;
 }
 
+/* For device slot and array device slot elements, byte 3 bit 6
+ * is "fault sensed" while byte 3 bit 5 is "fault reqstd". As this
+ * code stands these bits are shifted 4 positions right so in
+ * sysfs they will appear as bits 2 and 1 respectively. Strange. */
 static void ses_get_fault(struct enclosure_device *edev,
                          struct enclosure_component *ecomp)
 {
@@ -181,7 +185,7 @@ static int ses_set_fault(struct enclosure_device *edev,
                /* zero is disabled */
                break;
        case ENCLOSURE_SETTING_ENABLED:
-               desc[2] = 0x02;
+               desc[3] = 0x20;
                break;
        default:
                /* SES doesn't do the SGPIO blink settings */
index 07eaef1..7e12a2e 100644 (file)
  *    inside the execution of NCR5380_intr(), leading to recursive
  *    calls.
  *
- *  - I've added a function merge_contiguous_buffers() that tries to
- *    merge scatter-gather buffers that are located at contiguous
- *    physical addresses and can be processed with the same DMA setup.
- *    Since most scatter-gather operations work on a page (4K) of
- *    4 buffers (1K), in more than 90% of all cases three interrupts and
- *    DMA setup actions are saved.
- *
  * - I've deleted all the stuff for AUTOPROBE_IRQ, REAL_DMA_POLL, PSEUDO_DMA
  *    and USLEEP, because these were messing up readability and will never be
  *    needed for Atari SCSI.
@@ -266,8 +259,9 @@ static struct scsi_host_template *the_template = NULL;
        (struct NCR5380_hostdata *)(in)->hostdata
 #define        HOSTDATA(in) ((struct NCR5380_hostdata *)(in)->hostdata)
 
-#define        NEXT(cmd)       (*(struct scsi_cmnd **)&((cmd)->host_scribble))
-#define        NEXTADDR(cmd)   ((struct scsi_cmnd **)&((cmd)->host_scribble))
+#define        NEXT(cmd)               ((struct scsi_cmnd *)(cmd)->host_scribble)
+#define        SET_NEXT(cmd, next)     ((cmd)->host_scribble = (void *)(next))
+#define        NEXTADDR(cmd)           ((struct scsi_cmnd **)&((cmd)->host_scribble))
 
 #define        HOSTNO          instance->host_no
 #define        H_NO(cmd)       (cmd)->device->host->host_no
@@ -458,47 +452,6 @@ static void free_all_tags( void )
 #endif /* SUPPORT_TAGS */
 
 
-/*
- * Function: void merge_contiguous_buffers(struct scsi_cmnd *cmd)
- *
- * Purpose: Try to merge several scatter-gather requests into one DMA
- *    transfer. This is possible if the scatter buffers lie on
- *    physical contiguous addresses.
- *
- * Parameters: struct scsi_cmnd *cmd
- *    The command to work on. The first scatter buffer's data are
- *    assumed to be already transferred into ptr/this_residual.
- */
-
-static void merge_contiguous_buffers(struct scsi_cmnd *cmd)
-{
-    unsigned long endaddr;
-#if (NDEBUG & NDEBUG_MERGING)
-    unsigned long oldlen = cmd->SCp.this_residual;
-    int                  cnt = 1;
-#endif
-
-    for (endaddr = virt_to_phys(cmd->SCp.ptr + cmd->SCp.this_residual - 1) + 1;
-        cmd->SCp.buffers_residual &&
-        virt_to_phys(SGADDR(&(cmd->SCp.buffer[1]))) == endaddr; ) {
-       
-       MER_PRINTK("VTOP(%p) == %08lx -> merging\n",
-                  SGADDR(&(cmd->SCp.buffer[1])), endaddr);
-#if (NDEBUG & NDEBUG_MERGING)
-       ++cnt;
-#endif
-       ++cmd->SCp.buffer;
-       --cmd->SCp.buffers_residual;
-       cmd->SCp.this_residual += cmd->SCp.buffer->length;
-       endaddr += cmd->SCp.buffer->length;
-    }
-#if (NDEBUG & NDEBUG_MERGING)
-    if (oldlen != cmd->SCp.this_residual)
-       MER_PRINTK("merged %d buffers from %p, new length %08x\n",
-                  cnt, cmd->SCp.ptr, cmd->SCp.this_residual);
-#endif
-}
-
 /*
  * Function : void initialize_SCp(struct scsi_cmnd *cmd)
  *
@@ -520,11 +473,6 @@ static __inline__ void initialize_SCp(struct scsi_cmnd *cmd)
        cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1;
        cmd->SCp.ptr = (char *) SGADDR(cmd->SCp.buffer);
        cmd->SCp.this_residual = cmd->SCp.buffer->length;
-
-       /* ++roman: Try to merge some scatter-buffers if they are at
-        * contiguous physical addresses.
-        */
-//     merge_contiguous_buffers( cmd );
     } else {
        cmd->SCp.buffer = NULL;
        cmd->SCp.buffers_residual = 0;
@@ -841,7 +789,7 @@ static char *lprint_Scsi_Cmnd(struct scsi_cmnd *cmd, char *pos, char *buffer,
  * 
  */
 
-static int NCR5380_init (struct Scsi_Host *instance, int flags)
+static int __init NCR5380_init(struct Scsi_Host *instance, int flags)
 {
     int i;
     SETUP_HOSTDATA(instance);
@@ -889,6 +837,11 @@ static int NCR5380_init (struct Scsi_Host *instance, int flags)
     return 0;
 }
 
+static void NCR5380_exit(struct Scsi_Host *instance)
+{
+       /* Empty, as we didn't schedule any delayed work */
+}
+
 /* 
  * Function : int NCR5380_queue_command (struct scsi_cmnd *cmd,
  *     void (*done)(struct scsi_cmnd *))
@@ -962,7 +915,7 @@ static int NCR5380_queue_command_lck(struct scsi_cmnd *cmd,
      * in a queue 
      */
 
-    NEXT(cmd) = NULL;
+    SET_NEXT(cmd, NULL);
     cmd->scsi_done = done;
 
     cmd->result = 0;
@@ -990,14 +943,14 @@ static int NCR5380_queue_command_lck(struct scsi_cmnd *cmd,
      */
     if (!(hostdata->issue_queue) || (cmd->cmnd[0] == REQUEST_SENSE)) {
        LIST(cmd, hostdata->issue_queue);
-       NEXT(cmd) = hostdata->issue_queue;
+       SET_NEXT(cmd, hostdata->issue_queue);
        hostdata->issue_queue = cmd;
     } else {
        for (tmp = (struct scsi_cmnd *)hostdata->issue_queue;
             NEXT(tmp); tmp = NEXT(tmp))
            ;
        LIST(cmd, tmp);
-       NEXT(tmp) = cmd;
+       SET_NEXT(tmp, cmd);
     }
 
     local_irq_restore(flags);
@@ -1105,12 +1058,12 @@ static void NCR5380_main (struct work_struct *bl)
                    local_irq_disable();
                    if (prev) {
                        REMOVE(prev, NEXT(prev), tmp, NEXT(tmp));
-                       NEXT(prev) = NEXT(tmp);
+                       SET_NEXT(prev, NEXT(tmp));
                    } else {
                        REMOVE(-1, hostdata->issue_queue, tmp, NEXT(tmp));
                        hostdata->issue_queue = NEXT(tmp);
                    }
-                   NEXT(tmp) = NULL;
+                   SET_NEXT(tmp, NULL);
                    
                    /* reenable interrupts after finding one */
                    local_irq_restore(flags);
@@ -1144,7 +1097,7 @@ static void NCR5380_main (struct work_struct *bl)
                    } else {
                        local_irq_disable();
                        LIST(tmp, hostdata->issue_queue);
-                       NEXT(tmp) = hostdata->issue_queue;
+                       SET_NEXT(tmp, hostdata->issue_queue);
                        hostdata->issue_queue = tmp;
 #ifdef SUPPORT_TAGS
                        cmd_free_tag( tmp );
@@ -1439,7 +1392,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd,
     local_irq_restore(flags);
 
     /* Wait for arbitration logic to complete */
-#if NCR_TIMEOUT
+#ifdef NCR_TIMEOUT
     {
       unsigned long timeout = jiffies + 2*NCR_TIMEOUT;
 
@@ -2070,11 +2023,6 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
                    --cmd->SCp.buffers_residual;
                    cmd->SCp.this_residual = cmd->SCp.buffer->length;
                    cmd->SCp.ptr = SGADDR(cmd->SCp.buffer);
-
-                   /* ++roman: Try to merge some scatter-buffers if
-                    * they are at contiguous physical addresses.
-                    */
-//                 merge_contiguous_buffers( cmd );
                    INF_PRINTK("scsi%d: %d bytes and %d buffers left\n",
                               HOSTNO, cmd->SCp.this_residual,
                               cmd->SCp.buffers_residual);
@@ -2274,7 +2222,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
 
                        local_irq_save(flags);
                        LIST(cmd,hostdata->issue_queue);
-                       NEXT(cmd) = hostdata->issue_queue;
+                       SET_NEXT(cmd, hostdata->issue_queue);
                        hostdata->issue_queue = (struct scsi_cmnd *) cmd;
                        local_irq_restore(flags);
                        QU_PRINTK("scsi%d: REQUEST SENSE added to head of "
@@ -2330,7 +2278,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
                    local_irq_save(flags);
                    cmd->device->disconnect = 1;
                    LIST(cmd,hostdata->disconnected_queue);
-                   NEXT(cmd) = hostdata->disconnected_queue;
+                   SET_NEXT(cmd, hostdata->disconnected_queue);
                    hostdata->connected = NULL;
                    hostdata->disconnected_queue = cmd;
                    local_irq_restore(flags);
@@ -2589,12 +2537,12 @@ static void NCR5380_reselect (struct Scsi_Host *instance)
            ) {
            if (prev) {
                REMOVE(prev, NEXT(prev), tmp, NEXT(tmp));
-               NEXT(prev) = NEXT(tmp);
+               SET_NEXT(prev, NEXT(tmp));
            } else {
                REMOVE(-1, hostdata->disconnected_queue, tmp, NEXT(tmp));
                hostdata->disconnected_queue = NEXT(tmp);
            }
-           NEXT(tmp) = NULL;
+           SET_NEXT(tmp, NULL);
            break;
        }
     }
@@ -2762,7 +2710,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
        if (cmd == tmp) {
            REMOVE(5, *prev, tmp, NEXT(tmp));
            (*prev) = NEXT(tmp);
-           NEXT(tmp) = NULL;
+           SET_NEXT(tmp, NULL);
            tmp->result = DID_ABORT << 16;
            local_irq_restore(flags);
            ABRT_PRINTK("scsi%d: abort removed command from issue queue.\n",
@@ -2835,7 +2783,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
                    if (cmd == tmp) {
                    REMOVE(5, *prev, tmp, NEXT(tmp));
                    *prev = NEXT(tmp);
-                   NEXT(tmp) = NULL;
+                   SET_NEXT(tmp, NULL);
                    tmp->result = DID_ABORT << 16;
                    /* We must unlock the tag/LUN immediately here, since the
                     * target goes to BUS FREE and doesn't send us another
@@ -2943,7 +2891,7 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
 
     for (i = 0; (cmd = disconnected_queue); ++i) {
        disconnected_queue = NEXT(cmd);
-       NEXT(cmd) = NULL;
+       SET_NEXT(cmd, NULL);
        cmd->result = (cmd->result & 0xffff) | (DID_RESET << 16);
        cmd->scsi_done( cmd );
     }
index 613f588..baf7328 100644 (file)
 #include <asm/idprom.h>
 #include <asm/machines.h>
 
+#define NDEBUG 0
+
+#define NDEBUG_ABORT           0x00100000
+#define NDEBUG_TAGS            0x00200000
+#define NDEBUG_MERGING         0x00400000
+
 /* dma on! */
 #define REAL_DMA
 
@@ -86,8 +92,6 @@ static void NCR5380_print(struct Scsi_Host *instance);
 /*#define RESET_BOOT */
 #define DRIVER_SETUP
 
-#define NDEBUG 0
-
 /*
  * BUG can be used to trigger a strange code-size related hang on 2.1 kernels
  */
@@ -195,7 +199,7 @@ static struct Scsi_Host *default_instance;
  *
  */
  
-int sun3scsi_detect(struct scsi_host_template * tpnt)
+int __init sun3scsi_detect(struct scsi_host_template * tpnt)
 {
        unsigned long ioaddr;
        static int called = 0;
@@ -314,6 +318,7 @@ int sun3scsi_release (struct Scsi_Host *shpnt)
 
        iounmap((void *)sun3_scsi_regp);
 
+       NCR5380_exit(shpnt);
        return 0;
 }
 
index 7c526b8..fbba78e 100644 (file)
 /* dma on! */
 #define REAL_DMA
 
+#define NDEBUG 0
+
+#define NDEBUG_ABORT           0x00100000
+#define NDEBUG_TAGS            0x00200000
+#define NDEBUG_MERGING         0x00400000
+
 #include "scsi.h"
 #include "initio.h"
 #include <scsi/scsi_host.h>
@@ -50,8 +56,6 @@ extern int sun3_map_test(unsigned long, char *);
 /*#define RESET_BOOT */
 #define DRIVER_SETUP
 
-#define NDEBUG 0
-
 /*
  * BUG can be used to trigger a strange code-size related hang on 2.1 kernels
  */
@@ -137,7 +141,7 @@ static struct Scsi_Host *default_instance;
  *
  */
  
-static int sun3scsi_detect(struct scsi_host_template * tpnt)
+static int __init sun3scsi_detect(struct scsi_host_template * tpnt)
 {
        unsigned long ioaddr, irq = 0;
        static int called = 0;
@@ -283,6 +287,7 @@ int sun3scsi_release (struct Scsi_Host *shpnt)
 
        iounmap((void *)sun3_scsi_regp);
 
+       NCR5380_exit(shpnt);
        return 0;
 }
 
index b2a1067..328ea2b 100644 (file)
@@ -266,8 +266,9 @@ int ft_write_pending(struct se_cmd *se_cmd)
                                cmd->sg_cnt =
                                        T_TASK(se_cmd)->t_tasks_sg_chained_no;
                        }
-                       if (cmd->sg && lport->tt.ddp_setup(lport, ep->xid,
-                                                   cmd->sg, cmd->sg_cnt))
+                       if (cmd->sg && lport->tt.ddp_target(lport, ep->xid,
+                                                           cmd->sg,
+                                                           cmd->sg_cnt))
                                cmd->was_ddp_setup = 1;
                }
        }
@@ -379,12 +380,23 @@ static void ft_send_resp_status(struct fc_lport *lport,
 
 /*
  * Send error or task management response.
- * Always frees the cmd and associated state.
  */
-static void ft_send_resp_code(struct ft_cmd *cmd, enum fcp_resp_rsp_codes code)
+static void ft_send_resp_code(struct ft_cmd *cmd,
+                             enum fcp_resp_rsp_codes code)
 {
        ft_send_resp_status(cmd->sess->tport->lport,
                            cmd->req_frame, SAM_STAT_GOOD, code);
+}
+
+
+/*
+ * Send error or task management response.
+ * Always frees the cmd and associated state.
+ */
+static void ft_send_resp_code_and_free(struct ft_cmd *cmd,
+                                     enum fcp_resp_rsp_codes code)
+{
+       ft_send_resp_code(cmd, code);
        ft_free_cmd(cmd);
 }
 
@@ -422,7 +434,7 @@ static void ft_send_tm(struct ft_cmd *cmd)
                 * tm_flags set is invalid.
                 */
                FT_TM_DBG("invalid FCP tm_flags %x\n", fcp->fc_tm_flags);
-               ft_send_resp_code(cmd, FCP_CMND_FIELDS_INVALID);
+               ft_send_resp_code_and_free(cmd, FCP_CMND_FIELDS_INVALID);
                return;
        }
 
@@ -430,7 +442,7 @@ static void ft_send_tm(struct ft_cmd *cmd)
        tmr = core_tmr_alloc_req(&cmd->se_cmd, cmd, tm_func);
        if (!tmr) {
                FT_TM_DBG("alloc failed\n");
-               ft_send_resp_code(cmd, FCP_TMF_FAILED);
+               ft_send_resp_code_and_free(cmd, FCP_TMF_FAILED);
                return;
        }
        cmd->se_cmd.se_tmr_req = tmr;
@@ -669,7 +681,7 @@ static void ft_send_cmd(struct ft_cmd *cmd)
        return;
 
 err:
-       ft_send_resp_code(cmd, FCP_CMND_FIELDS_INVALID);
+       ft_send_resp_code_and_free(cmd, FCP_CMND_FIELDS_INVALID);
        return;
 }
 
index f1e6c18..f0a2f8b 100644 (file)
@@ -92,6 +92,13 @@ struct iscsi_boot_kobj {
         * properties.
         */
        mode_t (*is_visible) (void *data, int type);
+
+       /*
+        * Driver specific release function.
+        *
+        * The function should free the data passed in.
+        */
+       void (*release) (void *data);
 };
 
 struct iscsi_boot_kset {
@@ -103,18 +110,21 @@ struct iscsi_boot_kobj *
 iscsi_boot_create_initiator(struct iscsi_boot_kset *boot_kset, int index,
                            void *data,
                            ssize_t (*show) (void *data, int type, char *buf),
-                           mode_t (*is_visible) (void *data, int type));
+                           mode_t (*is_visible) (void *data, int type),
+                           void (*release) (void *data));
 
 struct iscsi_boot_kobj *
 iscsi_boot_create_ethernet(struct iscsi_boot_kset *boot_kset, int index,
                           void *data,
                           ssize_t (*show) (void *data, int type, char *buf),
-                          mode_t (*is_visible) (void *data, int type));
+                          mode_t (*is_visible) (void *data, int type),
+                          void (*release) (void *data));
 struct iscsi_boot_kobj *
 iscsi_boot_create_target(struct iscsi_boot_kset *boot_kset, int index,
                         void *data,
                         ssize_t (*show) (void *data, int type, char *buf),
-                        mode_t (*is_visible) (void *data, int type));
+                        mode_t (*is_visible) (void *data, int type),
+                        void (*release) (void *data));
 
 struct iscsi_boot_kset *iscsi_boot_create_kset(const char *set_name);
 struct iscsi_boot_kset *iscsi_boot_create_host_kset(unsigned int hostno);
index dd0a52c..ea68b3c 100644 (file)
@@ -60,7 +60,7 @@ struct iscsi_hdr {
        uint8_t         rsvd2[2];
        uint8_t         hlength;        /* AHSs total length */
        uint8_t         dlength[3];     /* Data length */
-       uint8_t         lun[8];
+       struct scsi_lun lun;
        itt_t           itt;            /* Initiator Task Tag, opaque for target */
        __be32          ttt;            /* Target Task Tag */
        __be32          statsn;
@@ -122,7 +122,7 @@ struct iscsi_cmd {
        __be16 rsvd2;
        uint8_t hlength;
        uint8_t dlength[3];
-       uint8_t lun[8];
+       struct scsi_lun lun;
        itt_t    itt;   /* Initiator Task Tag */
        __be32 data_length;
        __be32 cmdsn;
@@ -198,7 +198,7 @@ struct iscsi_async {
        uint8_t rsvd2[2];
        uint8_t rsvd3;
        uint8_t dlength[3];
-       uint8_t lun[8];
+       struct scsi_lun lun;
        uint8_t rsvd4[8];
        __be32  statsn;
        __be32  exp_cmdsn;
@@ -226,7 +226,7 @@ struct iscsi_nopout {
        __be16  rsvd2;
        uint8_t rsvd3;
        uint8_t dlength[3];
-       uint8_t lun[8];
+       struct scsi_lun lun;
        itt_t    itt;   /* Initiator Task Tag */
        __be32  ttt;    /* Target Transfer Tag */
        __be32  cmdsn;
@@ -241,7 +241,7 @@ struct iscsi_nopin {
        __be16  rsvd2;
        uint8_t rsvd3;
        uint8_t dlength[3];
-       uint8_t lun[8];
+       struct scsi_lun lun;
        itt_t    itt;   /* Initiator Task Tag */
        __be32  ttt;    /* Target Transfer Tag */
        __be32  statsn;
@@ -257,7 +257,7 @@ struct iscsi_tm {
        uint8_t rsvd1[2];
        uint8_t hlength;
        uint8_t dlength[3];
-       uint8_t lun[8];
+       struct scsi_lun lun;
        itt_t    itt;   /* Initiator Task Tag */
        itt_t    rtt;   /* Reference Task Tag */
        __be32  cmdsn;
@@ -315,7 +315,7 @@ struct iscsi_r2t_rsp {
        uint8_t rsvd2[2];
        uint8_t hlength;
        uint8_t dlength[3];
-       uint8_t lun[8];
+       struct scsi_lun lun;
        itt_t    itt;   /* Initiator Task Tag */
        __be32  ttt;    /* Target Transfer Tag */
        __be32  statsn;
@@ -333,7 +333,7 @@ struct iscsi_data {
        uint8_t rsvd2[2];
        uint8_t rsvd3;
        uint8_t dlength[3];
-       uint8_t lun[8];
+       struct scsi_lun lun;
        itt_t    itt;
        __be32  ttt;
        __be32  rsvd4;
@@ -353,7 +353,7 @@ struct iscsi_data_rsp {
        uint8_t cmd_status;
        uint8_t hlength;
        uint8_t dlength[3];
-       uint8_t lun[8];
+       struct scsi_lun lun;
        itt_t    itt;
        __be32  ttt;
        __be32  statsn;
index a3cbda4..7d96829 100644 (file)
@@ -510,6 +510,14 @@ struct libfc_function_template {
         * STATUS: OPTIONAL
         */
        int (*ddp_done)(struct fc_lport *, u16);
+       /*
+        * Sets up the DDP context for a given exchange id on the given
+        * scatterlist if LLD supports DDP for FCoE target.
+        *
+        * STATUS: OPTIONAL
+        */
+       int (*ddp_target)(struct fc_lport *, u16, struct scatterlist *,
+                         unsigned int);
        /*
         * Allow LLD to fill its own Link Error Status Block
         *
index 0f43677..cedcff3 100644 (file)
@@ -115,7 +115,7 @@ struct iscsi_task {
        /* copied values in case we need to send tmfs */
        itt_t                   hdr_itt;
        __be32                  cmdsn;
-       uint8_t                 lun[8];
+       struct scsi_lun         lun;
 
        int                     itt;            /* this ITT */