Merge tag 'scsi-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb...
author     Linus Torvalds <torvalds@linux-foundation.org>
           Sat, 2 Mar 2013 19:42:16 +0000 (11:42 -0800)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Sat, 2 Mar 2013 19:42:16 +0000 (11:42 -0800)
Pull SCSI updates from James Bottomley:
 "This is an assorted set of stragglers into the merge window with
  driver updates for qla2xxx, megaraid_sas, storvsc and ufs.

  It also includes pulls of the uapi tree (all the remaining SCSI
  pieces) and the fcoe tree (updates to fcoe and libfc)"

* tag 'scsi-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (81 commits)
  [SCSI] ufs: Separate PCI code into glue driver
  [SCSI] ufs: Segregate PCI Specific Code
  [SCSI] scsi: fix lpfc build when wmb() is defined as mb()
  [SCSI] storvsc: Handle dynamic resizing of the device
  [SCSI] storvsc: Restructure error handling code on command completion
  [SCSI] storvsc: avoid usage of WRITE_SAME
  [SCSI] aacraid: suppress two GCC warnings
  [SCSI] hpsa: check for dma_mapping_error in hpsa_passthru ioctls
  [SCSI] hpsa: reorganize error handling in hpsa_passthru_ioctl
  [SCSI] hpsa: check for dma_mapping_error in hpsa_map_sg_chain_block
  [SCSI] hpsa: Check for dma_mapping_error for all code paths using fill_cmd
  [SCSI] hpsa: Check for dma_mapping_error in hpsa_map_one
  [SCSI] dc395x: uninitialized variable in device_alloc()
  [SCSI] Fix range check in scsi_host_dif_capable()
  [SCSI] storvsc: Initialize the sglist
  [SCSI] mpt2sas: Add support for OEM specific controller
  [SCSI] ipr: Fix oops while resetting an ipr adapter
  [SCSI] fnic: Fnic Trace Utility
  [SCSI] fnic: New debug flags and debug log messages
  [SCSI] fnic: fnic driver may hit BUG_ON on device reset
  ...

83 files changed:
Documentation/ABI/testing/sysfs-bus-fcoe
Documentation/scsi/ChangeLog.megaraid_sas
drivers/scsi/aacraid/src.c
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
drivers/scsi/dc395x.c
drivers/scsi/fcoe/fcoe.c
drivers/scsi/fcoe/fcoe.h
drivers/scsi/fcoe/fcoe_ctlr.c
drivers/scsi/fcoe/fcoe_sysfs.c
drivers/scsi/fcoe/fcoe_transport.c
drivers/scsi/fcoe/libfcoe.h
drivers/scsi/fnic/Makefile
drivers/scsi/fnic/fnic.h
drivers/scsi/fnic/fnic_debugfs.c [new file with mode: 0644]
drivers/scsi/fnic/fnic_io.h
drivers/scsi/fnic/fnic_main.c
drivers/scsi/fnic/fnic_scsi.c
drivers/scsi/fnic/fnic_trace.c [new file with mode: 0644]
drivers/scsi/fnic/fnic_trace.h [new file with mode: 0644]
drivers/scsi/hpsa.c
drivers/scsi/ipr.c
drivers/scsi/ipr.h
drivers/scsi/libfc/fc_fcp.c
drivers/scsi/libfc/fc_libfc.h
drivers/scsi/libfc/fc_rport.c
drivers/scsi/lpfc/lpfc_sli.c
drivers/scsi/megaraid/megaraid_sas.h
drivers/scsi/megaraid/megaraid_sas_base.c
drivers/scsi/megaraid/megaraid_sas_fusion.c
drivers/scsi/megaraid/megaraid_sas_fusion.h
drivers/scsi/mpt2sas/mpt2sas_base.c
drivers/scsi/mpt2sas/mpt2sas_base.h
drivers/scsi/mvsas/mv_sas.c
drivers/scsi/mvsas/mv_sas.h
drivers/scsi/osd/osd_initiator.c
drivers/scsi/pm8001/pm8001_init.c
drivers/scsi/qla2xxx/qla_attr.c
drivers/scsi/qla2xxx/qla_bsg.c
drivers/scsi/qla2xxx/qla_bsg.h
drivers/scsi/qla2xxx/qla_dbg.c
drivers/scsi/qla2xxx/qla_dbg.h
drivers/scsi/qla2xxx/qla_def.h
drivers/scsi/qla2xxx/qla_dfs.c
drivers/scsi/qla2xxx/qla_fw.h
drivers/scsi/qla2xxx/qla_gbl.h
drivers/scsi/qla2xxx/qla_gs.c
drivers/scsi/qla2xxx/qla_init.c
drivers/scsi/qla2xxx/qla_inline.h
drivers/scsi/qla2xxx/qla_iocb.c
drivers/scsi/qla2xxx/qla_isr.c
drivers/scsi/qla2xxx/qla_mbx.c
drivers/scsi/qla2xxx/qla_mid.c
drivers/scsi/qla2xxx/qla_nx.c
drivers/scsi/qla2xxx/qla_nx.h
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/qla2xxx/qla_settings.h
drivers/scsi/qla2xxx/qla_sup.c
drivers/scsi/qla2xxx/qla_target.c
drivers/scsi/qla2xxx/qla_target.h
drivers/scsi/qla2xxx/qla_version.h
drivers/scsi/qla4xxx/ql4_mbx.c
drivers/scsi/storvsc_drv.c
drivers/scsi/ufs/Kconfig
drivers/scsi/ufs/Makefile
drivers/scsi/ufs/ufs.h
drivers/scsi/ufs/ufshcd-pci.c [new file with mode: 0644]
drivers/scsi/ufs/ufshcd.c
drivers/scsi/ufs/ufshcd.h [new file with mode: 0644]
drivers/scsi/ufs/ufshci.h
include/scsi/Kbuild
include/scsi/fc/Kbuild
include/scsi/fcoe_sysfs.h
include/scsi/libfcoe.h
include/scsi/scsi_host.h
include/uapi/scsi/Kbuild
include/uapi/scsi/fc/Kbuild
include/uapi/scsi/fc/fc_els.h [moved from include/scsi/fc/fc_els.h with 100% similarity]
include/uapi/scsi/fc/fc_fs.h [moved from include/scsi/fc/fc_fs.h with 100% similarity]
include/uapi/scsi/fc/fc_gs.h [moved from include/scsi/fc/fc_gs.h with 100% similarity]
include/uapi/scsi/fc/fc_ns.h [moved from include/scsi/fc/fc_ns.h with 100% similarity]
include/uapi/scsi/scsi_bsg_fc.h [moved from include/scsi/scsi_bsg_fc.h with 100% similarity]
include/uapi/scsi/scsi_netlink.h [moved from include/scsi/scsi_netlink.h with 100% similarity]
include/uapi/scsi/scsi_netlink_fc.h [moved from include/scsi/scsi_netlink_fc.h with 100% similarity]

diff --git a/Documentation/ABI/testing/sysfs-bus-fcoe b/Documentation/ABI/testing/sysfs-bus-fcoe
index 50e2a80..21640ea 100644
@@ -1,14 +1,53 @@
-What:          /sys/bus/fcoe/ctlr_X
+What:          /sys/bus/fcoe/
+Date:          August 2012
+KernelVersion: TBD
+Contact:       Robert Love <robert.w.love@intel.com>, devel@open-fcoe.org
+Description:   The FCoE bus. Attributes in this directory are control interfaces.
+Attributes:
+
+       ctlr_create: 'FCoE Controller' instance creation interface. Writing an
+                    <ifname> to this file will allocate and populate sysfs with a
+                    fcoe_ctlr_device (ctlr_X). The user can then configure any
+                    per-port settings and finally write to the fcoe_ctlr_device's
+                    'enabled' attribute to begin the kernel's discovery and login
+                    process.
+
+       ctlr_destroy: 'FCoE Controller' instance removal interface. Writing a
+                      fcoe_ctlr_device's sysfs name to this file will log the
+                      fcoe_ctlr_device out of the fabric or any otherwise
+                      connected FCoE devices. It will also free all kernel
+                      memory allocated for this fcoe_ctlr_device and any
+                      structures associated with it, including the scsi_host.
+
+What:          /sys/bus/fcoe/devices/ctlr_X
 Date:          March 2012
 KernelVersion: TBD
 Contact:       Robert Love <robert.w.love@intel.com>, devel@open-fcoe.org
-Description:   'FCoE Controller' instances on the fcoe bus
+Description:   'FCoE Controller' instances on the fcoe bus.
+               The FCoE Controller now has a three-stage creation process:
+               1) Write the interface name to ctlr_create, 2) Configure the
+               FCoE Controller (ctlr_X), 3) Enable the FCoE Controller to
+               begin discovery and login. The FCoE Controller is destroyed by
+               writing its name, i.e. ctlr_X, to the ctlr_destroy file.
+
 Attributes:
 
        fcf_dev_loss_tmo: Device loss timeout period (see below). Changing
                          this value will change the dev_loss_tmo for all
                          FCFs discovered by this controller.
 
+       mode:             Display or change the FCoE Controller's mode. Possible
+                         modes are 'Fabric' and 'VN2VN'. If an FCoE Controller
+                         is started in 'Fabric' mode then FIP FCF discovery is
+                         initiated and ultimately a fabric login is attempted.
+                         If an FCoE Controller is started in 'VN2VN' mode then
+                         FIP VN2VN discovery and login are performed. An FCoE
+                         Controller only supports one mode at a time.
+
+       enabled:          Whether an FCoE controller is enabled or disabled.
+                         0 if disabled, 1 if enabled. Writing either 0 or 1
+                         to this file will enable or disable the FCoE controller.
+
        lesb/link_fail:   Link Error Status Block (LESB) link failure count.
 
        lesb/vlink_fail:  Link Error Status Block (LESB) virtual link
@@ -26,7 +65,7 @@ Attributes:
 
 Notes: ctlr_X (global increment starting at 0)
 
-What:          /sys/bus/fcoe/fcf_X
+What:          /sys/bus/fcoe/devices/fcf_X
 Date:          March 2012
 KernelVersion: TBD
 Contact:       Robert Love <robert.w.love@intel.com>, devel@open-fcoe.org
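
As a concrete illustration of the control interface documented above, a minimal
userspace sketch might look like the following. This is illustrative only and
not part of the merge; the interface name "eth0" and the controller index
"ctlr_0" are assumptions for the example, and error handling is kept minimal.

#include <stdio.h>

static int sysfs_write(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return -1;
	}
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	/* 1) allocate an FCoE Controller on the chosen netdev (starts disabled) */
	sysfs_write("/sys/bus/fcoe/ctlr_create", "eth0");
	/* 2) configure it before discovery starts, e.g. switch to VN2VN mode */
	sysfs_write("/sys/bus/fcoe/devices/ctlr_0/mode", "VN2VN");
	/* 3) enable it to kick off FIP discovery and login */
	sysfs_write("/sys/bus/fcoe/devices/ctlr_0/enabled", "1");
	/* teardown: logs the controller out and frees it, including its scsi_host */
	sysfs_write("/sys/bus/fcoe/ctlr_destroy", "ctlr_0");
	return 0;
}
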
diff --git a/Documentation/scsi/ChangeLog.megaraid_sas b/Documentation/scsi/ChangeLog.megaraid_sas
index da03146..09673c7 100644
@@ -1,3 +1,12 @@
+Release Date    : Sat. Feb 9, 2013 17:00:00 PST 2013 -
+                       (emaild-id:megaraidlinux@lsi.com)
+                       Adam Radford
+Current Version : 06.506.00.00-rc1
+Old Version     : 06.504.01.00-rc1
+    1. Add 4k FastPath DIF support.
+    2. Don't load DevHandle unless FastPath enabled.
+    3. Version and Changelog update.
+-------------------------------------------------------------------------------
 Release Date    : Mon. Oct 1, 2012 17:00:00 PST 2012 -
                        (emaild-id:megaraidlinux@lsi.com)
                        Adam Radford
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index 3b021ec..e2e3492 100644
@@ -407,7 +407,7 @@ static int aac_src_deliver_message(struct fib *fib)
                fib->hw_fib_va->header.StructType = FIB_MAGIC2;
                fib->hw_fib_va->header.SenderFibAddress = (u32)address;
                fib->hw_fib_va->header.u.TimeStamp = 0;
-               BUG_ON((u32)(address >> 32) != 0L);
+               BUG_ON(upper_32_bits(address) != 0L);
                address |= fibsize;
        } else {
                /* Calculate the amount to the fibsize bits */
@@ -431,7 +431,7 @@ static int aac_src_deliver_message(struct fib *fib)
                address |= fibsize;
        }
 
-       src_writel(dev, MUnit.IQ_H, (address >> 32) & 0xffffffff);
+       src_writel(dev, MUnit.IQ_H, upper_32_bits(address) & 0xffffffff);
        src_writel(dev, MUnit.IQ_L, address & 0xffffffff);
 
        return 0;
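
For context on the aacraid change above: when dma_addr_t is only 32 bits wide,
an open-coded "address >> 32" trips gcc's "right shift count >= width of type"
warning, which is presumably what the "suppress two GCC warnings" commit
addresses. upper_32_bits() avoids this by splitting the shift into two 16-bit
steps. A sketch of the helpers, roughly as they appear in <linux/kernel.h>:

/* shift in two 16-bit steps so a 32-bit argument never over-shifts */
#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))
/* the lower half is a plain truncating cast */
#define lower_32_bits(n) ((u32)(n))
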
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 6401db4..2daf4b0 100644
@@ -62,6 +62,10 @@ static int bnx2fc_destroy(struct net_device *net_device);
 static int bnx2fc_enable(struct net_device *netdev);
 static int bnx2fc_disable(struct net_device *netdev);
 
+/* fcoe_sysfs control interface handlers */
+static int bnx2fc_ctlr_alloc(struct net_device *netdev);
+static int bnx2fc_ctlr_enabled(struct fcoe_ctlr_device *cdev);
+
 static void bnx2fc_recv_frame(struct sk_buff *skb);
 
 static void bnx2fc_start_disc(struct bnx2fc_interface *interface);
@@ -89,7 +93,6 @@ static void bnx2fc_port_shutdown(struct fc_lport *lport);
 static void bnx2fc_stop(struct bnx2fc_interface *interface);
 static int __init bnx2fc_mod_init(void);
 static void __exit bnx2fc_mod_exit(void);
-static void bnx2fc_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev);
 
 unsigned int bnx2fc_debug_level;
 module_param_named(debug_logging, bnx2fc_debug_level, int, S_IRUGO|S_IWUSR);
@@ -107,44 +110,6 @@ static inline struct net_device *bnx2fc_netdev(const struct fc_lport *lport)
                ((struct fcoe_port *)lport_priv(lport))->priv)->netdev;
 }
 
-/**
- * bnx2fc_get_lesb() - Fill the FCoE Link Error Status Block
- * @lport: the local port
- * @fc_lesb: the link error status block
- */
-static void bnx2fc_get_lesb(struct fc_lport *lport,
-                           struct fc_els_lesb *fc_lesb)
-{
-       struct net_device *netdev = bnx2fc_netdev(lport);
-
-       __fcoe_get_lesb(lport, fc_lesb, netdev);
-}
-
-static void bnx2fc_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev)
-{
-       struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr_dev);
-       struct net_device *netdev = bnx2fc_netdev(fip->lp);
-       struct fcoe_fc_els_lesb *fcoe_lesb;
-       struct fc_els_lesb fc_lesb;
-
-       __fcoe_get_lesb(fip->lp, &fc_lesb, netdev);
-       fcoe_lesb = (struct fcoe_fc_els_lesb *)(&fc_lesb);
-
-       ctlr_dev->lesb.lesb_link_fail =
-               ntohl(fcoe_lesb->lesb_link_fail);
-       ctlr_dev->lesb.lesb_vlink_fail =
-               ntohl(fcoe_lesb->lesb_vlink_fail);
-       ctlr_dev->lesb.lesb_miss_fka =
-               ntohl(fcoe_lesb->lesb_miss_fka);
-       ctlr_dev->lesb.lesb_symb_err =
-               ntohl(fcoe_lesb->lesb_symb_err);
-       ctlr_dev->lesb.lesb_err_block =
-               ntohl(fcoe_lesb->lesb_err_block);
-       ctlr_dev->lesb.lesb_fcs_error =
-               ntohl(fcoe_lesb->lesb_fcs_error);
-}
-EXPORT_SYMBOL(bnx2fc_ctlr_get_lesb);
-
 static void bnx2fc_fcf_get_vlan_id(struct fcoe_fcf_device *fcf_dev)
 {
        struct fcoe_ctlr_device *ctlr_dev =
@@ -741,35 +706,6 @@ static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev)
        return 0;
 }
 
-static void bnx2fc_link_speed_update(struct fc_lport *lport)
-{
-       struct fcoe_port *port = lport_priv(lport);
-       struct bnx2fc_interface *interface = port->priv;
-       struct net_device *netdev = interface->netdev;
-       struct ethtool_cmd ecmd;
-
-       if (!__ethtool_get_settings(netdev, &ecmd)) {
-               lport->link_supported_speeds &=
-                       ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
-               if (ecmd.supported & (SUPPORTED_1000baseT_Half |
-                                     SUPPORTED_1000baseT_Full))
-                       lport->link_supported_speeds |= FC_PORTSPEED_1GBIT;
-               if (ecmd.supported & SUPPORTED_10000baseT_Full)
-                       lport->link_supported_speeds |= FC_PORTSPEED_10GBIT;
-
-               switch (ethtool_cmd_speed(&ecmd)) {
-               case SPEED_1000:
-                       lport->link_speed = FC_PORTSPEED_1GBIT;
-                       break;
-               case SPEED_2500:
-                       lport->link_speed = FC_PORTSPEED_2GBIT;
-                       break;
-               case SPEED_10000:
-                       lport->link_speed = FC_PORTSPEED_10GBIT;
-                       break;
-               }
-       }
-}
 static int bnx2fc_link_ok(struct fc_lport *lport)
 {
        struct fcoe_port *port = lport_priv(lport);
@@ -827,7 +763,7 @@ static int bnx2fc_net_config(struct fc_lport *lport, struct net_device *netdev)
        port->fcoe_pending_queue_active = 0;
        setup_timer(&port->timer, fcoe_queue_timer, (unsigned long) lport);
 
-       bnx2fc_link_speed_update(lport);
+       fcoe_link_speed_update(lport);
 
        if (!lport->vport) {
                if (fcoe_get_wwn(netdev, &wwnn, NETDEV_FCOE_WWNN))
@@ -871,6 +807,7 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
                                     u16 vlan_id)
 {
        struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context;
+       struct fcoe_ctlr_device *cdev;
        struct fc_lport *lport;
        struct fc_lport *vport;
        struct bnx2fc_interface *interface, *tmp;
@@ -930,30 +867,47 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
                BNX2FC_HBA_DBG(lport, "netevent handler - event=%s %ld\n",
                                interface->netdev->name, event);
 
-               bnx2fc_link_speed_update(lport);
+               fcoe_link_speed_update(lport);
+
+               cdev = fcoe_ctlr_to_ctlr_dev(ctlr);
 
                if (link_possible && !bnx2fc_link_ok(lport)) {
-                       /* Reset max recv frame size to default */
-                       fc_set_mfs(lport, BNX2FC_MFS);
-                       /*
-                        * ctlr link up will only be handled during
-                        * enable to avoid sending discovery solicitation
-                        * on a stale vlan
-                        */
-                       if (interface->enabled)
-                               fcoe_ctlr_link_up(ctlr);
+                       switch (cdev->enabled) {
+                       case FCOE_CTLR_DISABLED:
+                               pr_info("Link up while interface is disabled.\n");
+                               break;
+                       case FCOE_CTLR_ENABLED:
+                       case FCOE_CTLR_UNUSED:
+                               /* Reset max recv frame size to default */
+                               fc_set_mfs(lport, BNX2FC_MFS);
+                               /*
+                                * ctlr link up will only be handled during
+                                * enable to avoid sending discovery
+                                * solicitation on a stale vlan
+                                */
+                               if (interface->enabled)
+                                       fcoe_ctlr_link_up(ctlr);
+                       };
                } else if (fcoe_ctlr_link_down(ctlr)) {
-                       mutex_lock(&lport->lp_mutex);
-                       list_for_each_entry(vport, &lport->vports, list)
-                               fc_host_port_type(vport->host) =
-                                                       FC_PORTTYPE_UNKNOWN;
-                       mutex_unlock(&lport->lp_mutex);
-                       fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN;
-                       per_cpu_ptr(lport->stats,
-                                   get_cpu())->LinkFailureCount++;
-                       put_cpu();
-                       fcoe_clean_pending_queue(lport);
-                       wait_for_upload = 1;
+                       switch (cdev->enabled) {
+                       case FCOE_CTLR_DISABLED:
+                               pr_info("Link down while interface is disabled.\n");
+                               break;
+                       case FCOE_CTLR_ENABLED:
+                       case FCOE_CTLR_UNUSED:
+                               mutex_lock(&lport->lp_mutex);
+                               list_for_each_entry(vport, &lport->vports, list)
+                                       fc_host_port_type(vport->host) =
+                                       FC_PORTTYPE_UNKNOWN;
+                               mutex_unlock(&lport->lp_mutex);
+                               fc_host_port_type(lport->host) =
+                                       FC_PORTTYPE_UNKNOWN;
+                               per_cpu_ptr(lport->stats,
+                                           get_cpu())->LinkFailureCount++;
+                               put_cpu();
+                               fcoe_clean_pending_queue(lport);
+                               wait_for_upload = 1;
+                       };
                }
        }
        mutex_unlock(&bnx2fc_dev_lock);
@@ -1484,6 +1438,7 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
        port = lport_priv(lport);
        port->lport = lport;
        port->priv = interface;
+       port->get_netdev = bnx2fc_netdev;
        INIT_WORK(&port->destroy_work, bnx2fc_destroy_work);
 
        /* Configure fcoe_port */
@@ -2003,7 +1958,9 @@ static void bnx2fc_ulp_init(struct cnic_dev *dev)
                set_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic);
 }
 
-
+/**
+ * Deprecated: Use bnx2fc_ctlr_enabled()
+ */
 static int bnx2fc_disable(struct net_device *netdev)
 {
        struct bnx2fc_interface *interface;
@@ -2029,7 +1986,9 @@ static int bnx2fc_disable(struct net_device *netdev)
        return rc;
 }
 
-
+/**
+ * Deprecated: Use bnx2fc_ctlr_enabled()
+ */
 static int bnx2fc_enable(struct net_device *netdev)
 {
        struct bnx2fc_interface *interface;
@@ -2055,17 +2014,57 @@ static int bnx2fc_enable(struct net_device *netdev)
 }
 
 /**
- * bnx2fc_create - Create bnx2fc FCoE interface
+ * bnx2fc_ctlr_enabled() - Enable or disable an FCoE Controller
+ * @cdev: The FCoE Controller that is being enabled or disabled
+ *
+ * fcoe_sysfs will ensure that the state of 'enabled' has
+ * changed, so no checking is necessary here. This routine simply
+ * calls bnx2fc_enable or bnx2fc_disable, both of which are deprecated.
+ * When those routines are removed the functionality can be merged
+ * here.
+ */
+static int bnx2fc_ctlr_enabled(struct fcoe_ctlr_device *cdev)
+{
+       struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(cdev);
+       struct fc_lport *lport = ctlr->lp;
+       struct net_device *netdev = bnx2fc_netdev(lport);
+
+       switch (cdev->enabled) {
+       case FCOE_CTLR_ENABLED:
+               return bnx2fc_enable(netdev);
+       case FCOE_CTLR_DISABLED:
+               return bnx2fc_disable(netdev);
+       case FCOE_CTLR_UNUSED:
+       default:
+               return -ENOTSUPP;
+       };
+}
+
+enum bnx2fc_create_link_state {
+       BNX2FC_CREATE_LINK_DOWN,
+       BNX2FC_CREATE_LINK_UP,
+};
+
+/**
+ * _bnx2fc_create() - Create bnx2fc FCoE interface
+ * @netdev  :   The net_device object the Ethernet interface to create on
+ * @fip_mode:   The FIP mode for this creation
+ * @link_state: The ctlr link state on creation
  *
- * @buffer: The name of Ethernet interface to create on
- * @kp:     The associated kernel param
+ * Called from either the libfcoe 'create' module parameter
+ * via bnx2fc_create or from fcoe_sysfs's ctlr_create file.
  *
- * Called from sysfs.
+ * libfcoe's 'create' module parameter is deprecated so some
+ * consolidation of code can be done when that interface is
+ * removed.
  *
  * Returns: 0 for success
  */
-static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
+static int _bnx2fc_create(struct net_device *netdev,
+                         enum fip_state fip_mode,
+                         enum bnx2fc_create_link_state link_state)
 {
+       struct fcoe_ctlr_device *cdev;
        struct fcoe_ctlr *ctlr;
        struct bnx2fc_interface *interface;
        struct bnx2fc_hba *hba;
@@ -2160,7 +2159,15 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
        /* Make this master N_port */
        ctlr->lp = lport;
 
-       if (!bnx2fc_link_ok(lport)) {
+       cdev = fcoe_ctlr_to_ctlr_dev(ctlr);
+
+       if (link_state == BNX2FC_CREATE_LINK_UP)
+               cdev->enabled = FCOE_CTLR_ENABLED;
+       else
+               cdev->enabled = FCOE_CTLR_DISABLED;
+
+       if (link_state == BNX2FC_CREATE_LINK_UP &&
+           !bnx2fc_link_ok(lport)) {
                fcoe_ctlr_link_up(ctlr);
                fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
                set_bit(ADAPTER_STATE_READY, &interface->hba->adapter_state);
@@ -2168,7 +2175,10 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
 
        BNX2FC_HBA_DBG(lport, "create: START DISC\n");
        bnx2fc_start_disc(interface);
-       interface->enabled = true;
+
+       if (link_state == BNX2FC_CREATE_LINK_UP)
+               interface->enabled = true;
+
        /*
         * Release from kref_init in bnx2fc_interface_setup, on success
         * lport should be holding a reference taken in bnx2fc_if_create
@@ -2193,6 +2203,37 @@ mod_err:
        return rc;
 }
 
+/**
+ * bnx2fc_create() - Create a bnx2fc interface
+ * @netdev  : The net_device object the Ethernet interface to create on
+ * @fip_mode: The FIP mode for this creation
+ *
+ * Called from fcoe transport
+ *
+ * Returns: 0 for success
+ */
+static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
+{
+       return _bnx2fc_create(netdev, fip_mode, BNX2FC_CREATE_LINK_UP);
+}
+
+/**
+ * bnx2fc_ctlr_alloc() - Allocate a bnx2fc interface from fcoe_sysfs
+ * @netdev: The net_device to be used by the allocated FCoE Controller
+ *
+ * This routine is called from fcoe_sysfs. It will start the fcoe_ctlr
+ * in a link_down state. This allows the user an opportunity to configure
+ * the FCoE Controller from sysfs before enabling the FCoE Controller.
+ *
+ * Creating with this routine starts the FCoE Controller in Fabric
+ * mode. The user can change to VN2VN or another mode before enabling.
+ */
+static int bnx2fc_ctlr_alloc(struct net_device *netdev)
+{
+       return _bnx2fc_create(netdev, FIP_MODE_FABRIC,
+                             BNX2FC_CREATE_LINK_DOWN);
+}
+
 /**
  * bnx2fc_find_hba_for_cnic - maps cnic instance to bnx2fc hba instance
  *
@@ -2318,6 +2359,7 @@ static struct fcoe_transport bnx2fc_transport = {
        .name = {"bnx2fc"},
        .attached = false,
        .list = LIST_HEAD_INIT(bnx2fc_transport.list),
+       .alloc = bnx2fc_ctlr_alloc,
        .match = bnx2fc_match,
        .create = bnx2fc_create,
        .destroy = bnx2fc_destroy,
@@ -2562,13 +2604,13 @@ module_init(bnx2fc_mod_init);
 module_exit(bnx2fc_mod_exit);
 
 static struct fcoe_sysfs_function_template bnx2fc_fcoe_sysfs_templ = {
-       .get_fcoe_ctlr_mode = fcoe_ctlr_get_fip_mode,
-       .get_fcoe_ctlr_link_fail = bnx2fc_ctlr_get_lesb,
-       .get_fcoe_ctlr_vlink_fail = bnx2fc_ctlr_get_lesb,
-       .get_fcoe_ctlr_miss_fka = bnx2fc_ctlr_get_lesb,
-       .get_fcoe_ctlr_symb_err = bnx2fc_ctlr_get_lesb,
-       .get_fcoe_ctlr_err_block = bnx2fc_ctlr_get_lesb,
-       .get_fcoe_ctlr_fcs_error = bnx2fc_ctlr_get_lesb,
+       .set_fcoe_ctlr_enabled = bnx2fc_ctlr_enabled,
+       .get_fcoe_ctlr_link_fail = fcoe_ctlr_get_lesb,
+       .get_fcoe_ctlr_vlink_fail = fcoe_ctlr_get_lesb,
+       .get_fcoe_ctlr_miss_fka = fcoe_ctlr_get_lesb,
+       .get_fcoe_ctlr_symb_err = fcoe_ctlr_get_lesb,
+       .get_fcoe_ctlr_err_block = fcoe_ctlr_get_lesb,
+       .get_fcoe_ctlr_fcs_error = fcoe_ctlr_get_lesb,
 
        .get_fcoe_fcf_selected = fcoe_fcf_get_selected,
        .get_fcoe_fcf_vlan_id = bnx2fc_fcf_get_vlan_id,
@@ -2675,7 +2717,7 @@ static struct libfc_function_template bnx2fc_libfc_fcn_templ = {
        .elsct_send             = bnx2fc_elsct_send,
        .fcp_abort_io           = bnx2fc_abort_io,
        .fcp_cleanup            = bnx2fc_cleanup,
-       .get_lesb               = bnx2fc_get_lesb,
+       .get_lesb               = fcoe_get_lesb,
        .rport_event_callback   = bnx2fc_rport_event_handler,
 };
 
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index 865c64f..fed486b 100644
@@ -3747,13 +3747,13 @@ static struct DeviceCtlBlk *device_alloc(struct AdapterCtlBlk *acb,
        dcb->max_command = 1;
        dcb->target_id = target;
        dcb->target_lun = lun;
+       dcb->dev_mode = eeprom->target[target].cfg0;
 #ifndef DC395x_NO_DISCONNECT
        dcb->identify_msg =
            IDENTIFY(dcb->dev_mode & NTC_DO_DISCONNECT, lun);
 #else
        dcb->identify_msg = IDENTIFY(0, lun);
 #endif
-       dcb->dev_mode = eeprom->target[target].cfg0;
        dcb->inquiry7 = 0;
        dcb->sync_mode = 0;
        dcb->min_nego_period = clock_period[period_index];
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 666b7ac..b5d92fc 100644
@@ -82,11 +82,11 @@ static int fcoe_rcv(struct sk_buff *, struct net_device *,
                    struct packet_type *, struct net_device *);
 static int fcoe_percpu_receive_thread(void *);
 static void fcoe_percpu_clean(struct fc_lport *);
-static int fcoe_link_speed_update(struct fc_lport *);
 static int fcoe_link_ok(struct fc_lport *);
 
 static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *);
 static int fcoe_hostlist_add(const struct fc_lport *);
+static void fcoe_hostlist_del(const struct fc_lport *);
 
 static int fcoe_device_notification(struct notifier_block *, ulong, void *);
 static void fcoe_dev_setup(void);
@@ -117,6 +117,11 @@ static int fcoe_destroy(struct net_device *netdev);
 static int fcoe_enable(struct net_device *netdev);
 static int fcoe_disable(struct net_device *netdev);
 
+/* fcoe_sysfs control interface handlers */
+static int fcoe_ctlr_alloc(struct net_device *netdev);
+static int fcoe_ctlr_enabled(struct fcoe_ctlr_device *cdev);
+
+
 static struct fc_seq *fcoe_elsct_send(struct fc_lport *,
                                      u32 did, struct fc_frame *,
                                      unsigned int op,
@@ -126,8 +131,6 @@ static struct fc_seq *fcoe_elsct_send(struct fc_lport *,
                                      void *, u32 timeout);
 static void fcoe_recv_frame(struct sk_buff *skb);
 
-static void fcoe_get_lesb(struct fc_lport *, struct fc_els_lesb *);
-
 /* notification function for packets from net device */
 static struct notifier_block fcoe_notifier = {
        .notifier_call = fcoe_device_notification,
@@ -151,11 +154,11 @@ static int fcoe_vport_create(struct fc_vport *, bool disabled);
 static int fcoe_vport_disable(struct fc_vport *, bool disable);
 static void fcoe_set_vport_symbolic_name(struct fc_vport *);
 static void fcoe_set_port_id(struct fc_lport *, u32, struct fc_frame *);
-static void fcoe_ctlr_get_lesb(struct fcoe_ctlr_device *);
 static void fcoe_fcf_get_vlan_id(struct fcoe_fcf_device *);
 
 static struct fcoe_sysfs_function_template fcoe_sysfs_templ = {
-       .get_fcoe_ctlr_mode = fcoe_ctlr_get_fip_mode,
+       .set_fcoe_ctlr_mode = fcoe_ctlr_set_fip_mode,
+       .set_fcoe_ctlr_enabled = fcoe_ctlr_enabled,
        .get_fcoe_ctlr_link_fail = fcoe_ctlr_get_lesb,
        .get_fcoe_ctlr_vlink_fail = fcoe_ctlr_get_lesb,
        .get_fcoe_ctlr_miss_fka = fcoe_ctlr_get_lesb,
@@ -1112,10 +1115,17 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
        port = lport_priv(lport);
        port->lport = lport;
        port->priv = fcoe;
+       port->get_netdev = fcoe_netdev;
        port->max_queue_depth = FCOE_MAX_QUEUE_DEPTH;
        port->min_queue_depth = FCOE_MIN_QUEUE_DEPTH;
        INIT_WORK(&port->destroy_work, fcoe_destroy_work);
 
+       /*
+        * Need to add the lport to the hostlist
+        * so we catch NETDEV_CHANGE events.
+        */
+       fcoe_hostlist_add(lport);
+
        /* configure a fc_lport including the exchange manager */
        rc = fcoe_lport_config(lport);
        if (rc) {
@@ -1187,6 +1197,7 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
 out_lp_destroy:
        fc_exch_mgr_free(lport);
 out_host_put:
+       fcoe_hostlist_del(lport);
        scsi_host_put(lport->host);
 out:
        return ERR_PTR(rc);
@@ -1964,6 +1975,7 @@ static int fcoe_dcb_app_notification(struct notifier_block *notifier,
 static int fcoe_device_notification(struct notifier_block *notifier,
                                    ulong event, void *ptr)
 {
+       struct fcoe_ctlr_device *cdev;
        struct fc_lport *lport = NULL;
        struct net_device *netdev = ptr;
        struct fcoe_ctlr *ctlr;
@@ -2020,13 +2032,29 @@ static int fcoe_device_notification(struct notifier_block *notifier,
 
        fcoe_link_speed_update(lport);
 
-       if (link_possible && !fcoe_link_ok(lport))
-               fcoe_ctlr_link_up(ctlr);
-       else if (fcoe_ctlr_link_down(ctlr)) {
-               stats = per_cpu_ptr(lport->stats, get_cpu());
-               stats->LinkFailureCount++;
-               put_cpu();
-               fcoe_clean_pending_queue(lport);
+       cdev = fcoe_ctlr_to_ctlr_dev(ctlr);
+
+       if (link_possible && !fcoe_link_ok(lport)) {
+               switch (cdev->enabled) {
+               case FCOE_CTLR_DISABLED:
+                       pr_info("Link up while interface is disabled.\n");
+                       break;
+               case FCOE_CTLR_ENABLED:
+               case FCOE_CTLR_UNUSED:
+                       fcoe_ctlr_link_up(ctlr);
+               };
+       } else if (fcoe_ctlr_link_down(ctlr)) {
+               switch (cdev->enabled) {
+               case FCOE_CTLR_DISABLED:
+                       pr_info("Link down while interface is disabled.\n");
+                       break;
+               case FCOE_CTLR_ENABLED:
+               case FCOE_CTLR_UNUSED:
+                       stats = per_cpu_ptr(lport->stats, get_cpu());
+                       stats->LinkFailureCount++;
+                       put_cpu();
+                       fcoe_clean_pending_queue(lport);
+               };
        }
 out:
        return rc;
@@ -2039,6 +2067,8 @@ out:
  * Called from fcoe transport.
  *
  * Returns: 0 for success
+ *
+ * Deprecated: use fcoe_ctlr_enabled()
  */
 static int fcoe_disable(struct net_device *netdev)
 {
@@ -2097,6 +2127,33 @@ out:
        return rc;
 }
 
+/**
+ * fcoe_ctlr_enabled() - Enable or disable an FCoE Controller
+ * @cdev: The FCoE Controller that is being enabled or disabled
+ *
+ * fcoe_sysfs will ensure that the state of 'enabled' has
+ * changed, so no checking is necessary here. This routine simply
+ * calls fcoe_enable or fcoe_disable, both of which are deprecated.
+ * When those routines are removed the functionality can be merged
+ * here.
+ */
+static int fcoe_ctlr_enabled(struct fcoe_ctlr_device *cdev)
+{
+       struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(cdev);
+       struct fc_lport *lport = ctlr->lp;
+       struct net_device *netdev = fcoe_netdev(lport);
+
+       switch (cdev->enabled) {
+       case FCOE_CTLR_ENABLED:
+               return fcoe_enable(netdev);
+       case FCOE_CTLR_DISABLED:
+               return fcoe_disable(netdev);
+       case FCOE_CTLR_UNUSED:
+       default:
+               return -ENOTSUPP;
+       };
+}
+
 /**
  * fcoe_destroy() - Destroy a FCoE interface
  * @netdev  : The net_device object the Ethernet interface to create on
@@ -2139,8 +2196,31 @@ static void fcoe_destroy_work(struct work_struct *work)
 {
        struct fcoe_port *port;
        struct fcoe_interface *fcoe;
+       struct Scsi_Host *shost;
+       struct fc_host_attrs *fc_host;
+       unsigned long flags;
+       struct fc_vport *vport;
+       struct fc_vport *next_vport;
 
        port = container_of(work, struct fcoe_port, destroy_work);
+       shost = port->lport->host;
+       fc_host = shost_to_fc_host(shost);
+
+       /* Loop through all the vports and mark them for deletion */
+       spin_lock_irqsave(shost->host_lock, flags);
+       list_for_each_entry_safe(vport, next_vport, &fc_host->vports, peers) {
+               if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) {
+                       continue;
+               } else {
+                       vport->flags |= FC_VPORT_DELETING;
+                       queue_work(fc_host_work_q(shost),
+                                  &vport->vport_delete_work);
+               }
+       }
+       spin_unlock_irqrestore(shost->host_lock, flags);
+
+       flush_workqueue(fc_host_work_q(shost));
+
        mutex_lock(&fcoe_config_mutex);
 
        fcoe = port->priv;
@@ -2204,16 +2284,26 @@ static void fcoe_dcb_create(struct fcoe_interface *fcoe)
 #endif
 }
 
+enum fcoe_create_link_state {
+       FCOE_CREATE_LINK_DOWN,
+       FCOE_CREATE_LINK_UP,
+};
+
 /**
- * fcoe_create() - Create a fcoe interface
- * @netdev  : The net_device object the Ethernet interface to create on
- * @fip_mode: The FIP mode for this creation
+ * _fcoe_create() - (internal) Create a fcoe interface
+ * @netdev  :   The net_device object the Ethernet interface to create on
+ * @fip_mode:   The FIP mode for this creation
+ * @link_state: The ctlr link state on creation
  *
- * Called from fcoe transport
+ * Called from either the libfcoe 'create' module parameter
+ * via fcoe_create or from fcoe_sysfs's ctlr_create file.
  *
- * Returns: 0 for success
+ * libfcoe's 'create' module parameter is deprecated so some
+ * consolidation of code can be done when that interface is
+ * removed.
  */
-static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
+static int _fcoe_create(struct net_device *netdev, enum fip_state fip_mode,
+                       enum fcoe_create_link_state link_state)
 {
        int rc = 0;
        struct fcoe_ctlr_device *ctlr_dev;
@@ -2254,13 +2344,29 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
        /* setup DCB priority attributes. */
        fcoe_dcb_create(fcoe);
 
-       /* add to lports list */
-       fcoe_hostlist_add(lport);
-
        /* start FIP Discovery and FLOGI */
        lport->boot_time = jiffies;
        fc_fabric_login(lport);
-       if (!fcoe_link_ok(lport)) {
+
+       /*
+        * If the fcoe_ctlr_device is to be set to DISABLED
+        * it must be done after the lport is added to the
+        * hostlist, but before the rtnl_lock is released.
+        * This is because the rtnl_lock protects the
+        * hostlist that fcoe_device_notification uses. If
+        * the FCoE Controller is intended to be created
+        * DISABLED then 'enabled' needs to be considered
+        * DISABLED then 'enabled' needs to be considered
+        * when handling link events. 'enabled' must be set
+        * when a link up event is received.
+        */
+       if (link_state == FCOE_CREATE_LINK_UP)
+               ctlr_dev->enabled = FCOE_CTLR_ENABLED;
+       else
+               ctlr_dev->enabled = FCOE_CTLR_DISABLED;
+
+       if (link_state == FCOE_CREATE_LINK_UP &&
+           !fcoe_link_ok(lport)) {
                rtnl_unlock();
                fcoe_ctlr_link_up(ctlr);
                mutex_unlock(&fcoe_config_mutex);
@@ -2275,37 +2381,34 @@ out_nortnl:
 }
 
 /**
- * fcoe_link_speed_update() - Update the supported and actual link speeds
- * @lport: The local port to update speeds for
+ * fcoe_create() - Create a fcoe interface
+ * @netdev  : The net_device object the Ethernet interface to create on
+ * @fip_mode: The FIP mode for this creation
+ *
+ * Called from fcoe transport
+ *
+ * Returns: 0 for success
+ */
+static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
+{
+       return _fcoe_create(netdev, fip_mode, FCOE_CREATE_LINK_UP);
+}
+
+/**
+ * fcoe_ctlr_alloc() - Allocate a fcoe interface from fcoe_sysfs
+ * @netdev: The net_device to be used by the allocated FCoE Controller
  *
- * Returns: 0 if the ethtool query was successful
- *          -1 if the ethtool query failed
+ * This routine is called from fcoe_sysfs. It will start the fcoe_ctlr
+ * in a link_down state. This allows the user an opportunity to configure
+ * the FCoE Controller from sysfs before enabling the FCoE Controller.
+ *
+ * Creating with this routine starts the FCoE Controller in Fabric
+ * mode. The user can change to VN2VN or another mode before enabling.
  */
-static int fcoe_link_speed_update(struct fc_lport *lport)
+static int fcoe_ctlr_alloc(struct net_device *netdev)
 {
-       struct net_device *netdev = fcoe_netdev(lport);
-       struct ethtool_cmd ecmd;
-
-       if (!__ethtool_get_settings(netdev, &ecmd)) {
-               lport->link_supported_speeds &=
-                       ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
-               if (ecmd.supported & (SUPPORTED_1000baseT_Half |
-                                     SUPPORTED_1000baseT_Full))
-                       lport->link_supported_speeds |= FC_PORTSPEED_1GBIT;
-               if (ecmd.supported & SUPPORTED_10000baseT_Full)
-                       lport->link_supported_speeds |=
-                               FC_PORTSPEED_10GBIT;
-               switch (ethtool_cmd_speed(&ecmd)) {
-               case SPEED_1000:
-                       lport->link_speed = FC_PORTSPEED_1GBIT;
-                       break;
-               case SPEED_10000:
-                       lport->link_speed = FC_PORTSPEED_10GBIT;
-                       break;
-               }
-               return 0;
-       }
-       return -1;
+       return _fcoe_create(netdev, FIP_MODE_FABRIC,
+                           FCOE_CREATE_LINK_DOWN);
 }
 
 /**
@@ -2375,10 +2478,13 @@ static int fcoe_reset(struct Scsi_Host *shost)
        struct fcoe_port *port = lport_priv(lport);
        struct fcoe_interface *fcoe = port->priv;
        struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
+       struct fcoe_ctlr_device *cdev = fcoe_ctlr_to_ctlr_dev(ctlr);
 
        fcoe_ctlr_link_down(ctlr);
        fcoe_clean_pending_queue(ctlr->lp);
-       if (!fcoe_link_ok(ctlr->lp))
+
+       if (cdev->enabled != FCOE_CTLR_DISABLED &&
+           !fcoe_link_ok(ctlr->lp))
                fcoe_ctlr_link_up(ctlr);
        return 0;
 }
@@ -2445,12 +2551,31 @@ static int fcoe_hostlist_add(const struct fc_lport *lport)
        return 0;
 }
 
+/**
+ * fcoe_hostlist_del() - Remove the FCoE interface identified by a local
+ *                      port from the hostlist
+ * @lport: The local port that identifies the FCoE interface to be removed
+ *
+ * Locking: must be called with the RTNL mutex held
+ *
+ */
+static void fcoe_hostlist_del(const struct fc_lport *lport)
+{
+       struct fcoe_interface *fcoe;
+       struct fcoe_port *port;
+
+       port = lport_priv(lport);
+       fcoe = port->priv;
+       list_del(&fcoe->list);
+       return;
+}
 
 static struct fcoe_transport fcoe_sw_transport = {
        .name = {FCOE_TRANSPORT_DEFAULT},
        .attached = false,
        .list = LIST_HEAD_INIT(fcoe_sw_transport.list),
        .match = fcoe_match,
+       .alloc = fcoe_ctlr_alloc,
        .create = fcoe_create,
        .destroy = fcoe_destroy,
        .enable = fcoe_enable,
@@ -2534,9 +2659,9 @@ static void __exit fcoe_exit(void)
        /* releases the associated fcoe hosts */
        rtnl_lock();
        list_for_each_entry_safe(fcoe, tmp, &fcoe_hostlist, list) {
-               list_del(&fcoe->list);
                ctlr = fcoe_to_ctlr(fcoe);
                port = lport_priv(ctlr->lp);
+               fcoe_hostlist_del(port->lport);
                queue_work(fcoe_wq, &port->destroy_work);
        }
        rtnl_unlock();
@@ -2776,43 +2901,6 @@ static void fcoe_set_vport_symbolic_name(struct fc_vport *vport)
                             NULL, NULL, 3 * lport->r_a_tov);
 }
 
-/**
- * fcoe_get_lesb() - Fill the FCoE Link Error Status Block
- * @lport: the local port
- * @fc_lesb: the link error status block
- */
-static void fcoe_get_lesb(struct fc_lport *lport,
-                        struct fc_els_lesb *fc_lesb)
-{
-       struct net_device *netdev = fcoe_netdev(lport);
-
-       __fcoe_get_lesb(lport, fc_lesb, netdev);
-}
-
-static void fcoe_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev)
-{
-       struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr_dev);
-       struct net_device *netdev = fcoe_netdev(fip->lp);
-       struct fcoe_fc_els_lesb *fcoe_lesb;
-       struct fc_els_lesb fc_lesb;
-
-       __fcoe_get_lesb(fip->lp, &fc_lesb, netdev);
-       fcoe_lesb = (struct fcoe_fc_els_lesb *)(&fc_lesb);
-
-       ctlr_dev->lesb.lesb_link_fail =
-               ntohl(fcoe_lesb->lesb_link_fail);
-       ctlr_dev->lesb.lesb_vlink_fail =
-               ntohl(fcoe_lesb->lesb_vlink_fail);
-       ctlr_dev->lesb.lesb_miss_fka =
-               ntohl(fcoe_lesb->lesb_miss_fka);
-       ctlr_dev->lesb.lesb_symb_err =
-               ntohl(fcoe_lesb->lesb_symb_err);
-       ctlr_dev->lesb.lesb_err_block =
-               ntohl(fcoe_lesb->lesb_err_block);
-       ctlr_dev->lesb.lesb_fcs_error =
-               ntohl(fcoe_lesb->lesb_fcs_error);
-}
-
 static void fcoe_fcf_get_vlan_id(struct fcoe_fcf_device *fcf_dev)
 {
        struct fcoe_ctlr_device *ctlr_dev =
diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h
index b42dc32..2b53672 100644
@@ -55,12 +55,12 @@ do {                                                                \
 
 #define FCOE_DBG(fmt, args...)                                         \
        FCOE_CHECK_LOGGING(FCOE_LOGGING,                                \
-                          printk(KERN_INFO "fcoe: " fmt, ##args);)
+                          pr_info("fcoe: " fmt, ##args);)
 
 #define FCOE_NETDEV_DBG(netdev, fmt, args...)                  \
        FCOE_CHECK_LOGGING(FCOE_NETDEV_LOGGING,                 \
-                          printk(KERN_INFO "fcoe: %s: " fmt,   \
-                                 netdev->name, ##args);)
+                          pr_info("fcoe: %s: " fmt,            \
+                                  netdev->name, ##args);)
 
 /**
  * struct fcoe_interface - A FCoE interface
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 4a909d7..08c3bc3 100644
@@ -1291,8 +1291,16 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
 
        LIBFCOE_FIP_DBG(fip, "Clear Virtual Link received\n");
 
-       if (!fcf || !lport->port_id)
+       if (!fcf || !lport->port_id) {
+               /*
+                * We have yet to select the best FCF, but we got a CVL in the
+                * meantime. Reset the ctlr and let it rediscover the FCF.
+                */
+               mutex_lock(&fip->ctlr_mutex);
+               fcoe_ctlr_reset(fip);
+               mutex_unlock(&fip->ctlr_mutex);
                return;
+       }
 
        /*
         * mask of required descriptors.  Validating each one clears its bit.
@@ -1551,15 +1559,6 @@ static struct fcoe_fcf *fcoe_ctlr_select(struct fcoe_ctlr *fip)
                                fcf->fabric_name, fcf->vfid, fcf->fcf_mac,
                                fcf->fc_map, fcoe_ctlr_mtu_valid(fcf),
                                fcf->flogi_sent, fcf->pri);
-               if (fcf->fabric_name != first->fabric_name ||
-                   fcf->vfid != first->vfid ||
-                   fcf->fc_map != first->fc_map) {
-                       LIBFCOE_FIP_DBG(fip, "Conflicting fabric, VFID, "
-                                       "or FC-MAP\n");
-                       return NULL;
-               }
-               if (fcf->flogi_sent)
-                       continue;
                if (!fcoe_ctlr_fcf_usable(fcf)) {
                        LIBFCOE_FIP_DBG(fip, "FCF for fab %16.16llx "
                                        "map %x %svalid %savailable\n",
@@ -1569,6 +1568,15 @@ static struct fcoe_fcf *fcoe_ctlr_select(struct fcoe_ctlr *fip)
                                        "" : "un");
                        continue;
                }
+               if (fcf->fabric_name != first->fabric_name ||
+                   fcf->vfid != first->vfid ||
+                   fcf->fc_map != first->fc_map) {
+                       LIBFCOE_FIP_DBG(fip, "Conflicting fabric, VFID, "
+                                       "or FC-MAP\n");
+                       return NULL;
+               }
+               if (fcf->flogi_sent)
+                       continue;
                if (!best || fcf->pri < best->pri || best->flogi_sent)
                        best = fcf;
        }
@@ -2864,22 +2872,21 @@ void fcoe_fcf_get_selected(struct fcoe_fcf_device *fcf_dev)
 }
 EXPORT_SYMBOL(fcoe_fcf_get_selected);
 
-void fcoe_ctlr_get_fip_mode(struct fcoe_ctlr_device *ctlr_dev)
+void fcoe_ctlr_set_fip_mode(struct fcoe_ctlr_device *ctlr_dev)
 {
        struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
 
        mutex_lock(&ctlr->ctlr_mutex);
-       switch (ctlr->mode) {
-       case FIP_MODE_FABRIC:
-               ctlr_dev->mode = FIP_CONN_TYPE_FABRIC;
-               break;
-       case FIP_MODE_VN2VN:
-               ctlr_dev->mode = FIP_CONN_TYPE_VN2VN;
+       switch (ctlr_dev->mode) {
+       case FIP_CONN_TYPE_VN2VN:
+               ctlr->mode = FIP_MODE_VN2VN;
                break;
+       case FIP_CONN_TYPE_FABRIC:
        default:
-               ctlr_dev->mode = FIP_CONN_TYPE_UNKNOWN;
+               ctlr->mode = FIP_MODE_FABRIC;
                break;
        }
+
        mutex_unlock(&ctlr->ctlr_mutex);
 }
-EXPORT_SYMBOL(fcoe_ctlr_get_fip_mode);
+EXPORT_SYMBOL(fcoe_ctlr_set_fip_mode);
diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
index 5e75168..8c05ae0 100644
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/etherdevice.h>
+#include <linux/ctype.h>
 
 #include <scsi/fcoe_sysfs.h>
+#include <scsi/libfcoe.h>
+
+/*
+ * OK to include local libfcoe.h for debug_logging, but cannot include
+ * <scsi/libfcoe.h> otherwise non-netdev based fcoe solutions would
+ * have to include more than fcoe_sysfs.h.
+ */
+#include "libfcoe.h"
 
 static atomic_t ctlr_num;
 static atomic_t fcf_num;
@@ -71,6 +80,8 @@ MODULE_PARM_DESC(fcf_dev_loss_tmo,
        ((x)->lesb.lesb_err_block)
 #define fcoe_ctlr_fcs_error(x)                 \
        ((x)->lesb.lesb_fcs_error)
+#define fcoe_ctlr_enabled(x)                   \
+       ((x)->enabled)
 #define fcoe_fcf_state(x)                      \
        ((x)->state)
 #define fcoe_fcf_fabric_name(x)                        \
@@ -210,25 +221,34 @@ static ssize_t show_fcoe_fcf_device_##field(struct device *dev,   \
 #define fcoe_enum_name_search(title, table_type, table)                        \
 static const char *get_fcoe_##title##_name(enum table_type table_key)  \
 {                                                                      \
-       int i;                                                          \
-       char *name = NULL;                                              \
-                                                                       \
-       for (i = 0; i < ARRAY_SIZE(table); i++) {                       \
-               if (table[i].value == table_key) {                      \
-                       name = table[i].name;                           \
-                       break;                                          \
-               }                                                       \
-       }                                                               \
-       return name;                                                    \
+       if (table_key < 0 || table_key >= ARRAY_SIZE(table))            \
+               return NULL;                                            \
+       return table[table_key];                                        \
+}
+
+static char *fip_conn_type_names[] = {
+       [ FIP_CONN_TYPE_UNKNOWN ] = "Unknown",
+       [ FIP_CONN_TYPE_FABRIC ]  = "Fabric",
+       [ FIP_CONN_TYPE_VN2VN ]   = "VN2VN",
+};
+fcoe_enum_name_search(ctlr_mode, fip_conn_type, fip_conn_type_names)
+
+static enum fip_conn_type fcoe_parse_mode(const char *buf)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(fip_conn_type_names); i++) {
+               if (strcasecmp(buf, fip_conn_type_names[i]) == 0)
+                       return i;
+       }
+
+       return FIP_CONN_TYPE_UNKNOWN;
 }
 
-static struct {
-       enum fcf_state value;
-       char           *name;
-} fcf_state_names[] = {
-       { FCOE_FCF_STATE_UNKNOWN,      "Unknown" },
-       { FCOE_FCF_STATE_DISCONNECTED, "Disconnected" },
-       { FCOE_FCF_STATE_CONNECTED,    "Connected" },
+static char *fcf_state_names[] = {
+       [ FCOE_FCF_STATE_UNKNOWN ]      = "Unknown",
+       [ FCOE_FCF_STATE_DISCONNECTED ] = "Disconnected",
+       [ FCOE_FCF_STATE_CONNECTED ]    = "Connected",
 };
 fcoe_enum_name_search(fcf_state, fcf_state, fcf_state_names)
 #define FCOE_FCF_STATE_MAX_NAMELEN 50
@@ -246,17 +266,7 @@ static ssize_t show_fcf_state(struct device *dev,
 }
 static FCOE_DEVICE_ATTR(fcf, state, S_IRUGO, show_fcf_state, NULL);
 
-static struct {
-       enum fip_conn_type value;
-       char               *name;
-} fip_conn_type_names[] = {
-       { FIP_CONN_TYPE_UNKNOWN, "Unknown" },
-       { FIP_CONN_TYPE_FABRIC, "Fabric" },
-       { FIP_CONN_TYPE_VN2VN, "VN2VN" },
-};
-fcoe_enum_name_search(ctlr_mode, fip_conn_type, fip_conn_type_names)
-#define FCOE_CTLR_MODE_MAX_NAMELEN 50
-
+#define FCOE_MAX_MODENAME_LEN 20
 static ssize_t show_ctlr_mode(struct device *dev,
                              struct device_attribute *attr,
                              char *buf)
@@ -264,17 +274,116 @@ static ssize_t show_ctlr_mode(struct device *dev,
        struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
        const char *name;
 
-       if (ctlr->f->get_fcoe_ctlr_mode)
-               ctlr->f->get_fcoe_ctlr_mode(ctlr);
-
        name = get_fcoe_ctlr_mode_name(ctlr->mode);
        if (!name)
                return -EINVAL;
-       return snprintf(buf, FCOE_CTLR_MODE_MAX_NAMELEN,
+       return snprintf(buf, FCOE_MAX_MODENAME_LEN,
+                       "%s\n", name);
+}
+
+static ssize_t store_ctlr_mode(struct device *dev,
+                              struct device_attribute *attr,
+                              const char *buf, size_t count)
+{
+       struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
+       char mode[FCOE_MAX_MODENAME_LEN + 1];
+
+       if (count > FCOE_MAX_MODENAME_LEN)
+               return -EINVAL;
+
+       strncpy(mode, buf, count);
+
+       if (mode[count - 1] == '\n')
+               mode[count - 1] = '\0';
+       else
+               mode[count] = '\0';
+
+       switch (ctlr->enabled) {
+       case FCOE_CTLR_ENABLED:
+               LIBFCOE_SYSFS_DBG(ctlr, "Cannot change mode when enabled.");
+               return -EBUSY;
+       case FCOE_CTLR_DISABLED:
+               if (!ctlr->f->set_fcoe_ctlr_mode) {
+                       LIBFCOE_SYSFS_DBG(ctlr,
+                                         "Mode change not supported by LLD.");
+                       return -ENOTSUPP;
+               }
+
+               ctlr->mode = fcoe_parse_mode(mode);
+               if (ctlr->mode == FIP_CONN_TYPE_UNKNOWN) {
+                       LIBFCOE_SYSFS_DBG(ctlr,
+                                         "Unknown mode %s provided.", buf);
+                       return -EINVAL;
+               }
+
+               ctlr->f->set_fcoe_ctlr_mode(ctlr);
+               LIBFCOE_SYSFS_DBG(ctlr, "Mode changed to %s.", buf);
+
+               return count;
+       case FCOE_CTLR_UNUSED:
+       default:
+               LIBFCOE_SYSFS_DBG(ctlr, "Mode change not supported.");
+               return -ENOTSUPP;
+       };
+}
+
+static FCOE_DEVICE_ATTR(ctlr, mode, S_IRUGO | S_IWUSR,
+                       show_ctlr_mode, store_ctlr_mode);
+
+static ssize_t store_ctlr_enabled(struct device *dev,
+                                 struct device_attribute *attr,
+                                 const char *buf, size_t count)
+{
+       struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
+       int rc;
+
+       switch (ctlr->enabled) {
+       case FCOE_CTLR_ENABLED:
+               if (*buf == '1')
+                       return count;
+               ctlr->enabled = FCOE_CTLR_DISABLED;
+               break;
+       case FCOE_CTLR_DISABLED:
+               if (*buf == '0')
+                       return count;
+               ctlr->enabled = FCOE_CTLR_ENABLED;
+               break;
+       case FCOE_CTLR_UNUSED:
+               return -ENOTSUPP;
+       };
+
+       rc = ctlr->f->set_fcoe_ctlr_enabled(ctlr);
+       if (rc)
+               return rc;
+
+       return count;
+}
+
+static char *ctlr_enabled_state_names[] = {
+       [ FCOE_CTLR_ENABLED ]  = "1",
+       [ FCOE_CTLR_DISABLED ] = "0",
+};
+fcoe_enum_name_search(ctlr_enabled_state, ctlr_enabled_state,
+                     ctlr_enabled_state_names)
+#define FCOE_CTLR_ENABLED_MAX_NAMELEN 50
+
+static ssize_t show_ctlr_enabled_state(struct device *dev,
+                                      struct device_attribute *attr,
+                                      char *buf)
+{
+       struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
+       const char *name;
+
+       name = get_fcoe_ctlr_enabled_state_name(ctlr->enabled);
+       if (!name)
+               return -EINVAL;
+       return snprintf(buf, FCOE_CTLR_ENABLED_MAX_NAMELEN,
                        "%s\n", name);
 }
-static FCOE_DEVICE_ATTR(ctlr, mode, S_IRUGO,
-                       show_ctlr_mode, NULL);
+
+static FCOE_DEVICE_ATTR(ctlr, enabled, S_IRUGO | S_IWUSR,
+                       show_ctlr_enabled_state,
+                       store_ctlr_enabled);
 
 static ssize_t
 store_private_fcoe_ctlr_fcf_dev_loss_tmo(struct device *dev,
@@ -359,6 +468,7 @@ static struct attribute_group fcoe_ctlr_lesb_attr_group = {
 
 static struct attribute *fcoe_ctlr_attrs[] = {
        &device_attr_fcoe_ctlr_fcf_dev_loss_tmo.attr,
+       &device_attr_fcoe_ctlr_enabled.attr,
        &device_attr_fcoe_ctlr_mode.attr,
        NULL,
 };
@@ -443,9 +553,16 @@ struct device_type fcoe_fcf_device_type = {
        .release = fcoe_fcf_device_release,
 };
 
+struct bus_attribute fcoe_bus_attr_group[] = {
+       __ATTR(ctlr_create, S_IWUSR, NULL, fcoe_ctlr_create_store),
+       __ATTR(ctlr_destroy, S_IWUSR, NULL, fcoe_ctlr_destroy_store),
+       __ATTR_NULL
+};
+
 struct bus_type fcoe_bus_type = {
        .name = "fcoe",
        .match = &fcoe_bus_match,
+       .bus_attrs = fcoe_bus_attr_group,
 };
 
 /**
@@ -566,6 +683,7 @@ struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent,
 
        ctlr->id = atomic_inc_return(&ctlr_num) - 1;
        ctlr->f = f;
+       ctlr->mode = FIP_CONN_TYPE_FABRIC;
        INIT_LIST_HEAD(&ctlr->fcfs);
        mutex_init(&ctlr->lock);
        ctlr->dev.parent = parent;
diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c
index ac76d8a..f3a5a53 100644
@@ -83,6 +83,50 @@ static struct notifier_block libfcoe_notifier = {
        .notifier_call = libfcoe_device_notification,
 };
 
+/**
+ * fcoe_link_speed_update() - Update the supported and actual link speeds
+ * @lport: The local port to update speeds for
+ *
+ * Returns: 0 if the ethtool query was successful
+ *          -1 if the ethtool query failed
+ */
+int fcoe_link_speed_update(struct fc_lport *lport)
+{
+       struct net_device *netdev = fcoe_get_netdev(lport);
+       struct ethtool_cmd ecmd;
+
+       if (!__ethtool_get_settings(netdev, &ecmd)) {
+               lport->link_supported_speeds &=
+                       ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
+               if (ecmd.supported & (SUPPORTED_1000baseT_Half |
+                                     SUPPORTED_1000baseT_Full))
+                       lport->link_supported_speeds |= FC_PORTSPEED_1GBIT;
+               if (ecmd.supported & SUPPORTED_10000baseT_Full)
+                       lport->link_supported_speeds |=
+                               FC_PORTSPEED_10GBIT;
+               switch (ethtool_cmd_speed(&ecmd)) {
+               case SPEED_1000:
+                       lport->link_speed = FC_PORTSPEED_1GBIT;
+                       break;
+               case SPEED_10000:
+                       lport->link_speed = FC_PORTSPEED_10GBIT;
+                       break;
+               }
+               return 0;
+       }
+       return -1;
+}
+EXPORT_SYMBOL_GPL(fcoe_link_speed_update);
+
+/**
+ * __fcoe_get_lesb() - Get the Link Error Status Block (LESB) for a given lport
+ * @lport: The local port whose LESB is to be filled
+ * @fc_lesb: Pointer to the LESB to be filled up
+ * @netdev: Pointer to the netdev that is associated with the lport
+ *
+ * Note, the Link Error Status Block (LESB) for FCoE is defined in FC-BB-6
+ * Clause 7.11 in v1.04.
+ */
 void __fcoe_get_lesb(struct fc_lport *lport,
                     struct fc_els_lesb *fc_lesb,
                     struct net_device *netdev)
@@ -112,6 +156,51 @@ void __fcoe_get_lesb(struct fc_lport *lport,
 }
 EXPORT_SYMBOL_GPL(__fcoe_get_lesb);
 
+/**
+ * fcoe_get_lesb() - Fill the FCoE Link Error Status Block
+ * @lport: the local port
+ * @fc_lesb: the link error status block
+ */
+void fcoe_get_lesb(struct fc_lport *lport,
+                        struct fc_els_lesb *fc_lesb)
+{
+       struct net_device *netdev = fcoe_get_netdev(lport);
+
+       __fcoe_get_lesb(lport, fc_lesb, netdev);
+}
+EXPORT_SYMBOL_GPL(fcoe_get_lesb);
+
+/**
+ * fcoe_ctlr_get_lesb() - Get the Link Error Status Block (LESB) for a given
+ * fcoe controller device
+ * @ctlr_dev: The given fcoe controller device
+ *
+ */
+void fcoe_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev)
+{
+       struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr_dev);
+       struct net_device *netdev = fcoe_get_netdev(fip->lp);
+       struct fcoe_fc_els_lesb *fcoe_lesb;
+       struct fc_els_lesb fc_lesb;
+
+       __fcoe_get_lesb(fip->lp, &fc_lesb, netdev);
+       fcoe_lesb = (struct fcoe_fc_els_lesb *)(&fc_lesb);
+
+       ctlr_dev->lesb.lesb_link_fail =
+               ntohl(fcoe_lesb->lesb_link_fail);
+       ctlr_dev->lesb.lesb_vlink_fail =
+               ntohl(fcoe_lesb->lesb_vlink_fail);
+       ctlr_dev->lesb.lesb_miss_fka =
+               ntohl(fcoe_lesb->lesb_miss_fka);
+       ctlr_dev->lesb.lesb_symb_err =
+               ntohl(fcoe_lesb->lesb_symb_err);
+       ctlr_dev->lesb.lesb_err_block =
+               ntohl(fcoe_lesb->lesb_err_block);
+       ctlr_dev->lesb.lesb_fcs_error =
+               ntohl(fcoe_lesb->lesb_fcs_error);
+}
+EXPORT_SYMBOL_GPL(fcoe_ctlr_get_lesb);
+
 void fcoe_wwn_to_str(u64 wwn, char *buf, int len)
 {
        u8 wwpn[8];
@@ -627,6 +716,110 @@ static int libfcoe_device_notification(struct notifier_block *notifier,
        return NOTIFY_OK;
 }
 
+ssize_t fcoe_ctlr_create_store(struct bus_type *bus,
+                              const char *buf, size_t count)
+{
+       struct net_device *netdev = NULL;
+       struct fcoe_transport *ft = NULL;
+       struct fcoe_ctlr_device *ctlr_dev = NULL;
+       int rc = 0;
+       int err;
+
+       mutex_lock(&ft_mutex);
+
+       netdev = fcoe_if_to_netdev(buf);
+       if (!netdev) {
+               LIBFCOE_TRANSPORT_DBG("Invalid device %s.\n", buf);
+               rc = -ENODEV;
+               goto out_nodev;
+       }
+
+       ft = fcoe_netdev_map_lookup(netdev);
+       if (ft) {
+               LIBFCOE_TRANSPORT_DBG("transport %s already has existing "
+                                     "FCoE instance on %s.\n",
+                                     ft->name, netdev->name);
+               rc = -EEXIST;
+               goto out_putdev;
+       }
+
+       ft = fcoe_transport_lookup(netdev);
+       if (!ft) {
+               LIBFCOE_TRANSPORT_DBG("no FCoE transport found for %s.\n",
+                                     netdev->name);
+               rc = -ENODEV;
+               goto out_putdev;
+       }
+
+       /* pass to transport create */
+       err = ft->alloc ? ft->alloc(netdev) : -ENODEV;
+       if (err) {
+               fcoe_del_netdev_mapping(netdev);
+               rc = -ENOMEM;
+               goto out_putdev;
+       }
+
+       err = fcoe_add_netdev_mapping(netdev, ft);
+       if (err) {
+               LIBFCOE_TRANSPORT_DBG("failed to add new netdev mapping "
+                                     "for FCoE transport %s for %s.\n",
+                                     ft->name, netdev->name);
+               rc = -ENODEV;
+               goto out_putdev;
+       }
+
+       LIBFCOE_TRANSPORT_DBG("transport %s %s to create fcoe on %s.\n",
+                             ft->name, (ctlr_dev) ? "succeeded" : "failed",
+                             netdev->name);
+
+out_putdev:
+       dev_put(netdev);
+out_nodev:
+       mutex_unlock(&ft_mutex);
+       if (rc)
+               return rc;
+       return count;
+}
+
+ssize_t fcoe_ctlr_destroy_store(struct bus_type *bus,
+                               const char *buf, size_t count)
+{
+       int rc = -ENODEV;
+       struct net_device *netdev = NULL;
+       struct fcoe_transport *ft = NULL;
+
+       mutex_lock(&ft_mutex);
+
+       netdev = fcoe_if_to_netdev(buf);
+       if (!netdev) {
+               LIBFCOE_TRANSPORT_DBG("invalid device %s.\n", buf);
+               goto out_nodev;
+       }
+
+       ft = fcoe_netdev_map_lookup(netdev);
+       if (!ft) {
+               LIBFCOE_TRANSPORT_DBG("no FCoE transport found for %s.\n",
+                                     netdev->name);
+               goto out_putdev;
+       }
+
+       /* pass to transport destroy */
+       rc = ft->destroy(netdev);
+       if (rc)
+               goto out_putdev;
+
+       fcoe_del_netdev_mapping(netdev);
+       LIBFCOE_TRANSPORT_DBG("transport %s %s to destroy fcoe on %s.\n",
+                             ft->name, (rc) ? "failed" : "succeeded",
+                             netdev->name);
+       rc = count; /* required for successful return */
+out_putdev:
+       dev_put(netdev);
+out_nodev:
+       mutex_unlock(&ft_mutex);
+       return rc;
+}
+EXPORT_SYMBOL(fcoe_ctlr_destroy_store);
 
 /**
  * fcoe_transport_create() - Create a fcoe interface
@@ -769,11 +962,7 @@ out_putdev:
        dev_put(netdev);
 out_nodev:
        mutex_unlock(&ft_mutex);
-
-       if (rc == -ERESTARTSYS)
-               return restart_syscall();
-       else
-               return rc;
+       return rc;
 }
 
 /**
index 6af5fc3..d3bb16d 100644 (file)
@@ -2,9 +2,10 @@
 #define _FCOE_LIBFCOE_H_
 
 extern unsigned int libfcoe_debug_logging;
-#define LIBFCOE_LOGGING            0x01 /* General logging, not categorized */
-#define LIBFCOE_FIP_LOGGING 0x02 /* FIP logging */
-#define LIBFCOE_TRANSPORT_LOGGING      0x04 /* FCoE transport logging */
+#define LIBFCOE_LOGGING                  0x01 /* General logging, not categorized */
+#define LIBFCOE_FIP_LOGGING       0x02 /* FIP logging */
+#define LIBFCOE_TRANSPORT_LOGGING 0x04 /* FCoE transport logging */
+#define LIBFCOE_SYSFS_LOGGING     0x08 /* fcoe_sysfs logging */
 
 #define LIBFCOE_CHECK_LOGGING(LEVEL, CMD)              \
 do {                                                   \
@@ -16,16 +17,19 @@ do {                                                        \
 
 #define LIBFCOE_DBG(fmt, args...)                                      \
        LIBFCOE_CHECK_LOGGING(LIBFCOE_LOGGING,                          \
-                             printk(KERN_INFO "libfcoe: " fmt, ##args);)
+                             pr_info("libfcoe: " fmt, ##args);)
 
 #define LIBFCOE_FIP_DBG(fip, fmt, args...)                             \
        LIBFCOE_CHECK_LOGGING(LIBFCOE_FIP_LOGGING,                      \
-                             printk(KERN_INFO "host%d: fip: " fmt,     \
-                                    (fip)->lp->host->host_no, ##args);)
+                             pr_info("host%d: fip: " fmt,              \
+                                     (fip)->lp->host->host_no, ##args);)
 
 #define LIBFCOE_TRANSPORT_DBG(fmt, args...)                            \
        LIBFCOE_CHECK_LOGGING(LIBFCOE_TRANSPORT_LOGGING,                \
-                             printk(KERN_INFO "%s: " fmt,              \
-                                    __func__, ##args);)
+                             pr_info("%s: " fmt, __func__, ##args);)
+
+#define LIBFCOE_SYSFS_DBG(cdev, fmt, args...)                          \
+       LIBFCOE_CHECK_LOGGING(LIBFCOE_SYSFS_LOGGING,                    \
+                             pr_info("ctlr_%d: " fmt, cdev->id, ##args);)
 
 #endif /* _FCOE_LIBFCOE_H_ */
index 37c3440..383598f 100644 (file)
@@ -7,6 +7,8 @@ fnic-y  := \
        fnic_res.o \
        fnic_fcs.o \
        fnic_scsi.o \
+       fnic_trace.o \
+       fnic_debugfs.o \
        vnic_cq.o \
        vnic_dev.o \
        vnic_intr.o \
index 95a5ba2..98436c3 100644 (file)
@@ -26,6 +26,7 @@
 #include <scsi/libfcoe.h>
 #include "fnic_io.h"
 #include "fnic_res.h"
+#include "fnic_trace.h"
 #include "vnic_dev.h"
 #include "vnic_wq.h"
 #include "vnic_rq.h"
 #define FNIC_TAG_MASK          (BIT(24) - 1)   /* mask for lookup */
 #define FNIC_NO_TAG             -1
 
+/*
+ * Command flags to identify the type of command and for other future
+ * use.
+ */
+#define FNIC_NO_FLAGS                   0
+#define FNIC_IO_INITIALIZED             BIT(0)
+#define FNIC_IO_ISSUED                  BIT(1)
+#define FNIC_IO_DONE                    BIT(2)
+#define FNIC_IO_REQ_NULL                BIT(3)
+#define FNIC_IO_ABTS_PENDING            BIT(4)
+#define FNIC_IO_ABORTED                 BIT(5)
+#define FNIC_IO_ABTS_ISSUED             BIT(6)
+#define FNIC_IO_TERM_ISSUED             BIT(7)
+#define FNIC_IO_INTERNAL_TERM_ISSUED    BIT(8)
+#define FNIC_IO_ABT_TERM_DONE           BIT(9)
+#define FNIC_IO_ABT_TERM_REQ_NULL       BIT(10)
+#define FNIC_IO_ABT_TERM_TIMED_OUT      BIT(11)
+#define FNIC_DEVICE_RESET               BIT(12)  /* Device reset request */
+#define FNIC_DEV_RST_ISSUED             BIT(13)
+#define FNIC_DEV_RST_TIMED_OUT          BIT(14)
+#define FNIC_DEV_RST_ABTS_ISSUED        BIT(15)
+#define FNIC_DEV_RST_TERM_ISSUED        BIT(16)
+#define FNIC_DEV_RST_DONE               BIT(17)
+#define FNIC_DEV_RST_REQ_NULL           BIT(18)
+#define FNIC_DEV_RST_ABTS_DONE          BIT(19)
+#define FNIC_DEV_RST_TERM_DONE          BIT(20)
+#define FNIC_DEV_RST_ABTS_PENDING       BIT(21)
+
 /*
  * Usage of the scsi_cmnd scratchpad.
  * These fields are locked by the hashed io_req_lock.
@@ -64,6 +93,7 @@
 #define CMD_ABTS_STATUS(Cmnd)  ((Cmnd)->SCp.Message)
 #define CMD_LR_STATUS(Cmnd)    ((Cmnd)->SCp.have_data_in)
 #define CMD_TAG(Cmnd)           ((Cmnd)->SCp.sent_command)
+#define CMD_FLAGS(Cmnd)         ((Cmnd)->SCp.Status)
 
 #define FCPIO_INVALID_CODE 0x100 /* hdr_status value unused by firmware */
 
 #define FNIC_HOST_RESET_TIMEOUT             10000      /* mSec */
 #define FNIC_RMDEVICE_TIMEOUT        1000       /* mSec */
 #define FNIC_HOST_RESET_SETTLE_TIME  30         /* Sec */
+#define FNIC_ABT_TERM_DELAY_TIMEOUT  500        /* mSec */
 
 #define FNIC_MAX_FCP_TARGET     256
 
+/**
+ * state_flags to identify host state along with fnic's state
+ **/
+#define __FNIC_FLAGS_FWRESET           BIT(0) /* fwreset in progress */
+#define __FNIC_FLAGS_BLOCK_IO          BIT(1) /* IOs are blocked */
+
+#define FNIC_FLAGS_NONE                        (0)
+#define FNIC_FLAGS_FWRESET             (__FNIC_FLAGS_FWRESET | \
+                                       __FNIC_FLAGS_BLOCK_IO)
+
+#define FNIC_FLAGS_IO_BLOCKED          (__FNIC_FLAGS_BLOCK_IO)
+
+#define fnic_set_state_flags(fnicp, st_flags)  \
+       __fnic_set_state_flags(fnicp, st_flags, 0)
+
+#define fnic_clear_state_flags(fnicp, st_flags)  \
+       __fnic_set_state_flags(fnicp, st_flags, 1)
+
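The two wrapper macros above funnel into __fnic_set_state_flags(), added in fnic_scsi.c later in this patch, with the final argument selecting set versus clear. A stand-alone sketch of that convention, using mock types so it compiles and runs outside the kernel (the real helper takes a struct fnic and serializes on the SCSI host lock):

/*
 * Compilable mock of the set/clear convention; types and the lack of
 * locking are simplifications for illustration only.
 */
#include <stdio.h>

#define BIT(n)			(1UL << (n))
#define __FNIC_FLAGS_FWRESET	BIT(0)
#define __FNIC_FLAGS_BLOCK_IO	BIT(1)
#define FNIC_FLAGS_FWRESET	(__FNIC_FLAGS_FWRESET | __FNIC_FLAGS_BLOCK_IO)

struct fnic_mock { unsigned long state_flags; };

static void __fnic_set_state_flags(struct fnic_mock *f, unsigned long st,
				   unsigned long clearbits)
{
	if (clearbits)
		f->state_flags &= ~st;	/* fnic_clear_state_flags() path */
	else
		f->state_flags |= st;	/* fnic_set_state_flags() path */
}

int main(void)
{
	struct fnic_mock f = { 0 };

	__fnic_set_state_flags(&f, FNIC_FLAGS_FWRESET, 0);
	printf("during fw reset: %#lx\n", f.state_flags);
	__fnic_set_state_flags(&f, FNIC_FLAGS_FWRESET, 1);
	printf("after fw reset:  %#lx\n", f.state_flags);
	return 0;
}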
 extern unsigned int fnic_log_level;
 
 #define FNIC_MAIN_LOGGING 0x01
@@ -170,6 +219,9 @@ struct fnic {
 
        struct completion *remove_wait; /* device remove thread blocks */
 
+       atomic_t in_flight;             /* io counter */
+       u32 _reserved;                  /* fill hole */
+       unsigned long state_flags;      /* protected by host lock */
        enum fnic_state state;
        spinlock_t fnic_lock;
 
@@ -267,4 +319,12 @@ const char *fnic_state_to_str(unsigned int state);
 void fnic_log_q_error(struct fnic *fnic);
 void fnic_handle_link_event(struct fnic *fnic);
 
+int fnic_is_abts_pending(struct fnic *, struct scsi_cmnd *);
+
+static inline int
+fnic_chk_state_flags_locked(struct fnic *fnic, unsigned long st_flags)
+{
+       return ((fnic->state_flags & st_flags) == st_flags);
+}
+void __fnic_set_state_flags(struct fnic *, unsigned long, unsigned long);
 #endif /* _FNIC_H_ */
diff --git a/drivers/scsi/fnic/fnic_debugfs.c b/drivers/scsi/fnic/fnic_debugfs.c
new file mode 100644 (file)
index 0000000..adc1f7f
--- /dev/null
@@ -0,0 +1,314 @@
+/*
+ * Copyright 2012 Cisco Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/debugfs.h>
+#include "fnic.h"
+
+static struct dentry *fnic_trace_debugfs_root;
+static struct dentry *fnic_trace_debugfs_file;
+static struct dentry *fnic_trace_enable;
+
+/*
+ * fnic_trace_ctrl_open - Open the trace_enable file
+ * @inode: The inode pointer.
+ * @file: The file pointer to attach the trace enable/disable flag.
+ *
+ * Description:
+ * This routine opens the debugfs file trace_enable.
+ *
+ * Returns:
+ * This function returns zero if successful.
+ */
+static int fnic_trace_ctrl_open(struct inode *inode, struct file *filp)
+{
+       filp->private_data = inode->i_private;
+       return 0;
+}
+
+/*
+ * fnic_trace_ctrl_read - Read a trace_enable debugfs file
+ * @filp: The file pointer to read from.
+ * @ubuf: The buffer to copy the data to.
+ * @cnt: The number of bytes to read.
+ * @ppos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads the value of the fnic_tracing_enabled variable into
+ * the local buffer @buf. It then starts reading at @ppos and copies up to
+ * @cnt bytes of data from @buf to @ubuf.
+ *
+ * Returns:
+ * This function returns the amount of data that was read.
+ */
+static ssize_t fnic_trace_ctrl_read(struct file *filp,
+                                 char __user *ubuf,
+                                 size_t cnt, loff_t *ppos)
+{
+       char buf[64];
+       int len;
+       len = sprintf(buf, "%u\n", fnic_tracing_enabled);
+
+       return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
+}
+
+/*
+ * fnic_trace_ctrl_write - Write to trace_enable debugfs file
+ * @filp: The file pointer to write from.
+ * @ubuf: The buffer to copy the data from.
+ * @cnt: The number of bytes to write.
+ * @ppos: The position in the file to start writing to.
+ *
+ * Description:
+ * This routine copies data from the user buffer @ubuf into the local
+ * buffer @buf and sets fnic_tracing_enabled according to the user input.
+ *
+ * Returns:
+ * This function returns the amount of data that was written.
+ */
+static ssize_t fnic_trace_ctrl_write(struct file *filp,
+                                 const char __user *ubuf,
+                                 size_t cnt, loff_t *ppos)
+{
+       char buf[64];
+       unsigned long val;
+       int ret;
+
+       if (cnt >= sizeof(buf))
+               return -EINVAL;
+
+       if (copy_from_user(&buf, ubuf, cnt))
+               return -EFAULT;
+
+       buf[cnt] = 0;
+
+       ret = kstrtoul(buf, 10, &val);
+       if (ret < 0)
+               return ret;
+
+       fnic_tracing_enabled = val;
+       (*ppos)++;
+
+       return cnt;
+}
+
+/*
+ * fnic_trace_debugfs_open - Open the fnic trace log
+ * @inode: The inode pointer
+ * @file: The file pointer to attach the log output
+ *
+ * Description:
+ * This routine is the entry point for the debugfs open file operation.
+ * It allocates the necessary buffer for the log, fills the buffer from
+ * the in-memory log and then returns a pointer to that log in
+ * the private_data field in @file.
+ *
+ * Returns:
+ * This function returns zero if successful. On error it will return
+ * a negative error value.
+ */
+static int fnic_trace_debugfs_open(struct inode *inode,
+                                 struct file *file)
+{
+       fnic_dbgfs_t *fnic_dbg_prt;
+       fnic_dbg_prt = kzalloc(sizeof(fnic_dbgfs_t), GFP_KERNEL);
+       if (!fnic_dbg_prt)
+               return -ENOMEM;
+
+       fnic_dbg_prt->buffer = vmalloc((3*(trace_max_pages * PAGE_SIZE)));
+       if (!fnic_dbg_prt->buffer) {
+               kfree(fnic_dbg_prt);
+               return -ENOMEM;
+       }
+       memset((void *)fnic_dbg_prt->buffer, 0,
+                         (3*(trace_max_pages * PAGE_SIZE)));
+       fnic_dbg_prt->buffer_len = fnic_get_trace_data(fnic_dbg_prt);
+       file->private_data = fnic_dbg_prt;
+       return 0;
+}
+
+/*
+ * fnic_trace_debugfs_lseek - Seek through a debugfs file
+ * @file: The file pointer to seek through.
+ * @offset: The offset to seek to or the amount to seek by.
+ * @howto: Indicates how to seek.
+ *
+ * Description:
+ * This routine is the entry point for the debugfs lseek file operation.
+ * The @howto parameter indicates whether @offset is the offset to directly
+ * seek to, or if it is a value to seek forward or reverse by. This function
+ * figures out what the new offset of the debugfs file will be and assigns
+ * that value to the f_pos field of @file.
+ *
+ * Returns:
+ * This function returns the new offset if successful and returns a negative
+ * error if unable to process the seek.
+ */
+static loff_t fnic_trace_debugfs_lseek(struct file *file,
+                                       loff_t offset,
+                                       int howto)
+{
+       fnic_dbgfs_t *fnic_dbg_prt = file->private_data;
+       loff_t pos = -1;
+
+       switch (howto) {
+       case 0:
+               pos = offset;
+               break;
+       case 1:
+               pos = file->f_pos + offset;
+               break;
+       case 2:
+               pos = fnic_dbg_prt->buffer_len - offset;
+       }
+       return (pos < 0 || pos > fnic_dbg_prt->buffer_len) ?
+                         -EINVAL : (file->f_pos = pos);
+}
+
+/*
+ * fnic_trace_debugfs_read - Read a debugfs file
+ * @file: The file pointer to read from.
+ * @ubuf: The buffer to copy the data to.
+ * @nbytes: The number of bytes to read.
+ * @pos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads data from the buffer indicated in the private_data
+ * field of @file. It will start reading at @pos and copy up to @nbytes of
+ * data to @ubuf.
+ *
+ * Returns:
+ * This function returns the amount of data that was read (this could be
+ * less than @nbytes if the end of the file was reached).
+ */
+static ssize_t fnic_trace_debugfs_read(struct file *file,
+                                       char __user *ubuf,
+                                       size_t nbytes,
+                                       loff_t *pos)
+{
+       fnic_dbgfs_t *fnic_dbg_prt = file->private_data;
+       int rc = 0;
+       rc = simple_read_from_buffer(ubuf, nbytes, pos,
+                                 fnic_dbg_prt->buffer,
+                                 fnic_dbg_prt->buffer_len);
+       return rc;
+}
+
+/*
+ * fnic_trace_debugfs_release - Release the buffer used to store
+ * debugfs file data
+ * @inode: The inode pointer
+ * @file: The file pointer that contains the buffer to release
+ *
+ * Description:
+ * This routine frees the buffer that was allocated when the debugfs
+ * file was opened.
+ *
+ * Returns:
+ * This function returns zero.
+ */
+static int fnic_trace_debugfs_release(struct inode *inode,
+                                         struct file *file)
+{
+       fnic_dbgfs_t *fnic_dbg_prt = file->private_data;
+
+       vfree(fnic_dbg_prt->buffer);
+       kfree(fnic_dbg_prt);
+       return 0;
+}
+
+static const struct file_operations fnic_trace_ctrl_fops = {
+       .owner = THIS_MODULE,
+       .open = fnic_trace_ctrl_open,
+       .read = fnic_trace_ctrl_read,
+       .write = fnic_trace_ctrl_write,
+};
+
+static const struct file_operations fnic_trace_debugfs_fops = {
+       .owner = THIS_MODULE,
+       .open = fnic_trace_debugfs_open,
+       .llseek = fnic_trace_debugfs_lseek,
+       .read = fnic_trace_debugfs_read,
+       .release = fnic_trace_debugfs_release,
+};
+
+/*
+ * fnic_trace_debugfs_init - Initialize debugfs for fnic trace logging
+ *
+ * Description:
+ * When debugfs is configured this routine sets up the fnic debugfs
+ * file system. If not already present, it creates the fnic directory,
+ * a "trace" file that exposes the fnic trace buffer contents, and a
+ * "tracing_enable" file that controls whether trace entries are logged
+ * into the trace buffer.
+ */
+int fnic_trace_debugfs_init(void)
+{
+       int rc = -1;
+       fnic_trace_debugfs_root = debugfs_create_dir("fnic", NULL);
+       if (!fnic_trace_debugfs_root) {
+               printk(KERN_DEBUG "Cannot create debugfs root\n");
+               return rc;
+       }
+       fnic_trace_enable = debugfs_create_file("tracing_enable",
+                                         S_IFREG|S_IRUGO|S_IWUSR,
+                                         fnic_trace_debugfs_root,
+                                         NULL, &fnic_trace_ctrl_fops);
+
+       if (!fnic_trace_enable) {
+               printk(KERN_DEBUG "Cannot create trace_enable file"
+                                 " under debugfs");
+               return rc;
+       }
+
+       fnic_trace_debugfs_file = debugfs_create_file("trace",
+                                                 S_IFREG|S_IRUGO|S_IWUSR,
+                                                 fnic_trace_debugfs_root,
+                                                 NULL,
+                                                 &fnic_trace_debugfs_fops);
+
+       if (!fnic_trace_debugfs_file) {
+               printk(KERN_DEBUG "Cannot create trace file under debugfs");
+               return rc;
+       }
+       rc = 0;
+       return rc;
+}
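Once fnic_trace_debugfs_init() has run, the trace control and trace dump files live under the fnic directory of debugfs. A hypothetical user-space sketch of using them, assuming debugfs is mounted at the conventional /sys/kernel/debug location:

/*
 * Hypothetical user-space sketch, not part of the patch: the debugfs
 * mount point is an assumption; file names match those created above.
 */
#include <stdio.h>

int main(void)
{
	FILE *ctl = fopen("/sys/kernel/debug/fnic/tracing_enable", "w");
	FILE *trc;
	char line[256];

	if (!ctl)
		return 1;
	fputs("1\n", ctl);			/* handled by fnic_trace_ctrl_write() */
	fclose(ctl);

	trc = fopen("/sys/kernel/debug/fnic/trace", "r");
	if (!trc)
		return 1;
	while (fgets(line, sizeof(line), trc))	/* served by fnic_trace_debugfs_read() */
		fputs(line, stdout);
	fclose(trc);
	return 0;
}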
+
+/*
+ * fnic_trace_debugfs_terminate - Tear down debugfs infrastructure
+ *
+ * Description:
+ * When debugfs is configured this routine removes the debugfs file
+ * system elements that are specific to fnic trace logging.
+ */
+void fnic_trace_debugfs_terminate(void)
+{
+       if (fnic_trace_debugfs_file) {
+               debugfs_remove(fnic_trace_debugfs_file);
+               fnic_trace_debugfs_file = NULL;
+       }
+       if (fnic_trace_enable) {
+               debugfs_remove(fnic_trace_enable);
+               fnic_trace_enable = NULL;
+       }
+       if (fnic_trace_debugfs_root) {
+               debugfs_remove(fnic_trace_debugfs_root);
+               fnic_trace_debugfs_root = NULL;
+       }
+}
index f0b8969..c35b8f1 100644 (file)
@@ -21,7 +21,7 @@
 #include <scsi/fc/fc_fcp.h>
 
 #define FNIC_DFLT_SG_DESC_CNT  32
-#define FNIC_MAX_SG_DESC_CNT        1024    /* Maximum descriptors per sgl */
+#define FNIC_MAX_SG_DESC_CNT        256     /* Maximum descriptors per sgl */
 #define FNIC_SG_DESC_ALIGN          16      /* Descriptor address alignment */
 
 struct host_sg_desc {
@@ -45,7 +45,8 @@ enum fnic_sgl_list_type {
 };
 
 enum fnic_ioreq_state {
-       FNIC_IOREQ_CMD_PENDING = 0,
+       FNIC_IOREQ_NOT_INITED = 0,
+       FNIC_IOREQ_CMD_PENDING,
        FNIC_IOREQ_ABTS_PENDING,
        FNIC_IOREQ_ABTS_COMPLETE,
        FNIC_IOREQ_CMD_COMPLETE,
@@ -60,6 +61,7 @@ struct fnic_io_req {
        u8 sgl_type; /* device DMA descriptor list type */
        u8 io_completed:1; /* set to 1 when fw completes IO */
        u32 port_id; /* remote port DID */
+       unsigned long start_time; /* in jiffies */
        struct completion *abts_done; /* completion for abts */
        struct completion *dr_done; /* completion for device reset */
 };
index fbf3ac6..d601ac5 100644 (file)
@@ -68,6 +68,10 @@ unsigned int fnic_log_level;
 module_param(fnic_log_level, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(fnic_log_level, "bit mask of fnic logging levels");
 
+unsigned int fnic_trace_max_pages = 16;
+module_param(fnic_trace_max_pages, uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(fnic_trace_max_pages, "Total allocated memory pages "
+                                       "for fnic trace buffer");
 
 static struct libfc_function_template fnic_transport_template = {
        .frame_send = fnic_send,
@@ -624,6 +628,9 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        }
        fnic->state = FNIC_IN_FC_MODE;
 
+       atomic_set(&fnic->in_flight, 0);
+       fnic->state_flags = FNIC_FLAGS_NONE;
+
        /* Enable hardware stripping of vlan header on ingress */
        fnic_set_nic_config(fnic, 0, 0, 0, 0, 0, 0, 1);
 
@@ -858,6 +865,14 @@ static int __init fnic_init_module(void)
 
        printk(KERN_INFO PFX "%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);
 
+       /* Allocate memory for trace buffer */
+       err = fnic_trace_buf_init();
+       if (err < 0) {
+               printk(KERN_ERR PFX "Trace buffer initialization Failed "
+                                 "Fnic Tracing utility is disabled\n");
+               fnic_trace_free();
+       }
+
        /* Create a cache for allocation of default size sgls */
        len = sizeof(struct fnic_dflt_sgl_list);
        fnic_sgl_cache[FNIC_SGL_CACHE_DFLT] = kmem_cache_create
@@ -928,6 +943,7 @@ err_create_fnic_ioreq_slab:
 err_create_fnic_sgl_slab_max:
        kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
 err_create_fnic_sgl_slab_dflt:
+       fnic_trace_free();
        return err;
 }
 
@@ -939,6 +955,7 @@ static void __exit fnic_cleanup_module(void)
        kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
        kmem_cache_destroy(fnic_io_req_cache);
        fc_release_transport(fnic_fc_transport);
+       fnic_trace_free();
 }
 
 module_init(fnic_init_module);
index c40ce52..be99e75 100644 (file)
@@ -47,6 +47,7 @@ const char *fnic_state_str[] = {
 };
 
 static const char *fnic_ioreq_state_str[] = {
+       [FNIC_IOREQ_NOT_INITED] = "FNIC_IOREQ_NOT_INITED",
        [FNIC_IOREQ_CMD_PENDING] = "FNIC_IOREQ_CMD_PENDING",
        [FNIC_IOREQ_ABTS_PENDING] = "FNIC_IOREQ_ABTS_PENDING",
        [FNIC_IOREQ_ABTS_COMPLETE] = "FNIC_IOREQ_ABTS_COMPLETE",
@@ -165,6 +166,33 @@ static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq)
 }
 
 
+/**
+ * __fnic_set_state_flags
+ * Sets/Clears bits in fnic's state_flags
+ **/
+void
+__fnic_set_state_flags(struct fnic *fnic, unsigned long st_flags,
+                       unsigned long clearbits)
+{
+       struct Scsi_Host *host = fnic->lport->host;
+       int sh_locked = spin_is_locked(host->host_lock);
+       unsigned long flags = 0;
+
+       if (!sh_locked)
+               spin_lock_irqsave(host->host_lock, flags);
+
+       if (clearbits)
+               fnic->state_flags &= ~st_flags;
+       else
+               fnic->state_flags |= st_flags;
+
+       if (!sh_locked)
+               spin_unlock_irqrestore(host->host_lock, flags);
+
+       return;
+}
+
+
 /*
  * fnic_fw_reset_handler
  * Routine to send reset msg to fw
@@ -175,9 +203,16 @@ int fnic_fw_reset_handler(struct fnic *fnic)
        int ret = 0;
        unsigned long flags;
 
+       /* indicate fwreset to io path */
+       fnic_set_state_flags(fnic, FNIC_FLAGS_FWRESET);
+
        skb_queue_purge(&fnic->frame_queue);
        skb_queue_purge(&fnic->tx_queue);
 
+       /* wait for io cmpl */
+       while (atomic_read(&fnic->in_flight))
+               schedule_timeout(msecs_to_jiffies(1));
+
        spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
 
        if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
@@ -193,9 +228,12 @@ int fnic_fw_reset_handler(struct fnic *fnic)
        if (!ret)
                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                              "Issued fw reset\n");
-       else
+       else {
+               fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);
                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                              "Failed to issue fw reset\n");
+       }
+
        return ret;
 }
 
@@ -312,6 +350,8 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
 
        if (unlikely(!vnic_wq_copy_desc_avail(wq))) {
                spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
+               FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+                         "fnic_queue_wq_copy_desc failure - no descriptors\n");
                return SCSI_MLQUEUE_HOST_BUSY;
        }
 
@@ -351,16 +391,20 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
  */
 static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
 {
-       struct fc_lport *lp;
+       struct fc_lport *lp = shost_priv(sc->device->host);
        struct fc_rport *rport;
-       struct fnic_io_req *io_req;
-       struct fnic *fnic;
+       struct fnic_io_req *io_req = NULL;
+       struct fnic *fnic = lport_priv(lp);
        struct vnic_wq_copy *wq;
        int ret;
-       int sg_count;
+       u64 cmd_trace;
+       int sg_count = 0;
        unsigned long flags;
        unsigned long ptr;
 
+       if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED)))
+               return SCSI_MLQUEUE_HOST_BUSY;
+
        rport = starget_to_rport(scsi_target(sc->device));
        ret = fc_remote_port_chkready(rport);
        if (ret) {
@@ -369,20 +413,21 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
                return 0;
        }
 
-       lp = shost_priv(sc->device->host);
        if (lp->state != LPORT_ST_READY || !(lp->link_up))
                return SCSI_MLQUEUE_HOST_BUSY;
 
+       atomic_inc(&fnic->in_flight);
+
        /*
         * Release host lock, use driver resource specific locks from here.
         * Don't re-enable interrupts in case they were disabled prior to the
         * caller disabling them.
         */
        spin_unlock(lp->host->host_lock);
+       CMD_STATE(sc) = FNIC_IOREQ_NOT_INITED;
+       CMD_FLAGS(sc) = FNIC_NO_FLAGS;
 
        /* Get a new io_req for this SCSI IO */
-       fnic = lport_priv(lp);
-
        io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
        if (!io_req) {
                ret = SCSI_MLQUEUE_HOST_BUSY;
@@ -393,6 +438,9 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
        /* Map the data buffer */
        sg_count = scsi_dma_map(sc);
        if (sg_count < 0) {
+               FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
+                         sc->request->tag, sc, 0, sc->cmnd[0],
+                         sg_count, CMD_STATE(sc));
                mempool_free(io_req, fnic->io_req_pool);
                goto out;
        }
@@ -427,8 +475,10 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
 
        /* initialize rest of io_req */
        io_req->port_id = rport->port_id;
+       io_req->start_time = jiffies;
        CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
        CMD_SP(sc) = (char *)io_req;
+       CMD_FLAGS(sc) |= FNIC_IO_INITIALIZED;
        sc->scsi_done = done;
 
        /* create copy wq desc and enqueue it */
@@ -440,7 +490,9 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
                 * refetch the pointer under the lock.
                 */
                spinlock_t *io_lock = fnic_io_lock_hash(fnic, sc);
-
+               FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
+                         sc->request->tag, sc, 0, 0, 0,
+                         (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
                spin_lock_irqsave(io_lock, flags);
                io_req = (struct fnic_io_req *)CMD_SP(sc);
                CMD_SP(sc) = NULL;
@@ -450,8 +502,21 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
                        fnic_release_ioreq_buf(fnic, io_req, sc);
                        mempool_free(io_req, fnic->io_req_pool);
                }
+       } else {
+               /* REVISIT: Use per IO lock in the final code */
+               CMD_FLAGS(sc) |= FNIC_IO_ISSUED;
        }
 out:
+       cmd_trace = ((u64)sc->cmnd[0] << 56 | (u64)sc->cmnd[7] << 40 |
+                       (u64)sc->cmnd[8] << 32 | (u64)sc->cmnd[2] << 24 |
+                       (u64)sc->cmnd[3] << 16 | (u64)sc->cmnd[4] << 8 |
+                       sc->cmnd[5]);
+
+       FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
+                 sc->request->tag, sc, io_req,
+                 sg_count, cmd_trace,
+                 (((u64)CMD_FLAGS(sc) >> 32) | CMD_STATE(sc)));
+       atomic_dec(&fnic->in_flight);
        /* acquire host lock before returning to SCSI */
        spin_lock(lp->host->host_lock);
        return ret;
@@ -529,6 +594,8 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
        fnic_flush_tx(fnic);
 
  reset_cmpl_handler_end:
+       fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);
+
        return ret;
 }
 
@@ -622,6 +689,7 @@ static inline void fnic_fcpio_ack_handler(struct fnic *fnic,
        struct vnic_wq_copy *wq;
        u16 request_out = desc->u.ack.request_out;
        unsigned long flags;
+       u64 *ox_id_tag = (u64 *)(void *)desc;
 
        /* mark the ack state */
        wq = &fnic->wq_copy[cq_index - fnic->raw_wq_count - fnic->rq_count];
@@ -632,6 +700,9 @@ static inline void fnic_fcpio_ack_handler(struct fnic *fnic,
                fnic->fw_ack_recd[0] = 1;
        }
        spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
+       FNIC_TRACE(fnic_fcpio_ack_handler,
+                 fnic->lport->host->host_no, 0, 0, ox_id_tag[2], ox_id_tag[3],
+                 ox_id_tag[4], ox_id_tag[5]);
 }
 
 /*
@@ -651,27 +722,53 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
        struct scsi_cmnd *sc;
        unsigned long flags;
        spinlock_t *io_lock;
+       u64 cmd_trace;
+       unsigned long start_time;
 
        /* Decode the cmpl description to get the io_req id */
        fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
        fcpio_tag_id_dec(&tag, &id);
+       icmnd_cmpl = &desc->u.icmnd_cmpl;
 
-       if (id >= FNIC_MAX_IO_REQ)
+       if (id >= FNIC_MAX_IO_REQ) {
+               shost_printk(KERN_ERR, fnic->lport->host,
+                       "Tag out of range tag %x hdr status = %s\n",
+                            id, fnic_fcpio_status_to_str(hdr_status));
                return;
+       }
 
        sc = scsi_host_find_tag(fnic->lport->host, id);
        WARN_ON_ONCE(!sc);
-       if (!sc)
+       if (!sc) {
+               shost_printk(KERN_ERR, fnic->lport->host,
+                         "icmnd_cmpl sc is null - "
+                         "hdr status = %s tag = 0x%x desc = 0x%p\n",
+                         fnic_fcpio_status_to_str(hdr_status), id, desc);
+               FNIC_TRACE(fnic_fcpio_icmnd_cmpl_handler,
+                         fnic->lport->host->host_no, id,
+                         ((u64)icmnd_cmpl->_resvd0[1] << 16 |
+                         (u64)icmnd_cmpl->_resvd0[0]),
+                         ((u64)hdr_status << 16 |
+                         (u64)icmnd_cmpl->scsi_status << 8 |
+                         (u64)icmnd_cmpl->flags), desc,
+                         (u64)icmnd_cmpl->residual, 0);
                return;
+       }
 
        io_lock = fnic_io_lock_hash(fnic, sc);
        spin_lock_irqsave(io_lock, flags);
        io_req = (struct fnic_io_req *)CMD_SP(sc);
        WARN_ON_ONCE(!io_req);
        if (!io_req) {
+               CMD_FLAGS(sc) |= FNIC_IO_REQ_NULL;
                spin_unlock_irqrestore(io_lock, flags);
+               shost_printk(KERN_ERR, fnic->lport->host,
+                         "icmnd_cmpl io_req is null - "
+                         "hdr status = %s tag = 0x%x sc 0x%p\n",
+                         fnic_fcpio_status_to_str(hdr_status), id, sc);
                return;
        }
+       start_time = io_req->start_time;
 
        /* firmware completed the io */
        io_req->io_completed = 1;
@@ -682,6 +779,28 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
         */
        if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
                spin_unlock_irqrestore(io_lock, flags);
+               CMD_FLAGS(sc) |= FNIC_IO_ABTS_PENDING;
+               switch (hdr_status) {
+               case FCPIO_SUCCESS:
+                       CMD_FLAGS(sc) |= FNIC_IO_DONE;
+                       FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+                                 "icmnd_cmpl ABTS pending hdr status = %s "
+                                 "sc  0x%p scsi_status %x  residual %d\n",
+                                 fnic_fcpio_status_to_str(hdr_status), sc,
+                                 icmnd_cmpl->scsi_status,
+                                 icmnd_cmpl->residual);
+                       break;
+               case FCPIO_ABORTED:
+                       CMD_FLAGS(sc) |= FNIC_IO_ABORTED;
+                       break;
+               default:
+                       FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+                                         "icmnd_cmpl abts pending "
+                                         "hdr status = %s tag = 0x%x sc = 0x%p\n",
+                                         fnic_fcpio_status_to_str(hdr_status),
+                                         id, sc);
+                       break;
+               }
                return;
        }
 
@@ -765,6 +884,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
 
        /* Break link with the SCSI command */
        CMD_SP(sc) = NULL;
+       CMD_FLAGS(sc) |= FNIC_IO_DONE;
 
        spin_unlock_irqrestore(io_lock, flags);
 
@@ -772,6 +892,20 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
 
        mempool_free(io_req, fnic->io_req_pool);
 
+       cmd_trace = ((u64)hdr_status << 56) |
+                 (u64)icmnd_cmpl->scsi_status << 48 |
+                 (u64)icmnd_cmpl->flags << 40 | (u64)sc->cmnd[0] << 32 |
+                 (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
+                 (u64)sc->cmnd[4] << 8 | sc->cmnd[5];
+
+       FNIC_TRACE(fnic_fcpio_icmnd_cmpl_handler,
+                 sc->device->host->host_no, id, sc,
+                 ((u64)icmnd_cmpl->_resvd0[1] << 56 |
+                 (u64)icmnd_cmpl->_resvd0[0] << 48 |
+                 jiffies_to_msecs(jiffies - start_time)),
+                 desc, cmd_trace,
+                 (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+
        if (sc->sc_data_direction == DMA_FROM_DEVICE) {
                fnic->lport->host_stats.fcp_input_requests++;
                fnic->fcp_input_bytes += xfer_len;
@@ -784,7 +918,6 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
        /* Call SCSI completion function to complete the IO */
        if (sc->scsi_done)
                sc->scsi_done(sc);
-
 }
 
 /* fnic_fcpio_itmf_cmpl_handler
@@ -801,28 +934,54 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
        struct fnic_io_req *io_req;
        unsigned long flags;
        spinlock_t *io_lock;
+       unsigned long start_time;
 
        fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
        fcpio_tag_id_dec(&tag, &id);
 
-       if ((id & FNIC_TAG_MASK) >= FNIC_MAX_IO_REQ)
+       if ((id & FNIC_TAG_MASK) >= FNIC_MAX_IO_REQ) {
+               shost_printk(KERN_ERR, fnic->lport->host,
+               "Tag out of range tag %x hdr status = %s\n",
+               id, fnic_fcpio_status_to_str(hdr_status));
                return;
+       }
 
        sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK);
        WARN_ON_ONCE(!sc);
-       if (!sc)
+       if (!sc) {
+               shost_printk(KERN_ERR, fnic->lport->host,
+                         "itmf_cmpl sc is null - hdr status = %s tag = 0x%x\n",
+                         fnic_fcpio_status_to_str(hdr_status), id);
                return;
-
+       }
        io_lock = fnic_io_lock_hash(fnic, sc);
        spin_lock_irqsave(io_lock, flags);
        io_req = (struct fnic_io_req *)CMD_SP(sc);
        WARN_ON_ONCE(!io_req);
        if (!io_req) {
                spin_unlock_irqrestore(io_lock, flags);
+               CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
+               shost_printk(KERN_ERR, fnic->lport->host,
+                         "itmf_cmpl io_req is null - "
+                         "hdr status = %s tag = 0x%x sc 0x%p\n",
+                         fnic_fcpio_status_to_str(hdr_status), id, sc);
                return;
        }
+       start_time = io_req->start_time;
 
-       if (id & FNIC_TAG_ABORT) {
+       if ((id & FNIC_TAG_ABORT) && (id & FNIC_TAG_DEV_RST)) {
+               /* Abort and terminate completion of device reset req */
+               /* REVISIT : Add asserts about various flags */
+               FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+                             "dev reset abts cmpl recd. id %x status %s\n",
+                             id, fnic_fcpio_status_to_str(hdr_status));
+               CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
+               CMD_ABTS_STATUS(sc) = hdr_status;
+               CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
+               if (io_req->abts_done)
+                       complete(io_req->abts_done);
+               spin_unlock_irqrestore(io_lock, flags);
+       } else if (id & FNIC_TAG_ABORT) {
                /* Completion of abort cmd */
                if (CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING) {
                        /* This is a late completion. Ignore it */
@@ -832,6 +991,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
                CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
                CMD_ABTS_STATUS(sc) = hdr_status;
 
+               CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                              "abts cmpl recd. id %d status %s\n",
                              (int)(id & FNIC_TAG_MASK),
@@ -855,14 +1015,58 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
 
                        fnic_release_ioreq_buf(fnic, io_req, sc);
                        mempool_free(io_req, fnic->io_req_pool);
-                       if (sc->scsi_done)
+                       if (sc->scsi_done) {
+                               FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
+                                       sc->device->host->host_no, id,
+                                       sc,
+                                       jiffies_to_msecs(jiffies - start_time),
+                                       desc,
+                                       (((u64)hdr_status << 40) |
+                                       (u64)sc->cmnd[0] << 32 |
+                                       (u64)sc->cmnd[2] << 24 |
+                                       (u64)sc->cmnd[3] << 16 |
+                                       (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
+                                       (((u64)CMD_FLAGS(sc) << 32) |
+                                       CMD_STATE(sc)));
                                sc->scsi_done(sc);
+                       }
                }
 
        } else if (id & FNIC_TAG_DEV_RST) {
                /* Completion of device reset */
                CMD_LR_STATUS(sc) = hdr_status;
+               if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
+                       spin_unlock_irqrestore(io_lock, flags);
+                       CMD_FLAGS(sc) |= FNIC_DEV_RST_ABTS_PENDING;
+                       FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
+                                 sc->device->host->host_no, id, sc,
+                                 jiffies_to_msecs(jiffies - start_time),
+                                 desc, 0,
+                                 (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+                       FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+                               "Terminate pending "
+                               "dev reset cmpl recd. id %d status %s\n",
+                               (int)(id & FNIC_TAG_MASK),
+                               fnic_fcpio_status_to_str(hdr_status));
+                       return;
+               }
+               if (CMD_FLAGS(sc) & FNIC_DEV_RST_TIMED_OUT) {
+                       /* Need to wait for terminate completion */
+                       spin_unlock_irqrestore(io_lock, flags);
+                       FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
+                                 sc->device->host->host_no, id, sc,
+                                 jiffies_to_msecs(jiffies - start_time),
+                                 desc, 0,
+                                 (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+                       FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+                               "dev reset cmpl recd after time out. "
+                               "id %d status %s\n",
+                               (int)(id & FNIC_TAG_MASK),
+                               fnic_fcpio_status_to_str(hdr_status));
+                       return;
+               }
                CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
+               CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                              "dev reset cmpl recd. id %d status %s\n",
                              (int)(id & FNIC_TAG_MASK),
@@ -889,7 +1093,6 @@ static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev,
                                   struct fcpio_fw_req *desc)
 {
        struct fnic *fnic = vnic_dev_priv(vdev);
-       int ret = 0;
 
        switch (desc->hdr.type) {
        case FCPIO_ACK: /* fw copied copy wq desc to its queue */
@@ -906,11 +1109,11 @@ static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev,
 
        case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */
        case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */
-               ret = fnic_fcpio_flogi_reg_cmpl_handler(fnic, desc);
+               fnic_fcpio_flogi_reg_cmpl_handler(fnic, desc);
                break;
 
        case FCPIO_RESET_CMPL: /* fw completed reset */
-               ret = fnic_fcpio_fw_reset_cmpl_handler(fnic, desc);
+               fnic_fcpio_fw_reset_cmpl_handler(fnic, desc);
                break;
 
        default:
@@ -920,7 +1123,7 @@ static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev,
                break;
        }
 
-       return ret;
+       return 0;
 }
 
 /*
@@ -950,6 +1153,7 @@ static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
        unsigned long flags = 0;
        struct scsi_cmnd *sc;
        spinlock_t *io_lock;
+       unsigned long start_time = 0;
 
        for (i = 0; i < FNIC_MAX_IO_REQ; i++) {
                if (i == exclude_id)
@@ -962,6 +1166,23 @@ static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
                io_lock = fnic_io_lock_hash(fnic, sc);
                spin_lock_irqsave(io_lock, flags);
                io_req = (struct fnic_io_req *)CMD_SP(sc);
+               if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
+                       !(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) {
+                       /*
+                        * We will be here only when FW completes reset
+                        * without sending completions for outstanding ios.
+                        */
+                       CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
+                       if (io_req && io_req->dr_done)
+                               complete(io_req->dr_done);
+                       else if (io_req && io_req->abts_done)
+                               complete(io_req->abts_done);
+                       spin_unlock_irqrestore(io_lock, flags);
+                       continue;
+               } else if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
+                       spin_unlock_irqrestore(io_lock, flags);
+                       continue;
+               }
                if (!io_req) {
                        spin_unlock_irqrestore(io_lock, flags);
                        goto cleanup_scsi_cmd;
@@ -975,6 +1196,7 @@ static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
                 * If there is a scsi_cmnd associated with this io_req, then
                 * free the corresponding state
                 */
+               start_time = io_req->start_time;
                fnic_release_ioreq_buf(fnic, io_req, sc);
                mempool_free(io_req, fnic->io_req_pool);
 
@@ -984,8 +1206,18 @@ cleanup_scsi_cmd:
                              " DID_TRANSPORT_DISRUPTED\n");
 
                /* Complete the command to SCSI */
-               if (sc->scsi_done)
+               if (sc->scsi_done) {
+                       FNIC_TRACE(fnic_cleanup_io,
+                                 sc->device->host->host_no, i, sc,
+                                 jiffies_to_msecs(jiffies - start_time),
+                                 0, ((u64)sc->cmnd[0] << 32 |
+                                 (u64)sc->cmnd[2] << 24 |
+                                 (u64)sc->cmnd[3] << 16 |
+                                 (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
+                                 (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+
                        sc->scsi_done(sc);
+               }
        }
 }
 
@@ -998,6 +1230,7 @@ void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
        struct scsi_cmnd *sc;
        unsigned long flags;
        spinlock_t *io_lock;
+       unsigned long start_time = 0;
 
        /* get the tag reference */
        fcpio_tag_id_dec(&desc->hdr.tag, &id);
@@ -1027,6 +1260,7 @@ void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
 
        spin_unlock_irqrestore(io_lock, flags);
 
+       start_time = io_req->start_time;
        fnic_release_ioreq_buf(fnic, io_req, sc);
        mempool_free(io_req, fnic->io_req_pool);
 
@@ -1035,8 +1269,17 @@ wq_copy_cleanup_scsi_cmd:
        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "wq_copy_cleanup_handler:"
                      " DID_NO_CONNECT\n");
 
-       if (sc->scsi_done)
+       if (sc->scsi_done) {
+               FNIC_TRACE(fnic_wq_copy_cleanup_handler,
+                         sc->device->host->host_no, id, sc,
+                         jiffies_to_msecs(jiffies - start_time),
+                         0, ((u64)sc->cmnd[0] << 32 |
+                         (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
+                         (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
+                         (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+
                sc->scsi_done(sc);
+       }
 }
 
 static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
@@ -1044,8 +1287,18 @@ static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
                                          struct fnic_io_req *io_req)
 {
        struct vnic_wq_copy *wq = &fnic->wq_copy[0];
+       struct Scsi_Host *host = fnic->lport->host;
        unsigned long flags;
 
+       spin_lock_irqsave(host->host_lock, flags);
+       if (unlikely(fnic_chk_state_flags_locked(fnic,
+                                               FNIC_FLAGS_IO_BLOCKED))) {
+               spin_unlock_irqrestore(host->host_lock, flags);
+               return 1;
+       } else
+               atomic_inc(&fnic->in_flight);
+       spin_unlock_irqrestore(host->host_lock, flags);
+
        spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
 
        if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
@@ -1053,6 +1306,9 @@ static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
 
        if (!vnic_wq_copy_desc_avail(wq)) {
                spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
+               atomic_dec(&fnic->in_flight);
+               FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+                       "fnic_queue_abort_io_req: failure: no descriptors\n");
                return 1;
        }
        fnic_queue_wq_copy_desc_itmf(wq, tag | FNIC_TAG_ABORT,
@@ -1060,12 +1316,15 @@ static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
                                     fnic->config.ra_tov, fnic->config.ed_tov);
 
        spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
+       atomic_dec(&fnic->in_flight);
+
        return 0;
 }
 
-void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
+static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
 {
        int tag;
+       int abt_tag;
        struct fnic_io_req *io_req;
        spinlock_t *io_lock;
        unsigned long flags;
@@ -1075,13 +1334,14 @@ void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
 
        FNIC_SCSI_DBG(KERN_DEBUG,
                      fnic->lport->host,
-                     "fnic_rport_reset_exch called portid 0x%06x\n",
+                     "fnic_rport_exch_reset called portid 0x%06x\n",
                      port_id);
 
        if (fnic->in_remove)
                return;
 
        for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
+               abt_tag = tag;
                sc = scsi_host_find_tag(fnic->lport->host, tag);
                if (!sc)
                        continue;
@@ -1096,6 +1356,15 @@ void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
                        continue;
                }
 
+               if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
+                       (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
+                       FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+                       "fnic_rport_exch_reset dev rst not pending sc 0x%p\n",
+                       sc);
+                       spin_unlock_irqrestore(io_lock, flags);
+                       continue;
+               }
+
                /*
                 * Found IO that is still pending with firmware and
                 * belongs to rport that went away
@@ -1104,9 +1373,29 @@ void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
                        spin_unlock_irqrestore(io_lock, flags);
                        continue;
                }
+               if (io_req->abts_done) {
+                       shost_printk(KERN_ERR, fnic->lport->host,
+                       "fnic_rport_exch_reset: io_req->abts_done is set "
+                       "state is %s\n",
+                       fnic_ioreq_state_to_str(CMD_STATE(sc)));
+               }
+
+               if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) {
+                       shost_printk(KERN_ERR, fnic->lport->host,
+                                 "rport_exch_reset "
+                                 "IO not yet issued %p tag 0x%x flags "
+                                 "%x state %d\n",
+                                 sc, tag, CMD_FLAGS(sc), CMD_STATE(sc));
+               }
                old_ioreq_state = CMD_STATE(sc);
                CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
                CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
+               if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
+                       abt_tag = (tag | FNIC_TAG_DEV_RST);
+                       FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+                       "fnic_rport_exch_reset dev rst sc 0x%p\n",
+                       sc);
+               }
 
                BUG_ON(io_req->abts_done);
 
@@ -1118,7 +1407,7 @@ void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
                /* Now queue the abort command to firmware */
                int_to_scsilun(sc->device->lun, &fc_lun);
 
-               if (fnic_queue_abort_io_req(fnic, tag,
+               if (fnic_queue_abort_io_req(fnic, abt_tag,
                                            FCPIO_ITMF_ABT_TASK_TERM,
                                            fc_lun.scsi_lun, io_req)) {
                        /*
@@ -1127,12 +1416,17 @@ void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
                         * aborted later by scsi_eh, or cleaned up during
                         * lun reset
                         */
-                       io_lock = fnic_io_lock_hash(fnic, sc);
-
                        spin_lock_irqsave(io_lock, flags);
                        if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
                                CMD_STATE(sc) = old_ioreq_state;
                        spin_unlock_irqrestore(io_lock, flags);
+               } else {
+                       spin_lock_irqsave(io_lock, flags);
+                       if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
+                               CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
+                       else
+                               CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
+                       spin_unlock_irqrestore(io_lock, flags);
                }
        }
 
@@ -1141,6 +1435,7 @@ void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
 void fnic_terminate_rport_io(struct fc_rport *rport)
 {
        int tag;
+       int abt_tag;
        struct fnic_io_req *io_req;
        spinlock_t *io_lock;
        unsigned long flags;
@@ -1154,14 +1449,15 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
 
        FNIC_SCSI_DBG(KERN_DEBUG,
                      fnic->lport->host, "fnic_terminate_rport_io called"
-                     " wwpn 0x%llx, wwnn0x%llx, portid 0x%06x\n",
-                     rport->port_name, rport->node_name,
+                     " wwpn 0x%llx, wwnn 0x%llx, rport 0x%p, portid 0x%06x\n",
+                     rport->port_name, rport->node_name, rport,
                      rport->port_id);
 
        if (fnic->in_remove)
                return;
 
        for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
+               abt_tag = tag;
                sc = scsi_host_find_tag(fnic->lport->host, tag);
                if (!sc)
                        continue;
@@ -1180,6 +1476,14 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
                        continue;
                }
 
+               if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
+                       (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
+                       FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+                       "fnic_terminate_rport_io dev rst not pending sc 0x%p\n",
+                       sc);
+                       spin_unlock_irqrestore(io_lock, flags);
+                       continue;
+               }
                /*
                 * Found IO that is still pending with firmware and
                 * belongs to rport that went away
@@ -1188,9 +1492,27 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
                        spin_unlock_irqrestore(io_lock, flags);
                        continue;
                }
+               if (io_req->abts_done) {
+                       shost_printk(KERN_ERR, fnic->lport->host,
+                       "fnic_terminate_rport_io: io_req->abts_done is set "
+                       "state is %s\n",
+                       fnic_ioreq_state_to_str(CMD_STATE(sc)));
+               }
+               if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) {
+                       FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+                                 "fnic_terminate_rport_io "
+                                 "IO not yet issued %p tag 0x%x flags "
+                                 "%x state %d\n",
+                                 sc, tag, CMD_FLAGS(sc), CMD_STATE(sc));
+               }
                old_ioreq_state = CMD_STATE(sc);
                CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
                CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
+               if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
+                       abt_tag = (tag | FNIC_TAG_DEV_RST);
+                       FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+                       "fnic_terminate_rport_io dev rst sc 0x%p\n", sc);
+               }
 
                BUG_ON(io_req->abts_done);
 
@@ -1203,7 +1525,7 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
                /* Now queue the abort command to firmware */
                int_to_scsilun(sc->device->lun, &fc_lun);
 
-               if (fnic_queue_abort_io_req(fnic, tag,
+               if (fnic_queue_abort_io_req(fnic, abt_tag,
                                            FCPIO_ITMF_ABT_TASK_TERM,
                                            fc_lun.scsi_lun, io_req)) {
                        /*
@@ -1212,12 +1534,17 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
                         * aborted later by scsi_eh, or cleaned up during
                         * lun reset
                         */
-                       io_lock = fnic_io_lock_hash(fnic, sc);
-
                        spin_lock_irqsave(io_lock, flags);
                        if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
                                CMD_STATE(sc) = old_ioreq_state;
                        spin_unlock_irqrestore(io_lock, flags);
+               } else {
+                       spin_lock_irqsave(io_lock, flags);
+                       if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
+                               CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
+                       else
+                               CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
+                       spin_unlock_irqrestore(io_lock, flags);
                }
        }
 
@@ -1232,13 +1559,15 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
 {
        struct fc_lport *lp;
        struct fnic *fnic;
-       struct fnic_io_req *io_req;
+       struct fnic_io_req *io_req = NULL;
        struct fc_rport *rport;
        spinlock_t *io_lock;
        unsigned long flags;
+       unsigned long start_time = 0;
        int ret = SUCCESS;
-       u32 task_req;
+       u32 task_req = 0;
        struct scsi_lun fc_lun;
+       int tag;
        DECLARE_COMPLETION_ONSTACK(tm_done);
 
        /* Wait for rport to unblock */
@@ -1249,9 +1578,13 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
 
        fnic = lport_priv(lp);
        rport = starget_to_rport(scsi_target(sc->device));
-       FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
-                       "Abort Cmd called FCID 0x%x, LUN 0x%x TAG %d\n",
-                       rport->port_id, sc->device->lun, sc->request->tag);
+       tag = sc->request->tag;
+       FNIC_SCSI_DBG(KERN_DEBUG,
+               fnic->lport->host,
+               "Abort Cmd called FCID 0x%x, LUN 0x%x TAG %x flags %x\n",
+               rport->port_id, sc->device->lun, tag, CMD_FLAGS(sc));
+
+       CMD_FLAGS(sc) = FNIC_NO_FLAGS;
 
        if (lp->state != LPORT_ST_READY || !(lp->link_up)) {
                ret = FAILED;
@@ -1318,6 +1651,10 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
                ret = FAILED;
                goto fnic_abort_cmd_end;
        }
+       if (task_req == FCPIO_ITMF_ABT_TASK)
+               CMD_FLAGS(sc) |= FNIC_IO_ABTS_ISSUED;
+       else
+               CMD_FLAGS(sc) |= FNIC_IO_TERM_ISSUED;
 
        /*
         * We queued an abort IO, wait for its completion.
@@ -1336,6 +1673,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
        io_req = (struct fnic_io_req *)CMD_SP(sc);
        if (!io_req) {
                spin_unlock_irqrestore(io_lock, flags);
+               CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
                ret = FAILED;
                goto fnic_abort_cmd_end;
        }
@@ -1344,6 +1682,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
        /* fw did not complete abort, timed out */
        if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
                spin_unlock_irqrestore(io_lock, flags);
+               CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_TIMED_OUT;
                ret = FAILED;
                goto fnic_abort_cmd_end;
        }
@@ -1359,12 +1698,21 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
 
        spin_unlock_irqrestore(io_lock, flags);
 
+       start_time = io_req->start_time;
        fnic_release_ioreq_buf(fnic, io_req, sc);
        mempool_free(io_req, fnic->io_req_pool);
 
 fnic_abort_cmd_end:
+       FNIC_TRACE(fnic_abort_cmd, sc->device->host->host_no,
+                 sc->request->tag, sc,
+                 jiffies_to_msecs(jiffies - start_time),
+                 0, ((u64)sc->cmnd[0] << 32 |
+                 (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
+                 (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
+                 (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+
        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
-                     "Returning from abort cmd %s\n",
+                     "Returning from abort cmd type %x %s\n", task_req,
                      (ret == SUCCESS) ?
                      "SUCCESS" : "FAILED");
        return ret;
@@ -1375,16 +1723,28 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic,
                                       struct fnic_io_req *io_req)
 {
        struct vnic_wq_copy *wq = &fnic->wq_copy[0];
+       struct Scsi_Host *host = fnic->lport->host;
        struct scsi_lun fc_lun;
        int ret = 0;
        unsigned long intr_flags;
 
+       spin_lock_irqsave(host->host_lock, intr_flags);
+       if (unlikely(fnic_chk_state_flags_locked(fnic,
+                                               FNIC_FLAGS_IO_BLOCKED))) {
+               spin_unlock_irqrestore(host->host_lock, intr_flags);
+               return FAILED;
+       } else
+               atomic_inc(&fnic->in_flight);
+       spin_unlock_irqrestore(host->host_lock, intr_flags);
+
        spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags);
 
        if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
                free_wq_copy_descs(fnic, wq);
 
        if (!vnic_wq_copy_desc_avail(wq)) {
+               FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+                         "queue_dr_io_req failure - no descriptors\n");
                ret = -EAGAIN;
                goto lr_io_req_end;
        }
@@ -1399,6 +1759,7 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic,
 
 lr_io_req_end:
        spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
+       atomic_dec(&fnic->in_flight);
 
        return ret;
 }
@@ -1412,7 +1773,7 @@ lr_io_req_end:
 static int fnic_clean_pending_aborts(struct fnic *fnic,
                                     struct scsi_cmnd *lr_sc)
 {
-       int tag;
+       int tag, abt_tag;
        struct fnic_io_req *io_req;
        spinlock_t *io_lock;
        unsigned long flags;
@@ -1421,6 +1782,7 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
        struct scsi_lun fc_lun;
        struct scsi_device *lun_dev = lr_sc->device;
        DECLARE_COMPLETION_ONSTACK(tm_done);
+       enum fnic_ioreq_state old_ioreq_state;
 
        for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
                sc = scsi_host_find_tag(fnic->lport->host, tag);
@@ -1449,7 +1811,41 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
                              "Found IO in %s on lun\n",
                              fnic_ioreq_state_to_str(CMD_STATE(sc)));
 
-               BUG_ON(CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING);
+               if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
+                       spin_unlock_irqrestore(io_lock, flags);
+                       continue;
+               }
+               if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
+                       (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
+                       FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+                               "%s dev rst not pending sc 0x%p\n", __func__,
+                               sc);
+                       spin_unlock_irqrestore(io_lock, flags);
+                       continue;
+               }
+               old_ioreq_state = CMD_STATE(sc);
+               /*
+                * Any pending IO issued prior to reset is expected to be
+                * in the abts pending state; if not, we need to set
+                * FNIC_IOREQ_ABTS_PENDING to indicate that the IO is abort pending.
+                * When the IO completes, it will be handed over and
+                * handled in this function.
+                */
+               CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
+
+               if (io_req->abts_done)
+                       shost_printk(KERN_ERR, fnic->lport->host,
+                         "%s: io_req->abts_done is set state is %s\n",
+                         __func__, fnic_ioreq_state_to_str(CMD_STATE(sc)));
+
+               BUG_ON(io_req->abts_done);
+
+               abt_tag = tag;
+               if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
+                       abt_tag |= FNIC_TAG_DEV_RST;
+                       FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+                                 "%s: dev rst sc 0x%p\n", __func__, sc);
+               }
 
                CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
                io_req->abts_done = &tm_done;
@@ -1458,17 +1854,25 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
                /* Now queue the abort command to firmware */
                int_to_scsilun(sc->device->lun, &fc_lun);
 
-               if (fnic_queue_abort_io_req(fnic, tag,
+               if (fnic_queue_abort_io_req(fnic, abt_tag,
                                            FCPIO_ITMF_ABT_TASK_TERM,
                                            fc_lun.scsi_lun, io_req)) {
                        spin_lock_irqsave(io_lock, flags);
                        io_req = (struct fnic_io_req *)CMD_SP(sc);
                        if (io_req)
                                io_req->abts_done = NULL;
+                       if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
+                               CMD_STATE(sc) = old_ioreq_state;
                        spin_unlock_irqrestore(io_lock, flags);
                        ret = 1;
                        goto clean_pending_aborts_end;
+               } else {
+                       spin_lock_irqsave(io_lock, flags);
+                       if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
+                               CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
+                       spin_unlock_irqrestore(io_lock, flags);
                }
+               CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
 
                wait_for_completion_timeout(&tm_done,
                                            msecs_to_jiffies
@@ -1479,8 +1883,8 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
                io_req = (struct fnic_io_req *)CMD_SP(sc);
                if (!io_req) {
                        spin_unlock_irqrestore(io_lock, flags);
-                       ret = 1;
-                       goto clean_pending_aborts_end;
+                       CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
+                       continue;
                }
 
                io_req->abts_done = NULL;
@@ -1488,6 +1892,7 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
                /* if abort is still pending with fw, fail */
                if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
                        spin_unlock_irqrestore(io_lock, flags);
+                       CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
                        ret = 1;
                        goto clean_pending_aborts_end;
                }
@@ -1498,10 +1903,75 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
                mempool_free(io_req, fnic->io_req_pool);
        }
 
+       schedule_timeout(msecs_to_jiffies(2 * fnic->config.ed_tov));
+
+       /* walk again to check if IOs are still pending in fw */
+       if (fnic_is_abts_pending(fnic, lr_sc))
+               ret = FAILED;
+
 clean_pending_aborts_end:
        return ret;
 }
 
+/**
+ * fnic_scsi_host_start_tag
+ * Allocates tagid from host's tag list
+ **/
+static inline int
+fnic_scsi_host_start_tag(struct fnic *fnic, struct scsi_cmnd *sc)
+{
+       struct blk_queue_tag *bqt = fnic->lport->host->bqt;
+       int tag, ret = SCSI_NO_TAG;
+
+       BUG_ON(!bqt);
+       if (!bqt) {
+               pr_err("Tags are not supported\n");
+               goto end;
+       }
+
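+       /* Scan the tag bitmap starting at bit 1, so tag 0 is never handed out */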
+       do {
+               tag = find_next_zero_bit(bqt->tag_map, bqt->max_depth, 1);
+               if (tag >= bqt->max_depth) {
+                       pr_err("Tag allocation failure\n");
+                       goto end;
+               }
+       } while (test_and_set_bit(tag, bqt->tag_map));
+
+       bqt->tag_index[tag] = sc->request;
+       sc->request->tag = tag;
+       sc->tag = tag;
+       if (!sc->request->special)
+               sc->request->special = sc;
+
+       ret = tag;
+
+end:
+       return ret;
+}
+
+/**
+ * fnic_scsi_host_end_tag
+ * frees tag allocated by fnic_scsi_host_start_tag.
+ **/
+static inline void
+fnic_scsi_host_end_tag(struct fnic *fnic, struct scsi_cmnd *sc)
+{
+       struct blk_queue_tag *bqt = fnic->lport->host->bqt;
+       int tag = sc->request->tag;
+
+       if (tag == SCSI_NO_TAG)
+               return;
+
+       BUG_ON(!bqt || !bqt->tag_index[tag]);
+       if (!bqt)
+               return;
+
+       bqt->tag_index[tag] = NULL;
+       clear_bit(tag, bqt->tag_map);
+
+       return;
+}
+
 /*
  * SCSI Eh thread issues a Lun Reset when one or more commands on a LUN
  * fail to get aborted. It calls driver's eh_device_reset with a SCSI command
@@ -1511,13 +1981,17 @@ int fnic_device_reset(struct scsi_cmnd *sc)
 {
        struct fc_lport *lp;
        struct fnic *fnic;
-       struct fnic_io_req *io_req;
+       struct fnic_io_req *io_req = NULL;
        struct fc_rport *rport;
        int status;
        int ret = FAILED;
        spinlock_t *io_lock;
        unsigned long flags;
+       unsigned long start_time = 0;
+       struct scsi_lun fc_lun;
+       int tag = 0;
        DECLARE_COMPLETION_ONSTACK(tm_done);
+       int tag_gen_flag = 0;   /* to track tags allocated by fnic driver */
 
        /* Wait for rport to unblock */
        fc_block_scsi_eh(sc);
@@ -1529,8 +2003,8 @@ int fnic_device_reset(struct scsi_cmnd *sc)
 
        rport = starget_to_rport(scsi_target(sc->device));
        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
-                       "Device reset called FCID 0x%x, LUN 0x%x\n",
-                       rport->port_id, sc->device->lun);
+                     "Device reset called FCID 0x%x, LUN 0x%x sc 0x%p\n",
+                     rport->port_id, sc->device->lun, sc);
 
        if (lp->state != LPORT_ST_READY || !(lp->link_up))
                goto fnic_device_reset_end;
@@ -1539,6 +2013,16 @@ int fnic_device_reset(struct scsi_cmnd *sc)
        if (fc_remote_port_chkready(rport))
                goto fnic_device_reset_end;
 
+       CMD_FLAGS(sc) = FNIC_DEVICE_RESET;
+       /* Allocate tag if not present */
+
+       tag = sc->request->tag;
+       if (unlikely(tag < 0)) {
+               tag = fnic_scsi_host_start_tag(fnic, sc);
+               if (unlikely(tag == SCSI_NO_TAG))
+                       goto fnic_device_reset_end;
+               tag_gen_flag = 1;
+       }
        io_lock = fnic_io_lock_hash(fnic, sc);
        spin_lock_irqsave(io_lock, flags);
        io_req = (struct fnic_io_req *)CMD_SP(sc);
@@ -1562,8 +2046,7 @@ int fnic_device_reset(struct scsi_cmnd *sc)
        CMD_LR_STATUS(sc) = FCPIO_INVALID_CODE;
        spin_unlock_irqrestore(io_lock, flags);
 
-       FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "TAG %d\n",
-                     sc->request->tag);
+       FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "TAG %x\n", tag);
 
        /*
         * issue the device reset, if enqueue failed, clean up the ioreq
@@ -1576,6 +2059,9 @@ int fnic_device_reset(struct scsi_cmnd *sc)
                        io_req->dr_done = NULL;
                goto fnic_device_reset_clean;
        }
+       spin_lock_irqsave(io_lock, flags);
+       CMD_FLAGS(sc) |= FNIC_DEV_RST_ISSUED;
+       spin_unlock_irqrestore(io_lock, flags);
 
        /*
         * Wait on the local completion for LUN reset.  The io_req may be
@@ -1588,12 +2074,13 @@ int fnic_device_reset(struct scsi_cmnd *sc)
        io_req = (struct fnic_io_req *)CMD_SP(sc);
        if (!io_req) {
                spin_unlock_irqrestore(io_lock, flags);
+               FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+                               "io_req is null tag 0x%x sc 0x%p\n", tag, sc);
                goto fnic_device_reset_end;
        }
        io_req->dr_done = NULL;
 
        status = CMD_LR_STATUS(sc);
-       spin_unlock_irqrestore(io_lock, flags);
 
        /*
         * If lun reset not completed, bail out with failed. io_req
@@ -1602,7 +2089,53 @@ int fnic_device_reset(struct scsi_cmnd *sc)
        if (status == FCPIO_INVALID_CODE) {
                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                              "Device reset timed out\n");
-               goto fnic_device_reset_end;
+               CMD_FLAGS(sc) |= FNIC_DEV_RST_TIMED_OUT;
+               spin_unlock_irqrestore(io_lock, flags);
+               int_to_scsilun(sc->device->lun, &fc_lun);
+               /*
+                * Issue abort and terminate on the device reset request.
+                * If queueing the abort fails, retry it after a delay.
+                */
+               while (1) {
+                       spin_lock_irqsave(io_lock, flags);
+                       if (CMD_FLAGS(sc) & FNIC_DEV_RST_TERM_ISSUED) {
+                               spin_unlock_irqrestore(io_lock, flags);
+                               break;
+                       }
+                       spin_unlock_irqrestore(io_lock, flags);
+                       if (fnic_queue_abort_io_req(fnic,
+                               tag | FNIC_TAG_DEV_RST,
+                               FCPIO_ITMF_ABT_TASK_TERM,
+                               fc_lun.scsi_lun, io_req)) {
+                               wait_for_completion_timeout(&tm_done,
+                               msecs_to_jiffies(FNIC_ABT_TERM_DELAY_TIMEOUT));
+                       } else {
+                               spin_lock_irqsave(io_lock, flags);
+                               CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
+                               CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
+                               io_req->abts_done = &tm_done;
+                               spin_unlock_irqrestore(io_lock, flags);
+                               FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+                               "Abort and terminate issued on Device reset "
+                               "tag 0x%x sc 0x%p\n", tag, sc);
+                               break;
+                       }
+               }
+               while (1) {
+                       spin_lock_irqsave(io_lock, flags);
+                       if (!(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) {
+                               spin_unlock_irqrestore(io_lock, flags);
+                               wait_for_completion_timeout(&tm_done,
+                               msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));
+                               break;
+                       } else {
+                               io_req = (struct fnic_io_req *)CMD_SP(sc);
+                               io_req->abts_done = NULL;
+                               goto fnic_device_reset_clean;
+                       }
+               }
+       } else {
+               spin_unlock_irqrestore(io_lock, flags);
        }
 
        /* Completed, but not successful, clean up the io_req, return fail */
@@ -1645,11 +2178,24 @@ fnic_device_reset_clean:
        spin_unlock_irqrestore(io_lock, flags);
 
        if (io_req) {
+               start_time = io_req->start_time;
                fnic_release_ioreq_buf(fnic, io_req, sc);
                mempool_free(io_req, fnic->io_req_pool);
        }
 
 fnic_device_reset_end:
+       FNIC_TRACE(fnic_device_reset, sc->device->host->host_no,
+                 sc->request->tag, sc,
+                 jiffies_to_msecs(jiffies - start_time),
+                 0, ((u64)sc->cmnd[0] << 32 |
+                 (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
+                 (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
+                 (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+
+       /* free tag if it is allocated */
+       if (unlikely(tag_gen_flag))
+               fnic_scsi_host_end_tag(fnic, sc);
+
        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                      "Returning from device reset %s\n",
                      (ret == SUCCESS) ?
@@ -1735,7 +2281,15 @@ void fnic_scsi_abort_io(struct fc_lport *lp)
        DECLARE_COMPLETION_ONSTACK(remove_wait);
 
        /* Issue firmware reset for fnic, wait for reset to complete */
+retry_fw_reset:
        spin_lock_irqsave(&fnic->fnic_lock, flags);
+       if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) {
+               /* fw reset is in progress, poll for its completion */
+               spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+               schedule_timeout(msecs_to_jiffies(100));
+               goto retry_fw_reset;
+       }
+
        fnic->remove_wait = &remove_wait;
        old_state = fnic->state;
        fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
@@ -1776,7 +2330,14 @@ void fnic_scsi_cleanup(struct fc_lport *lp)
        struct fnic *fnic = lport_priv(lp);
 
        /* issue fw reset */
+retry_fw_reset:
        spin_lock_irqsave(&fnic->fnic_lock, flags);
+       if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) {
+               /* fw reset is in progress, poll for its completion */
+               spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+               schedule_timeout(msecs_to_jiffies(100));
+               goto retry_fw_reset;
+       }
        old_state = fnic->state;
        fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
        fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr);
@@ -1822,3 +2383,61 @@ call_fc_exch_mgr_reset:
        fc_exch_mgr_reset(lp, sid, did);
 
 }
+
+/*
+ * fnic_is_abts_pending() is a helper function that
+ * walks through the tag map to check if any IOs are pending; if so, it
+ * returns 1 (true), otherwise 0 (false).
+ * If @lr_sc is non-NULL, it checks only IOs specific to that particular LUN;
+ * otherwise, it checks all IOs.
+ */
+int fnic_is_abts_pending(struct fnic *fnic, struct scsi_cmnd *lr_sc)
+{
+       int tag;
+       struct fnic_io_req *io_req;
+       spinlock_t *io_lock;
+       unsigned long flags;
+       int ret = 0;
+       struct scsi_cmnd *sc;
+       struct scsi_device *lun_dev = NULL;
+
+       if (lr_sc)
+               lun_dev = lr_sc->device;
+
+       /* walk again to check if IOs are still pending in fw */
+       for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
+               sc = scsi_host_find_tag(fnic->lport->host, tag);
+               /*
+                * ignore this lun reset cmd or cmds that do not belong to
+                * this lun
+                */
+               if (!sc || (lr_sc && (sc->device != lun_dev || sc == lr_sc)))
+                       continue;
+
+               io_lock = fnic_io_lock_hash(fnic, sc);
+               spin_lock_irqsave(io_lock, flags);
+
+               io_req = (struct fnic_io_req *)CMD_SP(sc);
+
+               if (!io_req || sc->device != lun_dev) {
+                       spin_unlock_irqrestore(io_lock, flags);
+                       continue;
+               }
+
+               /*
+                * Found IO that is still pending with firmware and
+                * belongs to the LUN that we are resetting
+                */
+               FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+                             "Found IO in %s on lun\n",
+                             fnic_ioreq_state_to_str(CMD_STATE(sc)));
+
+               if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
+                       ret = 1;
+               spin_unlock_irqrestore(io_lock, flags);
+       }
+
+       return ret;
+}
diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c
new file mode 100644 (file)
index 0000000..23a60e3
--- /dev/null
@@ -0,0 +1,273 @@
+/*
+ * Copyright 2012 Cisco Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/mempool.h>
+#include <linux/errno.h>
+#include <linux/spinlock.h>
+#include <linux/kallsyms.h>
+#include "fnic_io.h"
+#include "fnic.h"
+
+unsigned int trace_max_pages;
+static int fnic_max_trace_entries;
+
+static unsigned long fnic_trace_buf_p;
+static DEFINE_SPINLOCK(fnic_trace_lock);
+
+static fnic_trace_dbg_t fnic_trace_entries;
+int fnic_tracing_enabled = 1;
+
+/*
+ * fnic_trace_get_buf - Give the caller a buffer pointer to fill in trace information
+ *
+ * Description:
+ * This routine gets the next available trace buffer entry location @wr_idx
+ * from the allocated trace buffer pages and gives that memory location
+ * to the caller to store the trace information.
+ *
+ * Return Value:
+ * This routine returns a pointer to the next available trace entry
+ * @fnic_buf_head for the caller to fill in trace information.
+ */
+fnic_trace_data_t *fnic_trace_get_buf(void)
+{
+       unsigned long fnic_buf_head;
+       unsigned long flags;
+
+       spin_lock_irqsave(&fnic_trace_lock, flags);
+
+       /*
+        * Get next available memory location for writing trace information
+        * at @wr_idx and increment @wr_idx
+        */
+       fnic_buf_head =
+               fnic_trace_entries.page_offset[fnic_trace_entries.wr_idx];
+       fnic_trace_entries.wr_idx++;
+
+       /*
+        * If the trace buffer is full, wrap the write index @wr_idx
+        * back to zero
+        */
+       if (fnic_trace_entries.wr_idx >= fnic_max_trace_entries)
+               fnic_trace_entries.wr_idx = 0;
+
+       /*
+        * If the write index @wr_idx catches up with the read index @rd_idx,
+        * increment @rd_idx to move to the next entry in the trace buffer
+        */
+       if (fnic_trace_entries.wr_idx == fnic_trace_entries.rd_idx) {
+               fnic_trace_entries.rd_idx++;
+               if (fnic_trace_entries.rd_idx >= fnic_max_trace_entries)
+                       fnic_trace_entries.rd_idx = 0;
+       }
+       spin_unlock_irqrestore(&fnic_trace_lock, flags);
+       return (fnic_trace_data_t *)fnic_buf_head;
+}
+
+/*
+ * fnic_get_trace_data - Copy trace buffer to a memory file
+ * @fnic_dbgfs_t: pointer to debugfs trace buffer
+ *
+ * Description:
+ * This routine gathers the fnic trace debugfs data from the fnic_trace_data_t
+ * buffer and dumps it to fnic_dbgfs_t. It will start at the rd_idx entry in
+ * the log and process the log until the end of the buffer. Then it will gather
+ * from the beginning of the log and process until the current entry @wr_idx.
+ *
+ * Return Value:
+ * This routine returns the number of bytes that were dumped into fnic_dbgfs_t
+ */
+int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)
+{
+       int rd_idx;
+       int wr_idx;
+       int len = 0;
+       unsigned long flags;
+       char str[KSYM_SYMBOL_LEN];
+       struct timespec val;
+       fnic_trace_data_t *tbp;
+
+       spin_lock_irqsave(&fnic_trace_lock, flags);
+       rd_idx = fnic_trace_entries.rd_idx;
+       wr_idx = fnic_trace_entries.wr_idx;
+       if (wr_idx < rd_idx) {
+               while (1) {
+                       /* Start from read index @rd_idx */
+                       tbp = (fnic_trace_data_t *)
+                                 fnic_trace_entries.page_offset[rd_idx];
+                       if (!tbp) {
+                               spin_unlock_irqrestore(&fnic_trace_lock, flags);
+                               return 0;
+                       }
+                       /* Convert function pointer to function name */
+                       if (sizeof(unsigned long) < 8) {
+                               sprint_symbol(str, tbp->fnaddr.low);
+                               jiffies_to_timespec(tbp->timestamp.low, &val);
+                       } else {
+                               sprint_symbol(str, tbp->fnaddr.val);
+                               jiffies_to_timespec(tbp->timestamp.val, &val);
+                       }
+                       /*
+                        * Dump trace buffer entry to memory file
+                        * and increment read index @rd_idx
+                        */
+                       len += snprintf(fnic_dbgfs_prt->buffer + len,
+                                 (trace_max_pages * PAGE_SIZE * 3) - len,
+                                 "%16lu.%16lu %-50s %8x %8x %16llx %16llx "
+                                 "%16llx %16llx %16llx\n", val.tv_sec,
+                                 val.tv_nsec, str, tbp->host_no, tbp->tag,
+                                 tbp->data[0], tbp->data[1], tbp->data[2],
+                                 tbp->data[3], tbp->data[4]);
+                       rd_idx++;
+                       /*
+                        * If rd_idx has reached the maximum number of trace
+                        * entries, wrap rd_idx back to zero
+                        */
+                       if (rd_idx > (fnic_max_trace_entries-1))
+                               rd_idx = 0;
+                       /*
+                        * Continue dumping trace buffer entries into the
+                        * memory file until rd_idx reaches the write index
+                        */
+                       if (rd_idx == wr_idx)
+                               break;
+               }
+       } else if (wr_idx > rd_idx) {
+               while (1) {
+                       /* Start from read index @rd_idx */
+                       tbp = (fnic_trace_data_t *)
+                                 fnic_trace_entries.page_offset[rd_idx];
+                       if (!tbp) {
+                               spin_unlock_irqrestore(&fnic_trace_lock, flags);
+                               return 0;
+                       }
+                       /* Convert function pointer to function name */
+                       if (sizeof(unsigned long) < 8) {
+                               sprint_symbol(str, tbp->fnaddr.low);
+                               jiffies_to_timespec(tbp->timestamp.low, &val);
+                       } else {
+                               sprint_symbol(str, tbp->fnaddr.val);
+                               jiffies_to_timespec(tbp->timestamp.val, &val);
+                       }
+                       /*
+                        * Dump trace buffer entry to memory file
+                        * and increment read index @rd_idx
+                        */
+                       len += snprintf(fnic_dbgfs_prt->buffer + len,
+                                 (trace_max_pages * PAGE_SIZE * 3) - len,
+                                 "%16lu.%16lu %-50s %8x %8x %16llx %16llx "
+                                 "%16llx %16llx %16llx\n", val.tv_sec,
+                                 val.tv_nsec, str, tbp->host_no, tbp->tag,
+                                 tbp->data[0], tbp->data[1], tbp->data[2],
+                                 tbp->data[3], tbp->data[4]);
+                       rd_idx++;
+                       /*
+                        * Continue dumping trace buffer entries into the
+                        * memory file until rd_idx reaches the write index
+                        */
+                       if (rd_idx == wr_idx)
+                               break;
+               }
+       }
+       spin_unlock_irqrestore(&fnic_trace_lock, flags);
+       return len;
+}
+
+/*
+ * fnic_trace_buf_init - Initialize fnic trace buffer logging facility
+ *
+ * Description:
+ * Initialize the trace buffer data structure by allocating the required memory
+ * and setting the page_offset of every trace entry by adding the trace entry
+ * length to the previous page_offset value.
+ */
+int fnic_trace_buf_init(void)
+{
+       unsigned long fnic_buf_head;
+       int i;
+       int err = 0;
+
+       trace_max_pages = fnic_trace_max_pages;
+       fnic_max_trace_entries = (trace_max_pages * PAGE_SIZE)/
+                                         FNIC_ENTRY_SIZE_BYTES;
+
+       fnic_trace_buf_p = (unsigned long)vmalloc((trace_max_pages * PAGE_SIZE));
+       if (!fnic_trace_buf_p) {
+               printk(KERN_ERR PFX "Failed to allocate memory "
+                                 "for fnic_trace_buf_p\n");
+               err = -ENOMEM;
+               goto err_fnic_trace_buf_init;
+       }
+       memset((void *)fnic_trace_buf_p, 0, (trace_max_pages * PAGE_SIZE));
+
+       fnic_trace_entries.page_offset = vmalloc(fnic_max_trace_entries *
+                                                 sizeof(unsigned long));
+       if (!fnic_trace_entries.page_offset) {
+               printk(KERN_ERR PFX "Failed to allocate memory for"
+                                 " page_offset\n");
+               if (fnic_trace_buf_p) {
+                       vfree((void *)fnic_trace_buf_p);
+                       fnic_trace_buf_p = 0;
+               }
+               err = -ENOMEM;
+               goto err_fnic_trace_buf_init;
+       }
+       memset((void *)fnic_trace_entries.page_offset, 0,
+                 (fnic_max_trace_entries * sizeof(unsigned long)));
+       fnic_trace_entries.wr_idx = fnic_trace_entries.rd_idx = 0;
+       fnic_buf_head = fnic_trace_buf_p;
+
+       /*
+        * Set page_offset field of fnic_trace_entries struct by
+        * calculating memory location for every trace entry using
+        * length of each trace entry
+        */
+       for (i = 0; i < fnic_max_trace_entries; i++) {
+               fnic_trace_entries.page_offset[i] = fnic_buf_head;
+               fnic_buf_head += FNIC_ENTRY_SIZE_BYTES;
+       }
+       err = fnic_trace_debugfs_init();
+       if (err < 0) {
+               printk(KERN_ERR PFX "Failed to initialize debugfs for tracing\n");
+               goto err_fnic_trace_debugfs_init;
+       }
+       printk(KERN_INFO PFX "Successfully Initialized Trace Buffer\n");
+       return err;
+err_fnic_trace_debugfs_init:
+       fnic_trace_free();
+err_fnic_trace_buf_init:
+       return err;
+}
+
+/*
+ * fnic_trace_free - Free memory of fnic trace data structures.
+ */
+void fnic_trace_free(void)
+{
+       fnic_tracing_enabled = 0;
+       fnic_trace_debugfs_terminate();
+       if (fnic_trace_entries.page_offset) {
+               vfree((void *)fnic_trace_entries.page_offset);
+               fnic_trace_entries.page_offset = NULL;
+       }
+       if (fnic_trace_buf_p) {
+               vfree((void *)fnic_trace_buf_p);
+               fnic_trace_buf_p = 0;
+       }
+       printk(KERN_INFO PFX "Successfully Freed Trace Buffer\n");
+}
diff --git a/drivers/scsi/fnic/fnic_trace.h b/drivers/scsi/fnic/fnic_trace.h
new file mode 100644 (file)
index 0000000..cef42b4
--- /dev/null
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2012 Cisco Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __FNIC_TRACE_H__
+#define __FNIC_TRACE_H__
+
+#define FNIC_ENTRY_SIZE_BYTES 64
+
+extern ssize_t simple_read_from_buffer(void __user *to,
+                                         size_t count,
+                                         loff_t *ppos,
+                                         const void *from,
+                                         size_t available);
+
+extern unsigned int fnic_trace_max_pages;
+extern int fnic_tracing_enabled;
+extern unsigned int trace_max_pages;
+
+typedef struct fnic_trace_dbg {
+       int wr_idx;
+       int rd_idx;
+       unsigned long *page_offset;
+} fnic_trace_dbg_t;
+
+typedef struct fnic_dbgfs {
+       int buffer_len;
+       char *buffer;
+} fnic_dbgfs_t;
+
+struct fnic_trace_data {
+       union {
+               struct {
+                       u32 low;
+                       u32 high;
+               };
+               u64 val;
+       } timestamp, fnaddr;
+       u32 host_no;
+       u32 tag;
+       u64 data[5];
+} __attribute__((__packed__));
+
+typedef struct fnic_trace_data fnic_trace_data_t;
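+/*
+ * Each packed trace entry is 8 (timestamp) + 8 (fnaddr) + 4 (host_no) +
+ * 4 (tag) + 40 (data[5]) = 64 bytes, i.e. FNIC_ENTRY_SIZE_BYTES, so a
+ * buffer of trace_max_pages pages holds
+ * (trace_max_pages * PAGE_SIZE) / FNIC_ENTRY_SIZE_BYTES entries.
+ */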
+
+#define FNIC_TRACE_ENTRY_SIZE \
+                 (FNIC_ENTRY_SIZE_BYTES - sizeof(fnic_trace_data_t))
+
+#define FNIC_TRACE(_fn, _hn, _t, _a, _b, _c, _d, _e)           \
+       if (unlikely(fnic_tracing_enabled)) {                   \
+               fnic_trace_data_t *trace_buf = fnic_trace_get_buf(); \
+               if (trace_buf) { \
+                       if (sizeof(unsigned long) < 8) { \
+                               trace_buf->timestamp.low = jiffies; \
+                               trace_buf->fnaddr.low = (u32)(unsigned long)_fn; \
+                       } else { \
+                               trace_buf->timestamp.val = jiffies; \
+                               trace_buf->fnaddr.val = (u64)(unsigned long)_fn; \
+                       } \
+                       trace_buf->host_no = _hn; \
+                       trace_buf->tag = _t; \
+                       trace_buf->data[0] = (u64)(unsigned long)_a; \
+                       trace_buf->data[1] = (u64)(unsigned long)_b; \
+                       trace_buf->data[2] = (u64)(unsigned long)_c; \
+                       trace_buf->data[3] = (u64)(unsigned long)_d; \
+                       trace_buf->data[4] = (u64)(unsigned long)_e; \
+               } \
+       }
+
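+/*
+ * Illustrative use of FNIC_TRACE(), mirroring the fnic_abort_cmd() call site:
+ * each trace entry is keyed by the SCSI host number and request tag, with the
+ * remaining arguments packed into the five 64-bit data slots, e.g.:
+ *
+ *     FNIC_TRACE(fnic_abort_cmd, sc->device->host->host_no,
+ *               sc->request->tag, sc,
+ *               jiffies_to_msecs(jiffies - start_time),
+ *               0, ((u64)sc->cmnd[0] << 32 |
+ *               (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
+ *               (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
+ *               (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+ */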
+fnic_trace_data_t *fnic_trace_get_buf(void);
+int fnic_get_trace_data(fnic_dbgfs_t *);
+int fnic_trace_buf_init(void);
+void fnic_trace_free(void);
+int fnic_trace_debugfs_init(void);
+void fnic_trace_debugfs_terminate(void);
+
+#endif
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 4f33806..7f4f790 100644 (file)
@@ -165,7 +165,7 @@ static void cmd_free(struct ctlr_info *h, struct CommandList *c);
 static void cmd_special_free(struct ctlr_info *h, struct CommandList *c);
 static struct CommandList *cmd_alloc(struct ctlr_info *h);
 static struct CommandList *cmd_special_alloc(struct ctlr_info *h);
-static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
+static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
        void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
        int cmd_type);
 
@@ -1131,7 +1131,7 @@ clean:
        return -ENOMEM;
 }
 
-static void hpsa_map_sg_chain_block(struct ctlr_info *h,
+static int hpsa_map_sg_chain_block(struct ctlr_info *h,
        struct CommandList *c)
 {
        struct SGDescriptor *chain_sg, *chain_block;
@@ -1144,8 +1144,15 @@ static void hpsa_map_sg_chain_block(struct ctlr_info *h,
                (c->Header.SGTotal - h->max_cmd_sg_entries);
        temp64 = pci_map_single(h->pdev, chain_block, chain_sg->Len,
                                PCI_DMA_TODEVICE);
+       if (dma_mapping_error(&h->pdev->dev, temp64)) {
+               /* prevent subsequent unmapping */
+               chain_sg->Addr.lower = 0;
+               chain_sg->Addr.upper = 0;
+               return -1;
+       }
        chain_sg->Addr.lower = (u32) (temp64 & 0x0FFFFFFFFULL);
        chain_sg->Addr.upper = (u32) ((temp64 >> 32) & 0x0FFFFFFFFULL);
+       return 0;
 }
 
 static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
@@ -1390,7 +1397,7 @@ static void hpsa_pci_unmap(struct pci_dev *pdev,
        }
 }
 
-static void hpsa_map_one(struct pci_dev *pdev,
+static int hpsa_map_one(struct pci_dev *pdev,
                struct CommandList *cp,
                unsigned char *buf,
                size_t buflen,
@@ -1401,10 +1408,16 @@ static void hpsa_map_one(struct pci_dev *pdev,
        if (buflen == 0 || data_direction == PCI_DMA_NONE) {
                cp->Header.SGList = 0;
                cp->Header.SGTotal = 0;
-               return;
+               return 0;
        }
 
        addr64 = (u64) pci_map_single(pdev, buf, buflen, data_direction);
+       if (dma_mapping_error(&pdev->dev, addr64)) {
+               /* Prevent subsequent unmap of something never mapped */
+               cp->Header.SGList = 0;
+               cp->Header.SGTotal = 0;
+               return -1;
+       }
        cp->SG[0].Addr.lower =
          (u32) (addr64 & (u64) 0x00000000FFFFFFFF);
        cp->SG[0].Addr.upper =
@@ -1412,6 +1425,7 @@ static void hpsa_map_one(struct pci_dev *pdev,
        cp->SG[0].Len = buflen;
        cp->Header.SGList = (u8) 1;   /* no. SGs contig in this cmd */
        cp->Header.SGTotal = (u16) 1; /* total sgs in this cmd list */
+       return 0;
 }
 
 static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
@@ -1540,13 +1554,18 @@ static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
                return -ENOMEM;
        }
 
-       fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, page, scsi3addr, TYPE_CMD);
+       if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize,
+                       page, scsi3addr, TYPE_CMD)) {
+               rc = -1;
+               goto out;
+       }
        hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
        ei = c->err_info;
        if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
                hpsa_scsi_interpret_error(c);
                rc = -1;
        }
+out:
        cmd_special_free(h, c);
        return rc;
 }
@@ -1564,7 +1583,9 @@ static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr)
                return -ENOMEM;
        }
 
-       fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, scsi3addr, TYPE_MSG);
+       /* fill_cmd can't fail here, no data buffer to map. */
+       (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h,
+                       NULL, 0, 0, scsi3addr, TYPE_MSG);
        hpsa_scsi_do_simple_cmd_core(h, c);
        /* no unmap needed here because no data xfer. */
 
@@ -1631,8 +1652,11 @@ static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
        }
        /* address the controller */
        memset(scsi3addr, 0, sizeof(scsi3addr));
-       fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
-               buf, bufsize, 0, scsi3addr, TYPE_CMD);
+       if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
+               buf, bufsize, 0, scsi3addr, TYPE_CMD)) {
+               rc = -1;
+               goto out;
+       }
        if (extended_response)
                c->Request.CDB[1] = extended_response;
        hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
@@ -1642,6 +1666,7 @@ static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
                hpsa_scsi_interpret_error(c);
                rc = -1;
        }
+out:
        cmd_special_free(h, c);
        return rc;
 }
@@ -2105,7 +2130,10 @@ static int hpsa_scatter_gather(struct ctlr_info *h,
        if (chained) {
                cp->Header.SGList = h->max_cmd_sg_entries;
                cp->Header.SGTotal = (u16) (use_sg + 1);
-               hpsa_map_sg_chain_block(h, cp);
+               if (hpsa_map_sg_chain_block(h, cp)) {
+                       scsi_dma_unmap(cmd);
+                       return -1;
+               }
                return 0;
        }
 
@@ -2353,8 +2381,9 @@ static int wait_for_device_to_become_ready(struct ctlr_info *h,
                if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
                        waittime = waittime * 2;
 
-               /* Send the Test Unit Ready */
-               fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, lunaddr, TYPE_CMD);
+               /* Send the Test Unit Ready, fill_cmd can't fail, no mapping */
+               (void) fill_cmd(c, TEST_UNIT_READY, h,
+                               NULL, 0, 0, lunaddr, TYPE_CMD);
                hpsa_scsi_do_simple_cmd_core(h, c);
                /* no unmap needed here because no data xfer. */
 
@@ -2439,7 +2468,9 @@ static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
                return -ENOMEM;
        }
 
-       fill_cmd(c, HPSA_ABORT_MSG, h, abort, 0, 0, scsi3addr, TYPE_MSG);
+       /* fill_cmd can't fail here, no buffer to map */
+       (void) fill_cmd(c, HPSA_ABORT_MSG, h, abort,
+               0, 0, scsi3addr, TYPE_MSG);
        if (swizzle)
                swizzle_abort_tag(&c->Request.CDB[4]);
        hpsa_scsi_do_simple_cmd_core(h, c);
@@ -2928,6 +2959,7 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
        struct CommandList *c;
        char *buff = NULL;
        union u64bit temp64;
+       int rc = 0;
 
        if (!argp)
                return -EINVAL;
@@ -2947,8 +2979,8 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
                        /* Copy the data into the buffer we created */
                        if (copy_from_user(buff, iocommand.buf,
                                iocommand.buf_size)) {
-                               kfree(buff);
-                               return -EFAULT;
+                               rc = -EFAULT;
+                               goto out_kfree;
                        }
                } else {
                        memset(buff, 0, iocommand.buf_size);
@@ -2956,8 +2988,8 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
        }
        c = cmd_special_alloc(h);
        if (c == NULL) {
-               kfree(buff);
-               return -ENOMEM;
+               rc = -ENOMEM;
+               goto out_kfree;
        }
        /* Fill in the command type */
        c->cmd_type = CMD_IOCTL_PEND;
@@ -2982,6 +3014,13 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
        if (iocommand.buf_size > 0) {
                temp64.val = pci_map_single(h->pdev, buff,
                        iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
+               if (dma_mapping_error(&h->pdev->dev, temp64.val)) {
+                       c->SG[0].Addr.lower = 0;
+                       c->SG[0].Addr.upper = 0;
+                       c->SG[0].Len = 0;
+                       rc = -ENOMEM;
+                       goto out;
+               }
                c->SG[0].Addr.lower = temp64.val32.lower;
                c->SG[0].Addr.upper = temp64.val32.upper;
                c->SG[0].Len = iocommand.buf_size;
@@ -2996,22 +3035,22 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
        memcpy(&iocommand.error_info, c->err_info,
                sizeof(iocommand.error_info));
        if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
-               kfree(buff);
-               cmd_special_free(h, c);
-               return -EFAULT;
+               rc = -EFAULT;
+               goto out;
        }
        if (iocommand.Request.Type.Direction == XFER_READ &&
                iocommand.buf_size > 0) {
                /* Copy the data out of the buffer we created */
                if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
-                       kfree(buff);
-                       cmd_special_free(h, c);
-                       return -EFAULT;
+                       rc = -EFAULT;
+                       goto out;
                }
        }
-       kfree(buff);
+out:
        cmd_special_free(h, c);
-       return 0;
+out_kfree:
+       kfree(buff);
+       return rc;
 }
 
 static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
@@ -3103,6 +3142,15 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
                for (i = 0; i < sg_used; i++) {
                        temp64.val = pci_map_single(h->pdev, buff[i],
                                    buff_size[i], PCI_DMA_BIDIRECTIONAL);
+                       if (dma_mapping_error(&h->pdev->dev, temp64.val)) {
+                               c->SG[i].Addr.lower = 0;
+                               c->SG[i].Addr.upper = 0;
+                               c->SG[i].Len = 0;
+                               hpsa_pci_unmap(h->pdev, c, i,
+                                       PCI_DMA_BIDIRECTIONAL);
+                               status = -ENOMEM;
+                               goto cleanup1;
+                       }
                        c->SG[i].Addr.lower = temp64.val32.lower;
                        c->SG[i].Addr.upper = temp64.val32.upper;
                        c->SG[i].Len = buff_size[i];
@@ -3190,7 +3238,8 @@ static int hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr,
        c = cmd_alloc(h);
        if (!c)
                return -ENOMEM;
-       fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
+       /* fill_cmd can't fail here, no data buffer to map */
+       (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
                RAID_CTLR_LUNID, TYPE_MSG);
        c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */
        c->waiting = NULL;
@@ -3202,7 +3251,7 @@ static int hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr,
        return 0;
 }
 
-static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
+static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
        void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
        int cmd_type)
 {
@@ -3271,7 +3320,7 @@ static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
                default:
                        dev_warn(&h->pdev->dev, "unknown command 0x%c\n", cmd);
                        BUG();
-                       return;
+                       return -1;
                }
        } else if (cmd_type == TYPE_MSG) {
                switch (cmd) {
@@ -3343,10 +3392,9 @@ static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
        default:
                pci_dir = PCI_DMA_BIDIRECTIONAL;
        }
-
-       hpsa_map_one(h->pdev, c, buff, size, pci_dir);
-
-       return;
+       if (hpsa_map_one(h->pdev, c, buff, size, pci_dir))
+               return -1;
+       return 0;
 }
 
 /*
@@ -4882,10 +4930,13 @@ static void hpsa_flush_cache(struct ctlr_info *h)
                dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
                goto out_of_memory;
        }
-       fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
-               RAID_CTLR_LUNID, TYPE_CMD);
+       if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
+               RAID_CTLR_LUNID, TYPE_CMD)) {
+               goto out;
+       }
        hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_TODEVICE);
        if (c->err_info->CommandStatus != 0)
+out:
                dev_warn(&h->pdev->dev,
                        "error flushing cache on controller\n");
        cmd_special_free(h, c);
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 8fa79b8..f328089 100644 (file)
@@ -6112,7 +6112,7 @@ static int ipr_queuecommand(struct Scsi_Host *shost,
         * We have told the host to stop giving us new requests, but
         * ERP ops don't count. FIXME
         */
-       if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead)) {
+       if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
                spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
                return SCSI_MLQUEUE_HOST_BUSY;
        }
@@ -6121,7 +6121,7 @@ static int ipr_queuecommand(struct Scsi_Host *shost,
         * FIXME - Create scsi_set_host_offline interface
         *  and the ioa_is_dead check can be removed
         */
-       if (unlikely(hrrq->ioa_is_dead || !res)) {
+       if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
                spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
                goto err_nodev;
        }
@@ -6741,14 +6741,17 @@ static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
 
        ENTER;
+       if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
+               ipr_trace;
+               spin_unlock_irq(ioa_cfg->host->host_lock);
+               scsi_unblock_requests(ioa_cfg->host);
+               spin_lock_irq(ioa_cfg->host->host_lock);
+       }
+
        ioa_cfg->in_reset_reload = 0;
        ioa_cfg->reset_retries = 0;
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
        wake_up_all(&ioa_cfg->reset_wait_q);
-
-       spin_unlock_irq(ioa_cfg->host->host_lock);
-       scsi_unblock_requests(ioa_cfg->host);
-       spin_lock_irq(ioa_cfg->host->host_lock);
        LEAVE;
 
        return IPR_RC_JOB_RETURN;
@@ -8494,7 +8497,8 @@ static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
                spin_unlock(&ioa_cfg->hrrq[i]._lock);
        }
        wmb();
-       scsi_block_requests(ioa_cfg->host);
+       if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa)
+               scsi_block_requests(ioa_cfg->host);
 
        ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
        ioa_cfg->reset_cmd = ipr_cmd;
@@ -8549,9 +8553,11 @@ static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
                        ipr_fail_all_ops(ioa_cfg);
                        wake_up_all(&ioa_cfg->reset_wait_q);
 
-                       spin_unlock_irq(ioa_cfg->host->host_lock);
-                       scsi_unblock_requests(ioa_cfg->host);
-                       spin_lock_irq(ioa_cfg->host->host_lock);
+                       if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
+                               spin_unlock_irq(ioa_cfg->host->host_lock);
+                               scsi_unblock_requests(ioa_cfg->host);
+                               spin_lock_irq(ioa_cfg->host->host_lock);
+                       }
                        return;
                } else {
                        ioa_cfg->in_ioa_bringdown = 1;
@@ -9695,6 +9701,7 @@ static void __ipr_remove(struct pci_dev *pdev)
 {
        unsigned long host_lock_flags = 0;
        struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
+       int i;
        ENTER;
 
        spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
@@ -9704,6 +9711,12 @@ static void __ipr_remove(struct pci_dev *pdev)
                spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
        }
 
+       for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+               spin_lock(&ioa_cfg->hrrq[i]._lock);
+               ioa_cfg->hrrq[i].removing_ioa = 1;
+               spin_unlock(&ioa_cfg->hrrq[i]._lock);
+       }
+       wmb();
        ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
 
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
index 1a9a246..21a6ff1 100644 (file)
@@ -493,6 +493,7 @@ struct ipr_hrr_queue {
        u8 allow_interrupts:1;
        u8 ioa_is_dead:1;
        u8 allow_cmds:1;
+       u8 removing_ioa:1;
 
        struct blk_iopoll iopoll;
 };
index fcb9d0b..09c81b2 100644 (file)
@@ -1381,10 +1381,10 @@ static void fc_fcp_timeout(unsigned long data)
 
        fsp->state |= FC_SRB_FCP_PROCESSING_TMO;
 
-       if (fsp->state & FC_SRB_RCV_STATUS)
-               fc_fcp_complete_locked(fsp);
-       else if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED)
+       if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED)
                fc_fcp_rec(fsp);
+       else if (fsp->state & FC_SRB_RCV_STATUS)
+               fc_fcp_complete_locked(fsp);
        else
                fc_fcp_recovery(fsp, FC_TIMED_OUT);
        fsp->state &= ~FC_SRB_FCP_PROCESSING_TMO;
index c2830cc..b74189d 100644 (file)
@@ -41,25 +41,25 @@ extern unsigned int fc_debug_logging;
 
 #define FC_LIBFC_DBG(fmt, args...)                                     \
        FC_CHECK_LOGGING(FC_LIBFC_LOGGING,                              \
-                        printk(KERN_INFO "libfc: " fmt, ##args))
+                        pr_info("libfc: " fmt, ##args))
 
 #define FC_LPORT_DBG(lport, fmt, args...)                              \
        FC_CHECK_LOGGING(FC_LPORT_LOGGING,                              \
-                        printk(KERN_INFO "host%u: lport %6.6x: " fmt,  \
-                               (lport)->host->host_no,                 \
-                               (lport)->port_id, ##args))
+                        pr_info("host%u: lport %6.6x: " fmt,           \
+                                (lport)->host->host_no,                \
+                                (lport)->port_id, ##args))
 
-#define FC_DISC_DBG(disc, fmt, args...)                                \
-       FC_CHECK_LOGGING(FC_DISC_LOGGING,                       \
-                        printk(KERN_INFO "host%u: disc: " fmt, \
-                               fc_disc_lport(disc)->host->host_no,     \
-                               ##args))
+#define FC_DISC_DBG(disc, fmt, args...)                                        \
+       FC_CHECK_LOGGING(FC_DISC_LOGGING,                               \
+                        pr_info("host%u: disc: " fmt,                  \
+                                fc_disc_lport(disc)->host->host_no,    \
+                                ##args))
 
 #define FC_RPORT_ID_DBG(lport, port_id, fmt, args...)                  \
        FC_CHECK_LOGGING(FC_RPORT_LOGGING,                              \
-                        printk(KERN_INFO "host%u: rport %6.6x: " fmt,  \
-                               (lport)->host->host_no,                 \
-                               (port_id), ##args))
+                        pr_info("host%u: rport %6.6x: " fmt,           \
+                                (lport)->host->host_no,                \
+                                (port_id), ##args))
 
 #define FC_RPORT_DBG(rdata, fmt, args...)                              \
        FC_RPORT_ID_DBG((rdata)->local_port, (rdata)->ids.port_id, fmt, ##args)
@@ -70,13 +70,13 @@ extern unsigned int fc_debug_logging;
                if ((pkt)->seq_ptr) {                                   \
                        struct fc_exch *_ep = NULL;                     \
                        _ep = fc_seq_exch((pkt)->seq_ptr);              \
-                       printk(KERN_INFO "host%u: fcp: %6.6x: "         \
+                       pr_info("host%u: fcp: %6.6x: "                  \
                                "xid %04x-%04x: " fmt,                  \
                                (pkt)->lp->host->host_no,               \
                                (pkt)->rport->port_id,                  \
                                (_ep)->oxid, (_ep)->rxid, ##args);      \
                } else {                                                \
-                       printk(KERN_INFO "host%u: fcp: %6.6x: " fmt,    \
+                       pr_info("host%u: fcp: %6.6x: " fmt,             \
                                (pkt)->lp->host->host_no,               \
                                (pkt)->rport->port_id, ##args);         \
                }                                                       \
@@ -84,14 +84,14 @@ extern unsigned int fc_debug_logging;
 
 #define FC_EXCH_DBG(exch, fmt, args...)                                        \
        FC_CHECK_LOGGING(FC_EXCH_LOGGING,                               \
-                        printk(KERN_INFO "host%u: xid %4x: " fmt,      \
-                               (exch)->lp->host->host_no,              \
-                               exch->xid, ##args))
+                        pr_info("host%u: xid %4x: " fmt,               \
+                                (exch)->lp->host->host_no,             \
+                                exch->xid, ##args))
 
 #define FC_SCSI_DBG(lport, fmt, args...)                               \
        FC_CHECK_LOGGING(FC_SCSI_LOGGING,                               \
-                        printk(KERN_INFO "host%u: scsi: " fmt,         \
-                               (lport)->host->host_no, ##args))
+                        pr_info("host%u: scsi: " fmt,                  \
+                                (lport)->host->host_no, ##args))
 
 /*
  * FC-4 Providers.
index 83aa1ef..d518d17 100644 (file)
@@ -582,7 +582,7 @@ static void fc_rport_error(struct fc_rport_priv *rdata, struct fc_frame *fp)
 static void fc_rport_error_retry(struct fc_rport_priv *rdata,
                                 struct fc_frame *fp)
 {
-       unsigned long delay = FC_DEF_E_D_TOV;
+       unsigned long delay = msecs_to_jiffies(FC_DEF_E_D_TOV);
 
        /* make sure this isn't an FC_EX_CLOSED error, never retry those */
        if (PTR_ERR(fp) == -FC_EX_CLOSED)
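
Note: FC_DEF_E_D_TOV is a timeout expressed in milliseconds, while the retry delay is ultimately handed to jiffies-based timer infrastructure, hence the msecs_to_jiffies() conversion. A generic sketch of the same pattern; the constant, work item and function names below are made up for illustration:

	#include <linux/jiffies.h>
	#include <linux/workqueue.h>

	#define MY_RETRY_DELAY_MS	2000	/* illustrative, in milliseconds */

	/* Sketch: convert millisecond constants before passing them to
	 * jiffies-based APIs such as delayed work or timers. */
	static void my_schedule_retry(struct delayed_work *dwork)
	{
		queue_delayed_work(system_wq, dwork,
				   msecs_to_jiffies(MY_RETRY_DELAY_MS));
	}
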
index 55b6fc8..74b67d9 100644 (file)
@@ -6644,7 +6644,7 @@ static int
 lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
                       uint32_t flag)
 {
-       MAILBOX_t *mb;
+       MAILBOX_t *mbx;
        struct lpfc_sli *psli = &phba->sli;
        uint32_t status, evtctr;
        uint32_t ha_copy, hc_copy;
@@ -6698,7 +6698,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
 
        psli = &phba->sli;
 
-       mb = &pmbox->u.mb;
+       mbx = &pmbox->u.mb;
        status = MBX_SUCCESS;
 
        if (phba->link_state == LPFC_HBA_ERROR) {
@@ -6713,7 +6713,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
                goto out_not_finished;
        }
 
-       if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
+       if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
                if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
                        !(hc_copy & HC_MBINT_ENA)) {
                        spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
@@ -6767,7 +6767,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
                                "(%d):0308 Mbox cmd issue - BUSY Data: "
                                "x%x x%x x%x x%x\n",
                                pmbox->vport ? pmbox->vport->vpi : 0xffffff,
-                               mb->mbxCommand, phba->pport->port_state,
+                               mbx->mbxCommand, phba->pport->port_state,
                                psli->sli_flag, flag);
 
                psli->slistat.mbox_busy++;
@@ -6777,15 +6777,15 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
                        lpfc_debugfs_disc_trc(pmbox->vport,
                                LPFC_DISC_TRC_MBOX_VPORT,
                                "MBOX Bsy vport:  cmd:x%x mb:x%x x%x",
-                               (uint32_t)mb->mbxCommand,
-                               mb->un.varWords[0], mb->un.varWords[1]);
+                               (uint32_t)mbx->mbxCommand,
+                               mbx->un.varWords[0], mbx->un.varWords[1]);
                }
                else {
                        lpfc_debugfs_disc_trc(phba->pport,
                                LPFC_DISC_TRC_MBOX,
                                "MBOX Bsy:        cmd:x%x mb:x%x x%x",
-                               (uint32_t)mb->mbxCommand,
-                               mb->un.varWords[0], mb->un.varWords[1]);
+                               (uint32_t)mbx->mbxCommand,
+                               mbx->un.varWords[0], mbx->un.varWords[1]);
                }
 
                return MBX_BUSY;
@@ -6796,7 +6796,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
        /* If we are not polling, we MUST be in SLI2 mode */
        if (flag != MBX_POLL) {
                if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
-                   (mb->mbxCommand != MBX_KILL_BOARD)) {
+                   (mbx->mbxCommand != MBX_KILL_BOARD)) {
                        psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
                        spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
                        /* Mbox command <mbxCommand> cannot issue */
@@ -6818,23 +6818,23 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
                        "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
                        "x%x\n",
                        pmbox->vport ? pmbox->vport->vpi : 0,
-                       mb->mbxCommand, phba->pport->port_state,
+                       mbx->mbxCommand, phba->pport->port_state,
                        psli->sli_flag, flag);
 
-       if (mb->mbxCommand != MBX_HEARTBEAT) {
+       if (mbx->mbxCommand != MBX_HEARTBEAT) {
                if (pmbox->vport) {
                        lpfc_debugfs_disc_trc(pmbox->vport,
                                LPFC_DISC_TRC_MBOX_VPORT,
                                "MBOX Send vport: cmd:x%x mb:x%x x%x",
-                               (uint32_t)mb->mbxCommand,
-                               mb->un.varWords[0], mb->un.varWords[1]);
+                               (uint32_t)mbx->mbxCommand,
+                               mbx->un.varWords[0], mbx->un.varWords[1]);
                }
                else {
                        lpfc_debugfs_disc_trc(phba->pport,
                                LPFC_DISC_TRC_MBOX,
                                "MBOX Send:       cmd:x%x mb:x%x x%x",
-                               (uint32_t)mb->mbxCommand,
-                               mb->un.varWords[0], mb->un.varWords[1]);
+                               (uint32_t)mbx->mbxCommand,
+                               mbx->un.varWords[0], mbx->un.varWords[1]);
                }
        }
 
@@ -6842,12 +6842,12 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
        evtctr = psli->slistat.mbox_event;
 
        /* next set own bit for the adapter and copy over command word */
-       mb->mbxOwner = OWN_CHIP;
+       mbx->mbxOwner = OWN_CHIP;
 
        if (psli->sli_flag & LPFC_SLI_ACTIVE) {
                /* Populate mbox extension offset word. */
                if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
-                       *(((uint32_t *)mb) + pmbox->mbox_offset_word)
+                       *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
                                = (uint8_t *)phba->mbox_ext
                                  - (uint8_t *)phba->mbox;
                }
@@ -6859,11 +6859,11 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
                                pmbox->in_ext_byte_len);
                }
                /* Copy command data to host SLIM area */
-               lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
+               lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
        } else {
                /* Populate mbox extension offset word. */
                if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
-                       *(((uint32_t *)mb) + pmbox->mbox_offset_word)
+                       *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
                                = MAILBOX_HBA_EXT_OFFSET;
 
                /* Copy the mailbox extension data */
@@ -6873,24 +6873,24 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
                                pmbox->context2, pmbox->in_ext_byte_len);
 
                }
-               if (mb->mbxCommand == MBX_CONFIG_PORT) {
+               if (mbx->mbxCommand == MBX_CONFIG_PORT) {
                        /* copy command data into host mbox for cmpl */
-                       lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
+                       lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
                }
 
                /* First copy mbox command data to HBA SLIM, skip past first
                   word */
                to_slim = phba->MBslimaddr + sizeof (uint32_t);
-               lpfc_memcpy_to_slim(to_slim, &mb->un.varWords[0],
+               lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
                            MAILBOX_CMD_SIZE - sizeof (uint32_t));
 
                /* Next copy over first word, with mbxOwner set */
-               ldata = *((uint32_t *)mb);
+               ldata = *((uint32_t *)mbx);
                to_slim = phba->MBslimaddr;
                writel(ldata, to_slim);
                readl(to_slim); /* flush */
 
-               if (mb->mbxCommand == MBX_CONFIG_PORT) {
+               if (mbx->mbxCommand == MBX_CONFIG_PORT) {
                        /* switch over to host mailbox */
                        psli->sli_flag |= LPFC_SLI_ACTIVE;
                }
@@ -6965,7 +6965,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
                                /* First copy command data */
                                word0 = *((uint32_t *)phba->mbox);
                                word0 = le32_to_cpu(word0);
-                               if (mb->mbxCommand == MBX_CONFIG_PORT) {
+                               if (mbx->mbxCommand == MBX_CONFIG_PORT) {
                                        MAILBOX_t *slimmb;
                                        uint32_t slimword0;
                                        /* Check real SLIM for any errors */
@@ -6992,7 +6992,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
 
                if (psli->sli_flag & LPFC_SLI_ACTIVE) {
                        /* copy results back to user */
-                       lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE);
+                       lpfc_sli_pcimem_bcopy(phba->mbox, mbx, MAILBOX_CMD_SIZE);
                        /* Copy the mailbox extension data */
                        if (pmbox->out_ext_byte_len && pmbox->context2) {
                                lpfc_sli_pcimem_bcopy(phba->mbox_ext,
@@ -7001,7 +7001,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
                        }
                } else {
                        /* First copy command data */
-                       lpfc_memcpy_from_slim(mb, phba->MBslimaddr,
+                       lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
                                                        MAILBOX_CMD_SIZE);
                        /* Copy the mailbox extension data */
                        if (pmbox->out_ext_byte_len && pmbox->context2) {
@@ -7016,7 +7016,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
                readl(phba->HAregaddr); /* flush */
 
                psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
-               status = mb->mbxStatus;
+               status = mbx->mbxStatus;
        }
 
        spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
index 3b2365c..408d254 100644 (file)
@@ -33,9 +33,9 @@
 /*
  * MegaRAID SAS Driver meta data
  */
-#define MEGASAS_VERSION                                "06.504.01.00-rc1"
-#define MEGASAS_RELDATE                                "Oct. 1, 2012"
-#define MEGASAS_EXT_VERSION                    "Mon. Oct. 1 17:00:00 PDT 2012"
+#define MEGASAS_VERSION                                "06.506.00.00-rc1"
+#define MEGASAS_RELDATE                                "Feb. 9, 2013"
+#define MEGASAS_EXT_VERSION                    "Sat. Feb. 9 17:00:00 PDT 2013"
 
 /*
  * Device IDs
index 66a0fec..9d53540 100644 (file)
@@ -18,7 +18,7 @@
  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  *
  *  FILE: megaraid_sas_base.c
- *  Version : v06.504.01.00-rc1
+ *  Version : v06.506.00.00-rc1
  *
  *  Authors: LSI Corporation
  *           Sreenivas Bagalkote
index 74030af..a7d5668 100644 (file)
@@ -1206,7 +1206,7 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
                                MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
                }
                io_request->Control |= (0x4 << 26);
-               io_request->EEDPBlockSize = MEGASAS_EEDPBLOCKSIZE;
+               io_request->EEDPBlockSize = scp->device->sector_size;
        } else {
                /* Some drives don't support 16/12 byte CDB's, convert to 10 */
                if (((cdb_len == 12) || (cdb_len == 16)) &&
@@ -1511,7 +1511,8 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance,
        if (scmd->device->channel < MEGASAS_MAX_PD_CHANNELS &&
            instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) {
                io_request->Function = 0;
-               io_request->DevHandle =
+               if (fusion->fast_path_io)
+                       io_request->DevHandle =
                        local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
                io_request->RaidContext.timeoutValue =
                        local_map_ptr->raidMap.fpPdIoTimeoutSec;
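
Note: the EEDP (protection information) block size now follows the logical block size the SCSI midlayer recorded for the device instead of a hard-coded 512, which matters for 4K-sector drives. A minimal sketch of where that value comes from, assuming only an arbitrary scsi_cmnd pointer; the helper name is illustrative:

	/* Sketch: the midlayer caches the device's logical block size in
	 * scsi_device->sector_size, so per-I/O code can read it directly. */
	static u32 my_eedp_block_size(struct scsi_cmnd *scp)
	{
		return scp->device->sector_size;
	}
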
index a7c64f0..f68a3cd 100644 (file)
@@ -61,7 +61,6 @@
 #define MEGASAS_SCSI_ADDL_CDB_LEN                   0x18
 #define MEGASAS_RD_WR_PROTECT_CHECK_ALL                    0x20
 #define MEGASAS_RD_WR_PROTECT_CHECK_NONE           0x60
-#define MEGASAS_EEDPBLOCKSIZE                      512
 
 /*
  * Raid context flags
index 5e24e7e..bcb23d2 100644 (file)
@@ -2023,6 +2023,14 @@ _base_display_intel_branding(struct MPT2SAS_ADAPTER *ioc)
                        printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
                            MPT2SAS_INTEL_RMS25KB040_BRANDING);
                        break;
+               case MPT2SAS_INTEL_RMS25LB040_SSDID:
+                       printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+                           MPT2SAS_INTEL_RMS25LB040_BRANDING);
+                       break;
+               case MPT2SAS_INTEL_RMS25LB080_SSDID:
+                       printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+                           MPT2SAS_INTEL_RMS25LB080_BRANDING);
+                       break;
                default:
                        break;
                }
index c6ee7aa..4caaac1 100644 (file)
                                "Intel(R) Integrated RAID Module RMS25KB080"
 #define MPT2SAS_INTEL_RMS25KB040_BRANDING    \
                                "Intel(R) Integrated RAID Module RMS25KB040"
+#define MPT2SAS_INTEL_RMS25LB040_BRANDING      \
+                               "Intel(R) Integrated RAID Module RMS25LB040"
+#define MPT2SAS_INTEL_RMS25LB080_BRANDING      \
+                               "Intel(R) Integrated RAID Module RMS25LB080"
 #define MPT2SAS_INTEL_RMS2LL080_BRANDING       \
                                "Intel Integrated RAID Module RMS2LL080"
 #define MPT2SAS_INTEL_RMS2LL040_BRANDING       \
 #define MPT2SAS_INTEL_RMS25JB040_SSDID         0x3517
 #define MPT2SAS_INTEL_RMS25KB080_SSDID         0x3518
 #define MPT2SAS_INTEL_RMS25KB040_SSDID         0x3519
+#define MPT2SAS_INTEL_RMS25LB040_SSDID         0x351A
+#define MPT2SAS_INTEL_RMS25LB080_SSDID         0x351B
 #define MPT2SAS_INTEL_RMS2LL080_SSDID          0x350E
 #define MPT2SAS_INTEL_RMS2LL040_SSDID          0x350F
 #define MPT2SAS_INTEL_RS25GB008_SSDID          0x3000
index 078c639..532110f 100644 (file)
@@ -316,10 +316,13 @@ static int mvs_task_prep_smp(struct mvs_info *mvi,
                             struct mvs_task_exec_info *tei)
 {
        int elem, rc, i;
+       struct sas_ha_struct *sha = mvi->sas;
        struct sas_task *task = tei->task;
        struct mvs_cmd_hdr *hdr = tei->hdr;
        struct domain_device *dev = task->dev;
        struct asd_sas_port *sas_port = dev->port;
+       struct sas_phy *sphy = dev->phy;
+       struct asd_sas_phy *sas_phy = sha->sas_phy[sphy->number];
        struct scatterlist *sg_req, *sg_resp;
        u32 req_len, resp_len, tag = tei->tag;
        void *buf_tmp;
@@ -392,7 +395,7 @@ static int mvs_task_prep_smp(struct mvs_info *mvi,
        slot->tx = mvi->tx_prod;
        mvi->tx[mvi->tx_prod] = cpu_to_le32((TXQ_CMD_SMP << TXQ_CMD_SHIFT) |
                                        TXQ_MODE_I | tag |
-                                       (sas_port->phy_mask << TXQ_PHY_SHIFT));
+                                       (MVS_PHY_ID << TXQ_PHY_SHIFT));
 
        hdr->flags |= flags;
        hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | ((req_len - 4) / 4));
@@ -438,11 +441,14 @@ static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag)
 static int mvs_task_prep_ata(struct mvs_info *mvi,
                             struct mvs_task_exec_info *tei)
 {
+       struct sas_ha_struct *sha = mvi->sas;
        struct sas_task *task = tei->task;
        struct domain_device *dev = task->dev;
        struct mvs_device *mvi_dev = dev->lldd_dev;
        struct mvs_cmd_hdr *hdr = tei->hdr;
        struct asd_sas_port *sas_port = dev->port;
+       struct sas_phy *sphy = dev->phy;
+       struct asd_sas_phy *sas_phy = sha->sas_phy[sphy->number];
        struct mvs_slot_info *slot;
        void *buf_prd;
        u32 tag = tei->tag, hdr_tag;
@@ -462,7 +468,7 @@ static int mvs_task_prep_ata(struct mvs_info *mvi,
        slot->tx = mvi->tx_prod;
        del_q = TXQ_MODE_I | tag |
                (TXQ_CMD_STP << TXQ_CMD_SHIFT) |
-               (sas_port->phy_mask << TXQ_PHY_SHIFT) |
+               (MVS_PHY_ID << TXQ_PHY_SHIFT) |
                (mvi_dev->taskfileset << TXQ_SRS_SHIFT);
        mvi->tx[mvi->tx_prod] = cpu_to_le32(del_q);
 
index 2ae77a0..9f3cc13 100644 (file)
@@ -76,6 +76,7 @@ extern struct kmem_cache *mvs_task_list_cache;
                                        (__mc) != 0 ;           \
                                        (++__lseq), (__mc) >>= 1)
 
+#define MVS_PHY_ID (1U << sas_phy->id)
 #define MV_INIT_DELAYED_WORK(w, f, d)  INIT_DELAYED_WORK(w, f)
 #define UNASSOC_D2H_FIS(id)            \
        ((void *) mvi->rx_fis + 0x100 * id)
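
Note: MVS_PHY_ID expands to an expression that reads a local variable named sas_phy, so it only compiles inside functions that declare one; that is why both mvs_task_prep_smp() and mvs_task_prep_ata() above now look up a struct asd_sas_phy *sas_phy before using it. A stripped-down fragment (not compilable on its own) showing the required scope:

	/* Sketch: MVS_PHY_ID relies on a local 'sas_phy' being in scope. */
	struct sas_phy *sphy = dev->phy;
	struct asd_sas_phy *sas_phy = sha->sas_phy[sphy->number];
	u32 phy_bits = MVS_PHY_ID << TXQ_PHY_SHIFT;	/* 1U << sas_phy->id */
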
index c06b8e5..d8293f2 100644 (file)
@@ -144,6 +144,10 @@ static int _osd_get_print_system_info(struct osd_dev *od,
        odi->osdname_len = get_attrs[a].len;
        /* Avoid NULL for memcmp optimization 0-length is good enough */
        odi->osdname = kzalloc(odi->osdname_len + 1, GFP_KERNEL);
+       if (!odi->osdname) {
+               ret = -ENOMEM;
+               goto out;
+       }
        if (odi->osdname_len)
                memcpy(odi->osdname, get_attrs[a].val_ptr, odi->osdname_len);
        OSD_INFO("OSD_NAME               [%s]\n", odi->osdname);
index 4c9fe73..3d5e522 100644 (file)
@@ -140,7 +140,8 @@ static void pm8001_free(struct pm8001_hba_info *pm8001_ha)
        for (i = 0; i < USI_MAX_MEMCNT; i++) {
                if (pm8001_ha->memoryMap.region[i].virt_ptr != NULL) {
                        pci_free_consistent(pm8001_ha->pdev,
-                               pm8001_ha->memoryMap.region[i].element_size,
+                               (pm8001_ha->memoryMap.region[i].total_len +
+                               pm8001_ha->memoryMap.region[i].alignment),
                                pm8001_ha->memoryMap.region[i].virt_ptr,
                                pm8001_ha->memoryMap.region[i].phys_addr);
                        }
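
Note: pci_free_consistent() must be given the same size that was passed to pci_alloc_consistent(); since the region was allocated with room for total_len plus alignment padding, the free side now mirrors that sum instead of element_size. A generic sketch of the matched alloc/free pair, with illustrative names outside the two size fields quoted above:

	/* Sketch: record the exact size handed to the DMA allocator and
	 * pass the same value back when freeing. */
	dma_addr_t dma_handle;
	size_t span = mem->total_len + mem->alignment;
	void *virt = pci_alloc_consistent(pdev, span, &dma_handle);
	/* ... use the buffer ... */
	pci_free_consistent(pdev, span, virt, dma_handle);
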
index 83d7984..1d82eef 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2012 QLogic Corporation
+ * Copyright (c)  2003-2013 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -1272,22 +1272,29 @@ qla2x00_thermal_temp_show(struct device *dev,
        struct device_attribute *attr, char *buf)
 {
        scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
-       int rval = QLA_FUNCTION_FAILED;
-       uint16_t temp, frac;
+       uint16_t temp = 0;
 
-       if (!vha->hw->flags.thermal_supported)
-               return snprintf(buf, PAGE_SIZE, "\n");
+       if (!vha->hw->thermal_support) {
+               ql_log(ql_log_warn, vha, 0x70db,
+                   "Thermal not supported by this card.\n");
+               goto done;
+       }
 
-       temp = frac = 0;
-       if (qla2x00_reset_active(vha))
-               ql_log(ql_log_warn, vha, 0x707b,
-                   "ISP reset active.\n");
-       else if (!vha->hw->flags.eeh_busy)
-               rval = qla2x00_get_thermal_temp(vha, &temp, &frac);
-       if (rval != QLA_SUCCESS)
-               return snprintf(buf, PAGE_SIZE, "\n");
+       if (qla2x00_reset_active(vha)) {
+               ql_log(ql_log_warn, vha, 0x70dc, "ISP reset active.\n");
+               goto done;
+       }
+
+       if (vha->hw->flags.eeh_busy) {
+               ql_log(ql_log_warn, vha, 0x70dd, "PCI EEH busy.\n");
+               goto done;
+       }
+
+       if (qla2x00_get_thermal_temp(vha, &temp) == QLA_SUCCESS)
+               return snprintf(buf, PAGE_SIZE, "%d\n", temp);
 
-       return snprintf(buf, PAGE_SIZE, "%d.%02d\n", temp, frac);
+done:
+       return snprintf(buf, PAGE_SIZE, "\n");
 }
 
 static ssize_t
index 9f34ded..ad54099 100644 (file)
@@ -27,7 +27,7 @@ void
 qla2x00_bsg_sp_free(void *data, void *ptr)
 {
        srb_t *sp = (srb_t *)ptr;
-       struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
+       struct scsi_qla_host *vha = sp->fcport->vha;
        struct fc_bsg_job *bsg_job = sp->u.bsg_job;
        struct qla_hw_data *ha = vha->hw;
 
@@ -40,7 +40,7 @@ qla2x00_bsg_sp_free(void *data, void *ptr)
        if (sp->type == SRB_CT_CMD ||
            sp->type == SRB_ELS_CMD_HST)
                kfree(sp->fcport);
-       mempool_free(sp, vha->hw->srb_mempool);
+       qla2x00_rel_sp(vha, sp);
 }
 
 int
@@ -368,7 +368,7 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
        if (rval != QLA_SUCCESS) {
                ql_log(ql_log_warn, vha, 0x700e,
                    "qla2x00_start_sp failed = %d\n", rval);
-               mempool_free(sp, ha->srb_mempool);
+               qla2x00_rel_sp(vha, sp);
                rval = -EIO;
                goto done_unmap_sg;
        }
@@ -515,7 +515,7 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
        if (rval != QLA_SUCCESS) {
                ql_log(ql_log_warn, vha, 0x7017,
                    "qla2x00_start_sp failed=%d.\n", rval);
-               mempool_free(sp, ha->srb_mempool);
+               qla2x00_rel_sp(vha, sp);
                rval = -EIO;
                goto done_free_fcport;
        }
@@ -531,6 +531,75 @@ done_unmap_sg:
 done:
        return rval;
 }
+
+/* Disable loopback mode */
+static inline int
+qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
+                           int wait, int wait2)
+{
+       int ret = 0;
+       int rval = 0;
+       uint16_t new_config[4];
+       struct qla_hw_data *ha = vha->hw;
+
+       if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
+               goto done_reset_internal;
+
+       memset(new_config, 0 , sizeof(new_config));
+       if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
+           ENABLE_INTERNAL_LOOPBACK ||
+           (config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
+           ENABLE_EXTERNAL_LOOPBACK) {
+               new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
+               ql_dbg(ql_dbg_user, vha, 0x70bf, "new_config[0]=%02x\n",
+                   (new_config[0] & INTERNAL_LOOPBACK_MASK));
+               memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3) ;
+
+               ha->notify_dcbx_comp = wait;
+               ha->notify_lb_portup_comp = wait2;
+
+               ret = qla81xx_set_port_config(vha, new_config);
+               if (ret != QLA_SUCCESS) {
+                       ql_log(ql_log_warn, vha, 0x7025,
+                           "Set port config failed.\n");
+                       ha->notify_dcbx_comp = 0;
+                       ha->notify_lb_portup_comp = 0;
+                       rval = -EINVAL;
+                       goto done_reset_internal;
+               }
+
+               /* Wait for DCBX complete event */
+               if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
+                       (DCBX_COMP_TIMEOUT * HZ))) {
+                       ql_dbg(ql_dbg_user, vha, 0x7026,
+                           "DCBX completion not received.\n");
+                       ha->notify_dcbx_comp = 0;
+                       ha->notify_lb_portup_comp = 0;
+                       rval = -EINVAL;
+                       goto done_reset_internal;
+               } else
+                       ql_dbg(ql_dbg_user, vha, 0x7027,
+                           "DCBX completion received.\n");
+
+               if (wait2 &&
+                   !wait_for_completion_timeout(&ha->lb_portup_comp,
+                   (LB_PORTUP_COMP_TIMEOUT * HZ))) {
+                       ql_dbg(ql_dbg_user, vha, 0x70c5,
+                           "Port up completion not received.\n");
+                       ha->notify_lb_portup_comp = 0;
+                       rval = -EINVAL;
+                       goto done_reset_internal;
+               } else
+                       ql_dbg(ql_dbg_user, vha, 0x70c6,
+                           "Port up completion received.\n");
+
+               ha->notify_dcbx_comp = 0;
+               ha->notify_lb_portup_comp = 0;
+       }
+done_reset_internal:
+       return rval;
+}
+
 /*
  * Set the port configuration to enable the internal or external loopback
  * depending on the loopback mode.
@@ -566,9 +635,19 @@ qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
        }
 
        /* Wait for DCBX complete event */
-       if (!wait_for_completion_timeout(&ha->dcbx_comp, (20 * HZ))) {
+       if (!wait_for_completion_timeout(&ha->dcbx_comp,
+           (DCBX_COMP_TIMEOUT * HZ))) {
                ql_dbg(ql_dbg_user, vha, 0x7022,
-                   "State change notification not received.\n");
+                   "DCBX completion not received.\n");
+               ret = qla81xx_reset_loopback_mode(vha, new_config, 0, 0);
+               /*
+                * If the reset of the loopback mode doesn't work take a FCoE
+                * dump and reset the chip.
+                */
+               if (ret) {
+                       ha->isp_ops->fw_dump(vha, 0);
+                       set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+               }
                rval = -EINVAL;
        } else {
                if (ha->flags.idc_compl_status) {
@@ -578,7 +657,7 @@ qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
                        ha->flags.idc_compl_status = 0;
                } else
                        ql_dbg(ql_dbg_user, vha, 0x7023,
-                           "State change received.\n");
+                           "DCBX completion received.\n");
        }
 
        ha->notify_dcbx_comp = 0;
@@ -587,57 +666,6 @@ done_set_internal:
        return rval;
 }
 
-/* Disable loopback mode */
-static inline int
-qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
-    int wait)
-{
-       int ret = 0;
-       int rval = 0;
-       uint16_t new_config[4];
-       struct qla_hw_data *ha = vha->hw;
-
-       if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
-               goto done_reset_internal;
-
-       memset(new_config, 0 , sizeof(new_config));
-       if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
-           ENABLE_INTERNAL_LOOPBACK ||
-           (config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
-           ENABLE_EXTERNAL_LOOPBACK) {
-               new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
-               ql_dbg(ql_dbg_user, vha, 0x70bf, "new_config[0]=%02x\n",
-                   (new_config[0] & INTERNAL_LOOPBACK_MASK));
-               memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3) ;
-
-               ha->notify_dcbx_comp = wait;
-               ret = qla81xx_set_port_config(vha, new_config);
-               if (ret != QLA_SUCCESS) {
-                       ql_log(ql_log_warn, vha, 0x7025,
-                           "Set port config failed.\n");
-                       ha->notify_dcbx_comp = 0;
-                       rval = -EINVAL;
-                       goto done_reset_internal;
-               }
-
-               /* Wait for DCBX complete event */
-               if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
-                       (20 * HZ))) {
-                       ql_dbg(ql_dbg_user, vha, 0x7026,
-                           "State change notification not received.\n");
-                       ha->notify_dcbx_comp = 0;
-                       rval = -EINVAL;
-                       goto done_reset_internal;
-               } else
-                       ql_dbg(ql_dbg_user, vha, 0x7027,
-                           "State change received.\n");
-
-               ha->notify_dcbx_comp = 0;
-       }
-done_reset_internal:
-       return rval;
-}
-
 static int
 qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
 {
@@ -739,6 +767,7 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
                if (IS_QLA81XX(ha) || IS_QLA8031(ha)) {
                        memset(config, 0, sizeof(config));
                        memset(new_config, 0, sizeof(new_config));
+
                        if (qla81xx_get_port_config(vha, config)) {
                                ql_log(ql_log_warn, vha, 0x701f,
                                    "Get port config failed.\n");
@@ -746,6 +775,14 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
                                goto done_free_dma_rsp;
                        }
 
+                       if ((config[0] & INTERNAL_LOOPBACK_MASK) != 0) {
+                               ql_dbg(ql_dbg_user, vha, 0x70c4,
+                                   "Loopback operation already in "
+                                   "progress.\n");
+                               rval = -EAGAIN;
+                               goto done_free_dma_rsp;
+                       }
+
                        ql_dbg(ql_dbg_user, vha, 0x70c0,
                            "elreq.options=%04x\n", elreq.options);
 
@@ -755,7 +792,7 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
                                            config, new_config, elreq.options);
                                else
                                        rval = qla81xx_reset_loopback_mode(vha,
-                                           config, 1);
+                                           config, 1, 0);
                        else
                                rval = qla81xx_set_loopback_mode(vha, config,
                                    new_config, elreq.options);
@@ -772,14 +809,6 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
                        command_sent = INT_DEF_LB_LOOPBACK_CMD;
                        rval = qla2x00_loopback_test(vha, &elreq, response);
 
-                       if (new_config[0]) {
-                               /* Revert back to original port config
-                                * Also clear internal loopback
-                                */
-                               qla81xx_reset_loopback_mode(vha,
-                                   new_config, 0);
-                       }
-
                        if (response[0] == MBS_COMMAND_ERROR &&
                                        response[1] == MBS_LB_RESET) {
                                ql_log(ql_log_warn, vha, 0x7029,
@@ -788,15 +817,39 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
                                qla2xxx_wake_dpc(vha);
                                qla2x00_wait_for_chip_reset(vha);
                                /* Also reset the MPI */
-                               if (qla81xx_restart_mpi_firmware(vha) !=
-                                   QLA_SUCCESS) {
-                                       ql_log(ql_log_warn, vha, 0x702a,
-                                           "MPI reset failed.\n");
+                               if (IS_QLA81XX(ha)) {
+                                       if (qla81xx_restart_mpi_firmware(vha) !=
+                                           QLA_SUCCESS) {
+                                               ql_log(ql_log_warn, vha, 0x702a,
+                                                   "MPI reset failed.\n");
+                                       }
                                }
 
                                rval = -EIO;
                                goto done_free_dma_rsp;
                        }
+
+                       if (new_config[0]) {
+                               int ret;
+
+                               /* Revert back to original port config
+                                * Also clear internal loopback
+                                */
+                               ret = qla81xx_reset_loopback_mode(vha,
+                                   new_config, 0, 1);
+                               if (ret) {
+                                       /*
+                                        * If the reset of the loopback mode
+                                        * doesn't work take FCoE dump and then
+                                        * reset the chip.
+                                        */
+                                       ha->isp_ops->fw_dump(vha, 0);
+                                       set_bit(ISP_ABORT_NEEDED,
+                                           &vha->dpc_flags);
+                               }
+
+                       }
+
                } else {
                        type = "FC_BSG_HST_VENDOR_LOOPBACK";
                        ql_dbg(ql_dbg_user, vha, 0x702b,
@@ -1950,7 +2003,7 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
                if (!req)
                        continue;
 
-               for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
+               for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
                        sp = req->outstanding_cmds[cnt];
                        if (sp) {
                                if (((sp->type == SRB_CT_CMD) ||
@@ -1985,6 +2038,6 @@ done:
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        if (bsg_job->request->msgcode == FC_BSG_HST_CT)
                kfree(sp->fcport);
-       mempool_free(sp, ha->srb_mempool);
+       qla2x00_rel_sp(vha, sp);
        return 0;
 }
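
Note: the reworked loopback reset path waits on two separate completions (DCBX and loopback port-up), each bounded by a HZ-scaled timeout, and treats a zero return from wait_for_completion_timeout() as failure. A compact sketch of that pattern with hypothetical names:

	#include <linux/completion.h>
	#include <linux/errno.h>
	#include <linux/jiffies.h>

	/* Sketch: wait_for_completion_timeout() returns 0 on timeout and
	 * the remaining jiffies otherwise. */
	static int my_wait_for_event(struct completion *comp, unsigned int secs)
	{
		if (!wait_for_completion_timeout(comp, secs * HZ))
			return -EINVAL;		/* event never arrived */
		return 0;
	}
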
index 37b8b7b..e9f6b9b 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2012 QLogic Corporation
+ * Copyright (c)  2003-2013 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
index 53f9e49..1626de5 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2012 QLogic Corporation
+ * Copyright (c)  2003-2013 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
  * ----------------------------------------------------------------------
  * |             Level            |   Last Value Used  |     Holes     |
  * ----------------------------------------------------------------------
- * | Module Init and Probe        |       0x0125       | 0x4b,0xba,0xfa |
- * | Mailbox commands             |       0x114f       | 0x111a-0x111b  |
+ * | Module Init and Probe        |       0x0126       | 0x4b,0xba,0xfa |
+ * | Mailbox commands             |       0x115b       | 0x111a-0x111b  |
  * |                              |                    | 0x112c-0x112e  |
  * |                              |                    | 0x113a         |
  * | Device Discovery             |       0x2087       | 0x2020-0x2022, |
  * |                              |                    | 0x2016         |
- * | Queue Command and IO tracing |       0x3030       | 0x3006-0x300b  |
+ * | Queue Command and IO tracing |       0x3031       | 0x3006-0x300b  |
  * |                              |                    | 0x3027-0x3028  |
  * |                              |                    | 0x302d-0x302e  |
  * | DPC Thread                   |       0x401d       | 0x4002,0x4013  |
  * | Async Events                 |       0x5071       | 0x502b-0x502f  |
  * |                              |                    | 0x5047,0x5052  |
  * | Timer Routines               |       0x6011       |                |
- * | User Space Interactions      |       0x70c3       | 0x7018,0x702e, |
+ * | User Space Interactions      |       0x70c4       | 0x7018,0x702e, |
+ * |                              |                    | 0x7020,0x7024, |
  * |                              |                    | 0x7039,0x7045, |
  * |                              |                    | 0x7073-0x7075, |
  * |                              |                    | 0x708c,        |
  * |                              |                    | 0x800b,0x8039  |
  * | AER/EEH                      |       0x9011       |               |
  * | Virtual Port                 |       0xa007       |               |
- * | ISP82XX Specific             |       0xb084       | 0xb002,0xb024  |
+ * | ISP82XX Specific             |       0xb086       | 0xb002,0xb024  |
  * | MultiQ                       |       0xc00c       |               |
  * | Misc                         |       0xd010       |               |
- * | Target Mode                 |       0xe06f       |                |
- * | Target Mode Management      |       0xf071       |                |
+ * | Target Mode                 |       0xe070       |                |
+ * | Target Mode Management      |       0xf072       |                |
  * | Target Mode Task Management  |      0x1000b      |                |
  * ----------------------------------------------------------------------
  */
index 8f911c0..35e20b4 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2012 QLogic Corporation
+ * Copyright (c)  2003-2013 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
index 6e7727f..c650991 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2012 QLogic Corporation
+ * Copyright (c)  2003-2013 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -37,6 +37,7 @@
 #include "qla_nx.h"
 #define QLA2XXX_DRIVER_NAME    "qla2xxx"
 #define QLA2XXX_APIDEV         "ql2xapidev"
+#define QLA2XXX_MANUFACTURER   "QLogic Corporation"
 
 /*
  * We have MAILBOX_REGISTER_COUNT sized arrays in a few places,
 #define LOOP_DOWN_TIME                 255     /* 240 */
 #define        LOOP_DOWN_RESET                 (LOOP_DOWN_TIME - 30)
 
-/* Maximum outstanding commands in ISP queues (1-65535) */
-#define MAX_OUTSTANDING_COMMANDS       1024
+#define DEFAULT_OUTSTANDING_COMMANDS   1024
+#define MIN_OUTSTANDING_COMMANDS       128
 
 /* ISP request and response entry counts (37-65535) */
 #define REQUEST_ENTRY_CNT_2100         128     /* Number of request entries. */
@@ -537,6 +538,8 @@ struct device_reg_25xxmq {
        uint32_t req_q_out;
        uint32_t rsp_q_in;
        uint32_t rsp_q_out;
+       uint32_t atio_q_in;
+       uint32_t atio_q_out;
 };
 
 typedef union {
@@ -563,6 +566,9 @@ typedef union {
         &(reg)->u.isp2100.mailbox5 : \
         &(reg)->u.isp2300.rsp_q_out)
 
+#define ISP_ATIO_Q_IN(vha) (vha->hw->tgt.atio_q_in)
+#define ISP_ATIO_Q_OUT(vha) (vha->hw->tgt.atio_q_out)
+
 #define MAILBOX_REG(ha, reg, num) \
        (IS_QLA2100(ha) || IS_QLA2200(ha) ? \
         (num < 8 ? \
@@ -762,8 +768,8 @@ typedef struct {
 #define MBC_PORT_LOGOUT                        0x56    /* Port Logout request */
 #define MBC_SEND_RNID_ELS              0x57    /* Send RNID ELS request */
 #define MBC_SET_RNID_PARAMS            0x59    /* Set RNID parameters */
-#define MBC_GET_RNID_PARAMS            0x5a    /* Data Rate */
-#define MBC_DATA_RATE                  0x5d    /* Get RNID parameters */
+#define MBC_GET_RNID_PARAMS            0x5a    /* Get RNID parameters */
+#define MBC_DATA_RATE                  0x5d    /* Data Rate */
 #define MBC_INITIALIZE_FIRMWARE                0x60    /* Initialize firmware */
 #define MBC_INITIATE_LIP               0x62    /* Initiate Loop */
                                                /* Initialization Procedure */
@@ -809,6 +815,7 @@ typedef struct {
 #define MBC_HOST_MEMORY_COPY           0x53    /* Host Memory Copy. */
 #define MBC_SEND_RNFT_ELS              0x5e    /* Send RNFT ELS request */
 #define MBC_GET_LINK_PRIV_STATS                0x6d    /* Get link & private data. */
+#define MBC_LINK_INITIALIZATION                0x72    /* Do link initialization. */
 #define MBC_SET_VENDOR_ID              0x76    /* Set Vendor ID. */
 #define MBC_PORT_RESET                 0x120   /* Port Reset */
 #define MBC_SET_PORT_CONFIG            0x122   /* Set port configuration */
@@ -856,6 +863,9 @@ typedef struct {
 #define        MBX_1           BIT_1
 #define        MBX_0           BIT_0
 
+#define RNID_TYPE_SET_VERSION  0x9
+#define RNID_TYPE_ASIC_TEMP    0xC
+
 /*
  * Firmware state codes from get firmware state mailbox command
  */
@@ -1841,9 +1851,6 @@ typedef struct fc_port {
        uint8_t scan_state;
 } fc_port_t;
 
-#define QLA_FCPORT_SCAN_NONE   0
-#define QLA_FCPORT_SCAN_FOUND  1
-
 /*
  * Fibre channel port/lun states.
  */
@@ -2533,8 +2540,10 @@ struct req_que {
        uint16_t  qos;
        uint16_t  vp_idx;
        struct rsp_que *rsp;
-       srb_t *outstanding_cmds[MAX_OUTSTANDING_COMMANDS];
+       srb_t **outstanding_cmds;
        uint32_t current_outstanding_cmd;
+       uint16_t num_outstanding_cmds;
+#define        MAX_Q_DEPTH             32
        int max_q_depth;
 };
 
@@ -2557,11 +2566,13 @@ struct qlt_hw_data {
        struct atio *atio_ring_ptr;     /* Current address. */
        uint16_t atio_ring_index; /* Current index. */
        uint16_t atio_q_length;
+       uint32_t __iomem *atio_q_in;
+       uint32_t __iomem *atio_q_out;
 
        void *target_lport_ptr;
        struct qla_tgt_func_tmpl *tgt_ops;
        struct qla_tgt *qla_tgt;
-       struct qla_tgt_cmd *cmds[MAX_OUTSTANDING_COMMANDS];
+       struct qla_tgt_cmd *cmds[DEFAULT_OUTSTANDING_COMMANDS];
        uint16_t current_handle;
 
        struct qla_tgt_vp_map *tgt_vp_map;
@@ -2618,7 +2629,6 @@ struct qla_hw_data {
                uint32_t        nic_core_hung:1;
 
                uint32_t        quiesce_owner:1;
-               uint32_t        thermal_supported:1;
                uint32_t        nic_core_reset_hdlr_active:1;
                uint32_t        nic_core_reset_owner:1;
                uint32_t        isp82xx_no_md_cap:1;
@@ -2788,6 +2798,8 @@ struct qla_hw_data {
 #define IS_PI_SPLIT_DET_CAPABLE_HBA(ha)        (IS_QLA83XX(ha))
 #define IS_PI_SPLIT_DET_CAPABLE(ha)    (IS_PI_SPLIT_DET_CAPABLE_HBA(ha) && \
     (((ha)->fw_attributes_h << 16 | (ha)->fw_attributes) & BIT_22))
+#define IS_ATIO_MSIX_CAPABLE(ha) (IS_QLA83XX(ha))
+#define IS_TGT_MODE_CAPABLE(ha)        (ha->tgt.atio_q_length)
 
        /* HBA serial number */
        uint8_t         serial0;
@@ -2870,7 +2882,13 @@ struct qla_hw_data {
        struct completion mbx_cmd_comp; /* Serialize mbx access */
        struct completion mbx_intr_comp;  /* Used for completion notification */
        struct completion dcbx_comp;    /* For set port config notification */
+       struct completion lb_portup_comp; /* Used to wait for link up during
+                                          * loopback */
+#define DCBX_COMP_TIMEOUT      20
+#define LB_PORTUP_COMP_TIMEOUT 10
+
        int notify_dcbx_comp;
+       int notify_lb_portup_comp;
        struct mutex selflogin_lock;
 
        /* Basic firmware related information. */
@@ -2887,6 +2905,7 @@ struct qla_hw_data {
 #define RISC_START_ADDRESS_2300 0x800
 #define RISC_START_ADDRESS_2400 0x100000
        uint16_t        fw_xcb_count;
+       uint16_t        fw_iocb_count;
 
        uint16_t        fw_options[16];         /* slots: 1,2,3,10,11 */
        uint8_t         fw_seriallink_options[4];
@@ -3056,7 +3075,16 @@ struct qla_hw_data {
        struct work_struct idc_state_handler;
        struct work_struct nic_core_unrecoverable;
 
+#define HOST_QUEUE_RAMPDOWN_INTERVAL           (60 * HZ)
+#define HOST_QUEUE_RAMPUP_INTERVAL             (30 * HZ)
+       unsigned long   host_last_rampdown_time;
+       unsigned long   host_last_rampup_time;
+       int             cfg_lun_q_depth;
+
        struct qlt_hw_data tgt;
+       uint16_t        thermal_support;
+#define THERMAL_SUPPORT_I2C BIT_0
+#define THERMAL_SUPPORT_ISP BIT_1
 };
 
 /*
@@ -3115,6 +3143,8 @@ typedef struct scsi_qla_host {
 #define MPI_RESET_NEEDED       19      /* Initiate MPI FW reset */
 #define ISP_QUIESCE_NEEDED     20      /* Driver need some quiescence */
 #define SCR_PENDING            21      /* SCR in target mode */
+#define HOST_RAMP_DOWN_QUEUE_DEPTH     22
+#define HOST_RAMP_UP_QUEUE_DEPTH       23
 
        uint32_t        device_flags;
 #define SWITCH_FOUND           BIT_0
@@ -3248,8 +3278,6 @@ struct qla_tgt_vp_map {
 
 #define NVRAM_DELAY()          udelay(10)
 
-#define INVALID_HANDLE (MAX_OUTSTANDING_COMMANDS+1)
-
 /*
  * Flash support definitions
  */
index 706c4f7..792a292 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2012 QLogic Corporation
+ * Copyright (c)  2003-2013 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
index be6d61a..1ac2b0e 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2012 QLogic Corporation
+ * Copyright (c)  2003-2013 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -300,7 +300,8 @@ struct init_cb_24xx {
        uint32_t prio_request_q_address[2];
 
        uint16_t msix;
-       uint8_t reserved_2[6];
+       uint16_t msix_atio;
+       uint8_t reserved_2[4];
 
        uint16_t atio_q_inpointer;
        uint16_t atio_q_length;
@@ -1387,9 +1388,7 @@ struct qla_flt_header {
 #define FLT_REG_FCP_PRIO_0     0x87
 #define FLT_REG_FCP_PRIO_1     0x88
 #define FLT_REG_FCOE_FW                0xA4
-#define FLT_REG_FCOE_VPD_0     0xA9
 #define FLT_REG_FCOE_NVRAM_0   0xAA
-#define FLT_REG_FCOE_VPD_1     0xAB
 #define FLT_REG_FCOE_NVRAM_1   0xAC
 
 struct qla_flt_region {
index 2411d1a..eb3ca21 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2012 QLogic Corporation
+ * Copyright (c)  2003-2013 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -55,7 +55,7 @@ extern void qla2x00_update_fcport(scsi_qla_host_t *, fc_port_t *);
 extern void qla2x00_alloc_fw_dump(scsi_qla_host_t *);
 extern void qla2x00_try_to_stop_firmware(scsi_qla_host_t *);
 
-extern int qla2x00_get_thermal_temp(scsi_qla_host_t *, uint16_t *, uint16_t *);
+extern int qla2x00_get_thermal_temp(scsi_qla_host_t *, uint16_t *);
 
 extern void qla84xx_put_chip(struct scsi_qla_host *);
 
@@ -84,6 +84,9 @@ extern int qla83xx_nic_core_reset(scsi_qla_host_t *);
 extern void qla83xx_reset_ownership(scsi_qla_host_t *);
 extern int qla2xxx_mctp_dump(scsi_qla_host_t *);
 
+extern int
+qla2x00_alloc_outstanding_cmds(struct qla_hw_data *, struct req_que *);
+
 /*
  * Global Data in qla_os.c source file.
  */
@@ -94,6 +97,7 @@ extern int qlport_down_retry;
 extern int ql2xplogiabsentdevice;
 extern int ql2xloginretrycount;
 extern int ql2xfdmienable;
+extern int ql2xmaxqdepth;
 extern int ql2xallocfwdump;
 extern int ql2xextended_error_logging;
 extern int ql2xiidmaenable;
@@ -277,6 +281,9 @@ qla2x00_get_firmware_state(scsi_qla_host_t *, uint16_t *);
 extern int
 qla2x00_get_port_name(scsi_qla_host_t *, uint16_t, uint8_t *, uint8_t);
 
+extern int
+qla24xx_link_initialize(scsi_qla_host_t *);
+
 extern int
 qla2x00_lip_reset(scsi_qla_host_t *);
 
@@ -350,6 +357,9 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *, dma_addr_t, uint16_t , uint16_t *,
 extern int
 qla2x00_disable_fce_trace(scsi_qla_host_t *, uint64_t *, uint64_t *);
 
+extern int
+qla2x00_set_driver_version(scsi_qla_host_t *, char *);
+
 extern int
 qla2x00_read_sfp(scsi_qla_host_t *, dma_addr_t, uint8_t *,
        uint16_t, uint16_t, uint16_t, uint16_t);
@@ -436,6 +446,7 @@ extern uint8_t *qla25xx_read_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
     uint32_t);
 extern int qla25xx_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
     uint32_t);
+extern int qla2x00_is_a_vp_did(scsi_qla_host_t *, uint32_t);
 
 extern int qla2x00_beacon_on(struct scsi_qla_host *);
 extern int qla2x00_beacon_off(struct scsi_qla_host *);
index 01efc0e..9b45525 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2012 QLogic Corporation
+ * Copyright (c)  2003-2013 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -1328,8 +1328,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
        /* Manufacturer. */
        eiter = (struct ct_fdmi_hba_attr *) (entries + size);
        eiter->type = __constant_cpu_to_be16(FDMI_HBA_MANUFACTURER);
-       strcpy(eiter->a.manufacturer, "QLogic Corporation");
-       alen = strlen(eiter->a.manufacturer);
+       alen = strlen(QLA2XXX_MANUFACTURER);
+       strncpy(eiter->a.manufacturer, QLA2XXX_MANUFACTURER, alen + 1);
        alen += (alen & 3) ? (4 - (alen & 3)) : 4;
        eiter->len = cpu_to_be16(4 + alen);
        size += 4 + alen;
@@ -1649,8 +1649,8 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
        /* OS device name. */
        eiter = (struct ct_fdmi_port_attr *) (entries + size);
        eiter->type = __constant_cpu_to_be16(FDMI_PORT_OS_DEVICE_NAME);
-       strcpy(eiter->a.os_dev_name, QLA2XXX_DRIVER_NAME);
-       alen = strlen(eiter->a.os_dev_name);
+       alen = strlen(QLA2XXX_DRIVER_NAME);
+       strncpy(eiter->a.os_dev_name, QLA2XXX_DRIVER_NAME, alen + 1);
        alen += (alen & 3) ? (4 - (alen & 3)) : 4;
        eiter->len = cpu_to_be16(4 + alen);
        size += 4 + alen;
index 563eee3..edf4d14 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2012 QLogic Corporation
+ * Copyright (c)  2003-2013 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -70,9 +70,7 @@ qla2x00_sp_free(void *data, void *ptr)
        struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
 
        del_timer(&iocb->timer);
-       mempool_free(sp, vha->hw->srb_mempool);
-
-       QLA_VHA_MARK_NOT_BUSY(vha);
+       qla2x00_rel_sp(vha, sp);
 }
 
 /* Asynchronous Login/Logout Routines -------------------------------------- */
@@ -525,7 +523,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
        vha->flags.reset_active = 0;
        ha->flags.pci_channel_io_perm_failure = 0;
        ha->flags.eeh_busy = 0;
-       ha->flags.thermal_supported = 1;
+       ha->thermal_support = THERMAL_SUPPORT_I2C|THERMAL_SUPPORT_ISP;
        atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
        atomic_set(&vha->loop_state, LOOP_DOWN);
        vha->device_flags = DFLG_NO_CABLE;
@@ -621,6 +619,8 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
        if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
                qla24xx_read_fcp_prio_cfg(vha);
 
+       qla2x00_set_driver_version(vha, QLA2XXX_VERSION);
+
        return (rval);
 }
 
@@ -1559,6 +1559,47 @@ done:
        return rval;
 }
 
+int
+qla2x00_alloc_outstanding_cmds(struct qla_hw_data *ha, struct req_que *req)
+{
+       /* Don't try to reallocate the array */
+       if (req->outstanding_cmds)
+               return QLA_SUCCESS;
+
+       if (!IS_FWI2_CAPABLE(ha) || (ha->mqiobase &&
+           (ql2xmultique_tag || ql2xmaxqueues > 1)))
+               req->num_outstanding_cmds = DEFAULT_OUTSTANDING_COMMANDS;
+       else {
+               if (ha->fw_xcb_count <= ha->fw_iocb_count)
+                       req->num_outstanding_cmds = ha->fw_xcb_count;
+               else
+                       req->num_outstanding_cmds = ha->fw_iocb_count;
+       }
+
+       req->outstanding_cmds = kzalloc(sizeof(srb_t *) *
+           req->num_outstanding_cmds, GFP_KERNEL);
+
+       if (!req->outstanding_cmds) {
+               /*
+                * Try to allocate a minimal size just so we can get through
+                * initialization.
+                */
+               req->num_outstanding_cmds = MIN_OUTSTANDING_COMMANDS;
+               req->outstanding_cmds = kzalloc(sizeof(srb_t *) *
+                   req->num_outstanding_cmds, GFP_KERNEL);
+
+               if (!req->outstanding_cmds) {
+                       ql_log(ql_log_fatal, NULL, 0x0126,
+                           "Failed to allocate memory for "
+                           "outstanding_cmds for req_que %p.\n", req);
+                       req->num_outstanding_cmds = 0;
+                       return QLA_FUNCTION_FAILED;
+               }
+       }
+
+       return QLA_SUCCESS;
+}
+
 /**
  * qla2x00_setup_chip() - Load and start RISC firmware.
  * @ha: HA context
@@ -1628,9 +1669,18 @@ enable_82xx_npiv:
                                                    MIN_MULTI_ID_FABRIC - 1;
                                }
                                qla2x00_get_resource_cnts(vha, NULL,
-                                   &ha->fw_xcb_count, NULL, NULL,
+                                   &ha->fw_xcb_count, NULL, &ha->fw_iocb_count,
                                    &ha->max_npiv_vports, NULL);
 
+                               /*
+                                * Allocate the array of outstanding commands
+                                * now that we know the firmware resources.
+                                */
+                               rval = qla2x00_alloc_outstanding_cmds(ha,
+                                   vha->req);
+                               if (rval != QLA_SUCCESS)
+                                       goto failed;
+
                                if (!fw_major_version && ql2xallocfwdump
                                    && !IS_QLA82XX(ha))
                                        qla2x00_alloc_fw_dump(vha);
@@ -1914,7 +1964,7 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
                WRT_REG_DWORD(&reg->isp24.rsp_q_in, 0);
                WRT_REG_DWORD(&reg->isp24.rsp_q_out, 0);
        }
-       qlt_24xx_config_rings(vha, reg);
+       qlt_24xx_config_rings(vha);
 
        /* PCI posting */
        RD_REG_DWORD(&ioreg->hccr);
@@ -1948,7 +1998,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
                req = ha->req_q_map[que];
                if (!req)
                        continue;
-               for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
+               for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
                        req->outstanding_cmds[cnt] = NULL;
 
                req->current_outstanding_cmd = 1;
@@ -2157,6 +2207,7 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
        char            connect_type[22];
        struct qla_hw_data *ha = vha->hw;
        unsigned long flags;
+       scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
 
        /* Get host addresses. */
        rval = qla2x00_get_adapter_id(vha,
@@ -2170,6 +2221,13 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
                } else {
                        ql_log(ql_log_warn, vha, 0x2009,
                            "Unable to get host loop ID.\n");
+                       if (IS_FWI2_CAPABLE(ha) && (vha == base_vha) &&
+                           (rval == QLA_COMMAND_ERROR && loop_id == 0x1b)) {
+                               ql_log(ql_log_warn, vha, 0x1151,
+                                   "Doing link init.\n");
+                               if (qla24xx_link_initialize(vha) == QLA_SUCCESS)
+                                       return rval;
+                       }
                        set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
                }
                return (rval);
@@ -2690,7 +2748,6 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
        fcport->loop_id = FC_NO_LOOP_ID;
        qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
        fcport->supported_classes = FC_COS_UNSPECIFIED;
-       fcport->scan_state = QLA_FCPORT_SCAN_NONE;
 
        return fcport;
 }
@@ -3103,7 +3160,7 @@ static int
 qla2x00_configure_fabric(scsi_qla_host_t *vha)
 {
        int     rval;
-       fc_port_t       *fcport;
+       fc_port_t       *fcport, *fcptemp;
        uint16_t        next_loopid;
        uint16_t        mb[MAILBOX_REGISTER_COUNT];
        uint16_t        loop_id;
@@ -3141,7 +3198,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
                    0xfc, mb, BIT_1|BIT_0);
                if (rval != QLA_SUCCESS) {
                        set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
-                       break;
+                       return rval;
                }
                if (mb[0] != MBS_COMMAND_COMPLETE) {
                        ql_dbg(ql_dbg_disc, vha, 0x2042,
@@ -3173,16 +3230,21 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
                        }
                }
 
+#define QLA_FCPORT_SCAN                1
+#define QLA_FCPORT_FOUND       2
+
+               list_for_each_entry(fcport, &vha->vp_fcports, list) {
+                       fcport->scan_state = QLA_FCPORT_SCAN;
+               }
+
                rval = qla2x00_find_all_fabric_devs(vha, &new_fcports);
                if (rval != QLA_SUCCESS)
                        break;
 
-               /* Add new ports to existing port list */
-               list_splice_tail_init(&new_fcports, &vha->vp_fcports);
-
-               /* Starting free loop ID. */
-               next_loopid = ha->min_external_loopid;
-
+               /*
+                * Logout all previous fabric devices marked lost, except
+                * FCP2 devices.
+                */
                list_for_each_entry(fcport, &vha->vp_fcports, list) {
                        if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
                                break;
@@ -3190,8 +3252,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
                        if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
                                continue;
 
-                       /* Logout lost/gone fabric devices (non-FCP2) */
-                       if (fcport->scan_state != QLA_FCPORT_SCAN_FOUND &&
+                       if (fcport->scan_state == QLA_FCPORT_SCAN &&
                            atomic_read(&fcport->state) == FCS_ONLINE) {
                                qla2x00_mark_device_lost(vha, fcport,
                                    ql2xplogiabsentdevice, 0);
@@ -3204,30 +3265,74 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
                                            fcport->d_id.b.domain,
                                            fcport->d_id.b.area,
                                            fcport->d_id.b.al_pa);
+                                       fcport->loop_id = FC_NO_LOOP_ID;
                                }
-                               continue;
                        }
-                       fcport->scan_state = QLA_FCPORT_SCAN_NONE;
-
-                       /* Login fabric devices that need a login */
-                       if ((fcport->flags & FCF_LOGIN_NEEDED) != 0 &&
-                           atomic_read(&vha->loop_down_timer) == 0) {
-                               if (fcport->loop_id == FC_NO_LOOP_ID) {
-                                       fcport->loop_id = next_loopid;
-                                       rval = qla2x00_find_new_loop_id(
-                                           base_vha, fcport);
-                                       if (rval != QLA_SUCCESS) {
-                                               /* Ran out of IDs to use */
-                                               continue;
-                                       }
+               }
+
+               /* Starting free loop ID. */
+               next_loopid = ha->min_external_loopid;
+
+               /*
+                * Scan through our port list and login entries that need to be
+                * logged in.
+                */
+               list_for_each_entry(fcport, &vha->vp_fcports, list) {
+                       if (atomic_read(&vha->loop_down_timer) ||
+                           test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
+                               break;
+
+                       if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
+                           (fcport->flags & FCF_LOGIN_NEEDED) == 0)
+                               continue;
+
+                       if (fcport->loop_id == FC_NO_LOOP_ID) {
+                               fcport->loop_id = next_loopid;
+                               rval = qla2x00_find_new_loop_id(
+                                   base_vha, fcport);
+                               if (rval != QLA_SUCCESS) {
+                                       /* Ran out of IDs to use */
+                                       break;
                                }
                        }
+                       /* Login and update database */
+                       qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
+               }
+
+               /* Exit if out of loop IDs. */
+               if (rval != QLA_SUCCESS) {
+                       break;
+               }
+
+               /*
+                * Login and add the new devices to our port list.
+                */
+               list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) {
+                       if (atomic_read(&vha->loop_down_timer) ||
+                           test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
+                               break;
+
+                       /* Find a new loop ID to use. */
+                       fcport->loop_id = next_loopid;
+                       rval = qla2x00_find_new_loop_id(base_vha, fcport);
+                       if (rval != QLA_SUCCESS) {
+                               /* Ran out of IDs to use */
+                               break;
+                       }
 
                        /* Login and update database */
                        qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
+
+                       list_move_tail(&fcport->list, &vha->vp_fcports);
                }
        } while (0);
 
+       /* Free all new device structures not processed. */
+       list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) {
+               list_del(&fcport->list);
+               kfree(fcport);
+       }
+
        if (rval) {
                ql_dbg(ql_dbg_disc, vha, 0x2068,
                    "Configure fabric error exit rval=%d.\n", rval);
@@ -3263,8 +3368,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
        int             first_dev, last_dev;
        port_id_t       wrap = {}, nxt_d_id;
        struct qla_hw_data *ha = vha->hw;
-       struct scsi_qla_host *vp, *base_vha = pci_get_drvdata(ha->pdev);
-       struct scsi_qla_host *tvp;
+       struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
 
        rval = QLA_SUCCESS;
 
@@ -3377,22 +3481,8 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
                        continue;
 
                /* Bypass virtual ports of the same host. */
-               found = 0;
-               if (ha->num_vhosts) {
-                       unsigned long flags;
-
-                       spin_lock_irqsave(&ha->vport_slock, flags);
-                       list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
-                               if (new_fcport->d_id.b24 == vp->d_id.b24) {
-                                       found = 1;
-                                       break;
-                               }
-                       }
-                       spin_unlock_irqrestore(&ha->vport_slock, flags);
-
-                       if (found)
-                               continue;
-               }
+               if (qla2x00_is_a_vp_did(vha, new_fcport->d_id.b24))
+                       continue;
 
                /* Bypass if same domain and area of adapter. */
                if (((new_fcport->d_id.b24 & 0xffff00) ==
@@ -3417,7 +3507,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
                            WWN_SIZE))
                                continue;
 
-                       fcport->scan_state = QLA_FCPORT_SCAN_FOUND;
+                       fcport->scan_state = QLA_FCPORT_FOUND;
 
                        found++;
 
@@ -5004,7 +5094,7 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
        return rval;
 }
 
-#define QLA_FW_URL "ftp://ftp.qlogic.com/outgoing/linux/firmware/"
+#define QLA_FW_URL "http://ldriver.qlogic.com/firmware/"
 
 int
 qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
@@ -5529,6 +5619,8 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
        if (IS_T10_PI_CAPABLE(ha))
                nv->frame_payload_size &= ~7;
 
+       qlt_81xx_config_nvram_stage1(vha, nv);
+
        /* Reset Initialization control block */
        memset(icb, 0, ha->init_cb_size);
 
@@ -5569,6 +5661,8 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
        qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
            "QLE8XXX");
 
+       qlt_81xx_config_nvram_stage2(vha, icb);
+
        /* Use alternate WWN? */
        if (nv->host_p & __constant_cpu_to_le32(BIT_15)) {
                memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
index c0462c0..68e2c4a 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2012 QLogic Corporation
+ * Copyright (c)  2003-2013 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -197,6 +197,13 @@ done:
        return sp;
 }
 
+static inline void
+qla2x00_rel_sp(scsi_qla_host_t *vha, srb_t *sp)
+{
+       mempool_free(sp, vha->hw->srb_mempool);
+       QLA_VHA_MARK_NOT_BUSY(vha);
+}
+
 static inline void
 qla2x00_init_timer(srb_t *sp, unsigned long tmo)
 {
@@ -213,3 +220,22 @@ qla2x00_gid_list_size(struct qla_hw_data *ha)
 {
        return sizeof(struct gid_list_info) * ha->max_fibre_devices;
 }
+
+static inline void
+qla2x00_do_host_ramp_up(scsi_qla_host_t *vha)
+{
+       if (vha->hw->cfg_lun_q_depth >= ql2xmaxqdepth)
+               return;
+
+       /* Wait at least HOST_QUEUE_RAMPDOWN_INTERVAL before ramping up */
+       if (time_before(jiffies, (vha->hw->host_last_rampdown_time +
+           HOST_QUEUE_RAMPDOWN_INTERVAL)))
+               return;
+
+       /* Wait at least HOST_QUEUE_RAMPUP_INTERVAL between each ramp up */
+       if (time_before(jiffies, (vha->hw->host_last_rampup_time +
+           HOST_QUEUE_RAMPUP_INTERVAL)))
+               return;
+
+       set_bit(HOST_RAMP_UP_QUEUE_DEPTH, &vha->dpc_flags);
+}
index a481684..d263031 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2012 QLogic Corporation
+ * Copyright (c)  2003-2013 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -349,14 +349,14 @@ qla2x00_start_scsi(srb_t *sp)
 
        /* Check for room in outstanding command list. */
        handle = req->current_outstanding_cmd;
-       for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
+       for (index = 1; index < req->num_outstanding_cmds; index++) {
                handle++;
-               if (handle == MAX_OUTSTANDING_COMMANDS)
+               if (handle == req->num_outstanding_cmds)
                        handle = 1;
                if (!req->outstanding_cmds[handle])
                        break;
        }
-       if (index == MAX_OUTSTANDING_COMMANDS)
+       if (index == req->num_outstanding_cmds)
                goto queuing_error;
 
        /* Map the sg table so we have an accurate count of sg entries needed */
@@ -1467,16 +1467,15 @@ qla24xx_start_scsi(srb_t *sp)
 
        /* Check for room in outstanding command list. */
        handle = req->current_outstanding_cmd;
-       for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
+       for (index = 1; index < req->num_outstanding_cmds; index++) {
                handle++;
-               if (handle == MAX_OUTSTANDING_COMMANDS)
+               if (handle == req->num_outstanding_cmds)
                        handle = 1;
                if (!req->outstanding_cmds[handle])
                        break;
        }
-       if (index == MAX_OUTSTANDING_COMMANDS) {
+       if (index == req->num_outstanding_cmds)
                goto queuing_error;
-       }
 
        /* Map the sg table so we have an accurate count of sg entries needed */
        if (scsi_sg_count(cmd)) {
@@ -1641,15 +1640,15 @@ qla24xx_dif_start_scsi(srb_t *sp)
 
        /* Check for room in outstanding command list. */
        handle = req->current_outstanding_cmd;
-       for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
+       for (index = 1; index < req->num_outstanding_cmds; index++) {
                handle++;
-               if (handle == MAX_OUTSTANDING_COMMANDS)
+               if (handle == req->num_outstanding_cmds)
                        handle = 1;
                if (!req->outstanding_cmds[handle])
                        break;
        }
 
-       if (index == MAX_OUTSTANDING_COMMANDS)
+       if (index == req->num_outstanding_cmds)
                goto queuing_error;
 
        /* Compute number of required data segments */
@@ -1822,14 +1821,14 @@ qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
 
        /* Check for room in outstanding command list. */
        handle = req->current_outstanding_cmd;
-       for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
+       for (index = 1; index < req->num_outstanding_cmds; index++) {
                handle++;
-               if (handle == MAX_OUTSTANDING_COMMANDS)
+               if (handle == req->num_outstanding_cmds)
                        handle = 1;
                if (!req->outstanding_cmds[handle])
                        break;
        }
-       if (index == MAX_OUTSTANDING_COMMANDS) {
+       if (index == req->num_outstanding_cmds) {
                ql_log(ql_log_warn, vha, 0x700b,
                    "No room on outstanding cmd array.\n");
                goto queuing_error;
@@ -2263,14 +2262,14 @@ qla82xx_start_scsi(srb_t *sp)
 
        /* Check for room in outstanding command list. */
        handle = req->current_outstanding_cmd;
-       for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
+       for (index = 1; index < req->num_outstanding_cmds; index++) {
                handle++;
-               if (handle == MAX_OUTSTANDING_COMMANDS)
+               if (handle == req->num_outstanding_cmds)
                        handle = 1;
                if (!req->outstanding_cmds[handle])
                        break;
        }
-       if (index == MAX_OUTSTANDING_COMMANDS)
+       if (index == req->num_outstanding_cmds)
                goto queuing_error;
 
        /* Map the sg table so we have an accurate count of sg entries needed */
@@ -2767,15 +2766,15 @@ qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
 
        /* Check for room in outstanding command list. */
        handle = req->current_outstanding_cmd;
-       for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
+       for (index = 1; index < req->num_outstanding_cmds; index++) {
                handle++;
-       if (handle == MAX_OUTSTANDING_COMMANDS)
+       if (handle == req->num_outstanding_cmds)
                handle = 1;
        if (!req->outstanding_cmds[handle])
                break;
        }
 
-       if (index == MAX_OUTSTANDING_COMMANDS) {
+       if (index == req->num_outstanding_cmds) {
                rval = EXT_STATUS_BUSY;
                goto queuing_error;
        }
index 873c820..e9dbd74 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2012 QLogic Corporation
+ * Copyright (c)  2003-2013 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -13,6 +13,8 @@
 #include <scsi/scsi_bsg_fc.h>
 #include <scsi/scsi_eh.h>
 
+#include "qla_target.h"
+
 static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
 static void qla2x00_process_completed_request(struct scsi_qla_host *,
        struct req_que *, uint32_t);
@@ -489,10 +491,37 @@ qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
        if (mb[1] & IDC_DEVICE_STATE_CHANGE) {
                ql_log(ql_log_info, vha, 0x506a,
                    "IDC Device-State changed = 0x%x.\n", mb[4]);
+               if (ha->flags.nic_core_reset_owner)
+                       return;
                qla83xx_schedule_work(vha, MBA_IDC_AEN);
        }
 }
 
+int
+qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry)
+{
+       struct qla_hw_data *ha = vha->hw;
+       scsi_qla_host_t *vp;
+       uint32_t vp_did;
+       unsigned long flags;
+       int ret = 0;
+
+       if (!ha->num_vhosts)
+               return ret;
+
+       spin_lock_irqsave(&ha->vport_slock, flags);
+       list_for_each_entry(vp, &ha->vp_list, list) {
+               vp_did = vp->d_id.b24;
+               if (vp_did == rscn_entry) {
+                       ret = 1;
+                       break;
+               }
+       }
+       spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+       return ret;
+}
+
 /**
  * qla2x00_async_event() - Process aynchronous events.
  * @ha: SCSI driver HA context
@@ -899,6 +928,10 @@ skip_rio:
                /* Ignore reserved bits from RSCN-payload. */
                rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
 
+               /* Skip RSCNs for virtual ports on the same physical port */
+               if (qla2x00_is_a_vp_did(vha, rscn_entry))
+                       break;
+
                atomic_set(&vha->loop_down_timer, 0);
                vha->flags.management_server_logged_in = 0;
 
@@ -983,14 +1016,25 @@ skip_rio:
                    mb[1], mb[2], mb[3]);
                break;
        case MBA_IDC_NOTIFY:
-               /* See if we need to quiesce any I/O */
-               if (IS_QLA8031(vha->hw))
-                       if ((mb[2] & 0x7fff) == MBC_PORT_RESET ||
-                           (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) {
+               if (IS_QLA8031(vha->hw)) {
+                       mb[4] = RD_REG_WORD(&reg24->mailbox4);
+                       if (((mb[2] & 0x7fff) == MBC_PORT_RESET ||
+                           (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) &&
+                           (mb[4] & INTERNAL_LOOPBACK_MASK) != 0) {
                                set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
+                               /*
+                                * Extend loop down timer since port is active.
+                                */
+                               if (atomic_read(&vha->loop_state) == LOOP_DOWN)
+                                       atomic_set(&vha->loop_down_timer,
+                                           LOOP_DOWN_TIME);
                                qla2xxx_wake_dpc(vha);
                        }
+               }
        case MBA_IDC_COMPLETE:
+               if (ha->notify_lb_portup_comp)
+                       complete(&ha->lb_portup_comp);
+               /* Fallthru */
        case MBA_IDC_TIME_EXT:
                if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw))
                        qla81xx_idc_event(vha, mb[0], mb[1]);
@@ -1029,7 +1073,7 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
        struct qla_hw_data *ha = vha->hw;
 
        /* Validate handle. */
-       if (index >= MAX_OUTSTANDING_COMMANDS) {
+       if (index >= req->num_outstanding_cmds) {
                ql_log(ql_log_warn, vha, 0x3014,
                    "Invalid SCSI command index (%x).\n", index);
 
@@ -1067,7 +1111,7 @@ qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
        uint16_t index;
 
        index = LSW(pkt->handle);
-       if (index >= MAX_OUTSTANDING_COMMANDS) {
+       if (index >= req->num_outstanding_cmds) {
                ql_log(ql_log_warn, vha, 0x5031,
                    "Invalid command index (%x).\n", index);
                if (IS_QLA82XX(ha))
@@ -1740,7 +1784,7 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
        sts24 = (struct sts_entry_24xx *) pkt;
 
        /* Validate handle. */
-       if (index >= MAX_OUTSTANDING_COMMANDS) {
+       if (index >= req->num_outstanding_cmds) {
                ql_log(ql_log_warn, vha, 0x70af,
                    "Invalid SCSI completion handle 0x%x.\n", index);
                set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
@@ -1910,9 +1954,9 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
        req = ha->req_q_map[que];
 
        /* Validate handle. */
-       if (handle < MAX_OUTSTANDING_COMMANDS) {
+       if (handle < req->num_outstanding_cmds)
                sp = req->outstanding_cmds[handle];
-       } else
+       else
                sp = NULL;
 
        if (sp == NULL) {
@@ -1934,6 +1978,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
 
        /* Fast path completion. */
        if (comp_status == CS_COMPLETE && scsi_status == 0) {
+               qla2x00_do_host_ramp_up(vha);
                qla2x00_process_completed_request(vha, req, handle);
 
                return;
@@ -2193,6 +2238,9 @@ out:
                    cp->cmnd[8], cp->cmnd[9], scsi_bufflen(cp), rsp_info_len,
                    resid_len, fw_resid_len);
 
+       if (!res)
+               qla2x00_do_host_ramp_up(vha);
+
        if (rsp->status_srb == NULL)
                sp->done(ha, sp, res);
 }
@@ -2747,6 +2795,12 @@ static struct qla_init_msix_entry qla82xx_msix_entries[2] = {
        { "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
 };
 
+static struct qla_init_msix_entry qla83xx_msix_entries[3] = {
+       { "qla2xxx (default)", qla24xx_msix_default },
+       { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
+       { "qla2xxx (atio_q)", qla83xx_msix_atio_q },
+};
+
 static void
 qla24xx_disable_msix(struct qla_hw_data *ha)
 {
@@ -2827,9 +2881,13 @@ msix_failed:
        }
 
        /* Enable MSI-X vectors for the base queue */
-       for (i = 0; i < 2; i++) {
+       for (i = 0; i < ha->msix_count; i++) {
                qentry = &ha->msix_entries[i];
-               if (IS_QLA82XX(ha)) {
+               if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) {
+                       ret = request_irq(qentry->vector,
+                               qla83xx_msix_entries[i].handler,
+                               0, qla83xx_msix_entries[i].name, rsp);
+               } else if (IS_QLA82XX(ha)) {
                        ret = request_irq(qentry->vector,
                                qla82xx_msix_entries[i].handler,
                                0, qla82xx_msix_entries[i].name, rsp);
index 68c55ea..186dd59 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2012 QLogic Corporation
+ * Copyright (c)  2003-2013 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -900,13 +900,13 @@ qla2x00_abort_command(srb_t *sp)
            "Entered %s.\n", __func__);
 
        spin_lock_irqsave(&ha->hardware_lock, flags);
-       for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
+       for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
                if (req->outstanding_cmds[handle] == sp)
                        break;
        }
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
-       if (handle == MAX_OUTSTANDING_COMMANDS) {
+       if (handle == req->num_outstanding_cmds) {
                /* command not found */
                return QLA_FUNCTION_FAILED;
        }
@@ -1632,6 +1632,54 @@ qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
        return rval;
 }
 
+/*
+ * qla24xx_link_initialize
+ *     Issue link initialization mailbox command.
+ *
+ * Input:
+ *     ha = adapter block pointer.
+ *     TARGET_QUEUE_LOCK must be released.
+ *     ADAPTER_STATE_LOCK must be released.
+ *
+ * Returns:
+ *     qla2x00 local function return status code.
+ *
+ * Context:
+ *     Kernel context.
+ */
+int
+qla24xx_link_initialize(scsi_qla_host_t *vha)
+{
+       int rval;
+       mbx_cmd_t mc;
+       mbx_cmd_t *mcp = &mc;
+
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1152,
+           "Entered %s.\n", __func__);
+
+       if (!IS_FWI2_CAPABLE(vha->hw) || IS_CNA_CAPABLE(vha->hw))
+               return QLA_FUNCTION_FAILED;
+
+       mcp->mb[0] = MBC_LINK_INITIALIZATION;
+       mcp->mb[1] = BIT_6|BIT_4;
+       mcp->mb[2] = 0;
+       mcp->mb[3] = 0;
+       mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
+       mcp->in_mb = MBX_0;
+       mcp->tov = MBX_TOV_SECONDS;
+       mcp->flags = 0;
+       rval = qla2x00_mailbox_command(vha, mcp);
+
+       if (rval != QLA_SUCCESS) {
+               ql_dbg(ql_dbg_mbx, vha, 0x1153, "Failed=%x.\n", rval);
+       } else {
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1154,
+                   "Done %s.\n", __func__);
+       }
+
+       return rval;
+}
+
 /*
  * qla2x00_lip_reset
  *     Issue LIP reset mailbox command.
@@ -2535,12 +2583,12 @@ qla24xx_abort_command(srb_t *sp)
            "Entered %s.\n", __func__);
 
        spin_lock_irqsave(&ha->hardware_lock, flags);
-       for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
+       for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
                if (req->outstanding_cmds[handle] == sp)
                        break;
        }
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
-       if (handle == MAX_OUTSTANDING_COMMANDS) {
+       if (handle == req->num_outstanding_cmds) {
                /* Command not found. */
                return QLA_FUNCTION_FAILED;
        }
@@ -3093,6 +3141,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
        struct qla_hw_data *ha = vha->hw;
        scsi_qla_host_t *vp;
        unsigned long   flags;
+       int found;
 
        ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6,
            "Entered %s.\n", __func__);
@@ -3128,13 +3177,17 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
                        return;
                }
 
+               found = 0;
                spin_lock_irqsave(&ha->vport_slock, flags);
-               list_for_each_entry(vp, &ha->vp_list, list)
-                       if (vp_idx == vp->vp_idx)
+               list_for_each_entry(vp, &ha->vp_list, list) {
+                       if (vp_idx == vp->vp_idx) {
+                               found = 1;
                                break;
+                       }
+               }
                spin_unlock_irqrestore(&ha->vport_slock, flags);
 
-               if (!vp)
+               if (!found)
                        return;
 
                vp->d_id.b.domain = rptid_entry->port_id[2];
@@ -3813,6 +3866,97 @@ qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
        return rval;
 }
 
+int
+qla2x00_set_driver_version(scsi_qla_host_t *vha, char *version)
+{
+       int rval;
+       mbx_cmd_t mc;
+       mbx_cmd_t *mcp = &mc;
+       int len;
+       uint16_t dwlen;
+       uint8_t *str;
+       dma_addr_t str_dma;
+       struct qla_hw_data *ha = vha->hw;
+
+       if (!IS_FWI2_CAPABLE(ha) || IS_QLA82XX(ha))
+               return QLA_FUNCTION_FAILED;
+
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1155,
+           "Entered %s.\n", __func__);
+
+       str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma);
+       if (!str) {
+               ql_log(ql_log_warn, vha, 0x1156,
+                   "Failed to allocate driver version param.\n");
+               return QLA_MEMORY_ALLOC_FAILED;
+       }
+
+       memcpy(str, "\x7\x3\x11\x0", 4);
+       dwlen = str[0];
+       len = dwlen * sizeof(uint32_t) - 4;
+       memset(str + 4, 0, len);
+       if (len > strlen(version))
+               len = strlen(version);
+       memcpy(str + 4, version, len);
+
+       mcp->mb[0] = MBC_SET_RNID_PARAMS;
+       mcp->mb[1] = RNID_TYPE_SET_VERSION << 8 | dwlen;
+       mcp->mb[2] = MSW(LSD(str_dma));
+       mcp->mb[3] = LSW(LSD(str_dma));
+       mcp->mb[6] = MSW(MSD(str_dma));
+       mcp->mb[7] = LSW(MSD(str_dma));
+       mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
+       mcp->in_mb = MBX_0;
+       mcp->tov = MBX_TOV_SECONDS;
+       mcp->flags = 0;
+       rval = qla2x00_mailbox_command(vha, mcp);
+
+       if (rval != QLA_SUCCESS) {
+               ql_dbg(ql_dbg_mbx, vha, 0x1157,
+                   "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+       } else {
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1158,
+                   "Done %s.\n", __func__);
+       }
+
+       dma_pool_free(ha->s_dma_pool, str, str_dma);
+
+       return rval;
+}
+
+static int
+qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp)
+{
+       int rval;
+       mbx_cmd_t mc;
+       mbx_cmd_t *mcp = &mc;
+
+       if (!IS_FWI2_CAPABLE(vha->hw))
+               return QLA_FUNCTION_FAILED;
+
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159,
+           "Entered %s.\n", __func__);
+
+       mcp->mb[0] = MBC_GET_RNID_PARAMS;
+       mcp->mb[1] = RNID_TYPE_ASIC_TEMP << 8;
+       mcp->out_mb = MBX_1|MBX_0;
+       mcp->in_mb = MBX_1|MBX_0;
+       mcp->tov = MBX_TOV_SECONDS;
+       mcp->flags = 0;
+       rval = qla2x00_mailbox_command(vha, mcp);
+       *temp = mcp->mb[1];
+
+       if (rval != QLA_SUCCESS) {
+               ql_dbg(ql_dbg_mbx, vha, 0x115a,
+                   "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
+       } else {
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b,
+                   "Done %s.\n", __func__);
+       }
+
+       return rval;
+}
+
 int
 qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
        uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
@@ -4415,38 +4559,45 @@ qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
 }
 
 int
-qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp, uint16_t *frac)
+qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp)
 {
-       int rval;
-       uint8_t byte;
+       int rval = QLA_FUNCTION_FAILED;
        struct qla_hw_data *ha = vha->hw;
+       uint8_t byte;
 
        ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ca,
            "Entered %s.\n", __func__);
 
-       /* Integer part */
-       rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x01, 1,
-               BIT_13|BIT_12|BIT_0);
-       if (rval != QLA_SUCCESS) {
-               ql_dbg(ql_dbg_mbx, vha, 0x10c9, "Failed=%x.\n", rval);
-               ha->flags.thermal_supported = 0;
-               goto fail;
+       if (ha->thermal_support & THERMAL_SUPPORT_I2C) {
+               rval = qla2x00_read_sfp(vha, 0, &byte,
+                   0x98, 0x1, 1, BIT_13|BIT_12|BIT_0);
+               *temp = byte;
+               if (rval == QLA_SUCCESS)
+                       goto done;
+
+               ql_log(ql_log_warn, vha, 0x10c9,
+                   "Thermal not supported by I2C.\n");
+               ha->thermal_support &= ~THERMAL_SUPPORT_I2C;
        }
-       *temp = byte;
 
-       /* Fraction part */
-       rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x10, 1,
-               BIT_13|BIT_12|BIT_0);
-       if (rval != QLA_SUCCESS) {
-               ql_dbg(ql_dbg_mbx, vha, 0x1019, "Failed=%x.\n", rval);
-               ha->flags.thermal_supported = 0;
-               goto fail;
+       if (ha->thermal_support & THERMAL_SUPPORT_ISP) {
+               rval = qla2x00_read_asic_temperature(vha, temp);
+               if (rval == QLA_SUCCESS)
+                       goto done;
+
+               ql_log(ql_log_warn, vha, 0x1019,
+                   "Thermal not supported by ISP.\n");
+               ha->thermal_support &= ~THERMAL_SUPPORT_ISP;
        }
-       *frac = (byte >> 6) * 25;
 
+       ql_log(ql_log_warn, vha, 0x1150,
+           "Thermal not supported by this card "
+           "(ignoring further requests).\n");
+       return rval;
+
+done:
        ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1018,
            "Done %s.\n", __func__);
-fail:
        return rval;
 }
 
index 20fd974..f868a9f 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2012 QLogic Corporation
+ * Copyright (c)  2003-2013 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -523,6 +523,7 @@ qla25xx_free_req_que(struct scsi_qla_host *vha, struct req_que *req)
                clear_bit(que_id, ha->req_qid_map);
                mutex_unlock(&ha->vport_lock);
        }
+       kfree(req->outstanding_cmds);
        kfree(req);
        req = NULL;
 }
@@ -649,6 +650,10 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
                goto que_failed;
        }
 
+       ret = qla2x00_alloc_outstanding_cmds(ha, req);
+       if (ret != QLA_SUCCESS)
+               goto que_failed;
+
        mutex_lock(&ha->vport_lock);
        que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
        if (que_id >= ha->max_req_queues) {
@@ -685,7 +690,7 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
            "options=0x%x.\n", req->options);
        ql_dbg(ql_dbg_init, base_vha, 0x00dd,
            "options=0x%x.\n", req->options);
-       for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
+       for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
                req->outstanding_cmds[cnt] = NULL;
        req->current_outstanding_cmd = 1;
 
index 3e3f593..10754f5 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2012 QLogic Corporation
+ * Copyright (c)  2003-2013 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -847,14 +847,21 @@ static int
 qla82xx_rom_lock(struct qla_hw_data *ha)
 {
        int done = 0, timeout = 0;
+       uint32_t lock_owner = 0;
+       scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
 
        while (!done) {
                /* acquire semaphore2 from PCI HW block */
                done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_LOCK));
                if (done == 1)
                        break;
-               if (timeout >= qla82xx_rom_lock_timeout)
+               if (timeout >= qla82xx_rom_lock_timeout) {
+                       lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID);
+                       ql_dbg(ql_dbg_p3p, vha, 0xb085,
+                           "Failed to acquire rom lock, acquired by %d.\n",
+                           lock_owner);
                        return -1;
+               }
                timeout++;
        }
        qla82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, ROM_LOCK_DRIVER);
@@ -3629,7 +3636,7 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
                        req = ha->req_q_map[que];
                        if (!req)
                                continue;
-                       for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
+                       for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
                                sp = req->outstanding_cmds[cnt];
                                if (sp) {
                                        if (!sp->u.scmd.ctx ||
index 6c953e8..d268e84 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2012 QLogic Corporation
+ * Copyright (c)  2003-2013 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -897,7 +897,7 @@ struct ct6_dsd {
 #define FLT_REG_BOOT_CODE_82XX 0x78
 #define FLT_REG_FW_82XX                0x74
 #define FLT_REG_GOLD_FW_82XX   0x75
-#define FLT_REG_VPD_82XX       0x81
+#define FLT_REG_VPD_8XXX       0x81
 
 #define        FA_VPD_SIZE_82XX        0x400
 
index 10d23f8..2c6dd3d 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2012 QLogic Corporation
+ * Copyright (c)  2003-2013 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -111,8 +111,7 @@ MODULE_PARM_DESC(ql2xfdmienable,
                "Enables FDMI registrations. "
                "0 - no FDMI. Default is 1 - perform FDMI.");
 
-#define MAX_Q_DEPTH    32
-static int ql2xmaxqdepth = MAX_Q_DEPTH;
+int ql2xmaxqdepth = MAX_Q_DEPTH;
 module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(ql2xmaxqdepth,
                "Maximum queue depth to set for each LUN. "
@@ -360,6 +359,9 @@ static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)
                (req->length + 1) * sizeof(request_t),
                req->ring, req->dma);
 
+       if (req)
+               kfree(req->outstanding_cmds);
+
        kfree(req);
        req = NULL;
 }
@@ -628,7 +630,7 @@ qla2x00_sp_free_dma(void *vha, void *ptr)
        }
 
        CMD_SP(cmd) = NULL;
-       mempool_free(sp, ha->srb_mempool);
+       qla2x00_rel_sp(sp->fcport->vha, sp);
 }
 
 static void
@@ -716,9 +718,11 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
                goto qc24_target_busy;
        }
 
-       sp = qla2x00_get_sp(base_vha, fcport, GFP_ATOMIC);
-       if (!sp)
+       sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
+       if (!sp) {
+               set_bit(HOST_RAMP_DOWN_QUEUE_DEPTH, &vha->dpc_flags);
                goto qc24_host_busy;
+       }
 
        sp->u.scmd.cmd = cmd;
        sp->type = SRB_SCSI_CMD;
@@ -731,6 +735,7 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
        if (rval != QLA_SUCCESS) {
                ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3013,
                    "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd);
+               set_bit(HOST_RAMP_DOWN_QUEUE_DEPTH, &vha->dpc_flags);
                goto qc24_host_busy_free_sp;
        }
 
@@ -1010,7 +1015,7 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
        spin_lock_irqsave(&ha->hardware_lock, flags);
        req = vha->req;
        for (cnt = 1; status == QLA_SUCCESS &&
-               cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
+               cnt < req->num_outstanding_cmds; cnt++) {
                sp = req->outstanding_cmds[cnt];
                if (!sp)
                        continue;
@@ -1300,14 +1305,14 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
        }
 
        if (ha->flags.enable_lip_full_login && !IS_CNA_CAPABLE(ha)) {
+               atomic_set(&vha->loop_state, LOOP_DOWN);
+               atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+               qla2x00_mark_all_devices_lost(vha, 0);
                ret = qla2x00_full_login_lip(vha);
                if (ret != QLA_SUCCESS) {
                        ql_dbg(ql_dbg_taskm, vha, 0x802d,
                            "full_login_lip=%d.\n", ret);
                }
-               atomic_set(&vha->loop_state, LOOP_DOWN);
-               atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
-               qla2x00_mark_all_devices_lost(vha, 0);
        }
 
        if (ha->flags.enable_lip_reset) {
@@ -1337,7 +1342,9 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
                req = ha->req_q_map[que];
                if (!req)
                        continue;
-               for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
+               if (!req->outstanding_cmds)
+                       continue;
+               for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
                        sp = req->outstanding_cmds[cnt];
                        if (sp) {
                                req->outstanding_cmds[cnt] = NULL;
@@ -1453,6 +1460,81 @@ qla2x00_change_queue_type(struct scsi_device *sdev, int tag_type)
        return tag_type;
 }
 
+static void
+qla2x00_host_ramp_down_queuedepth(scsi_qla_host_t *vha)
+{
+       scsi_qla_host_t *vp;
+       struct Scsi_Host *shost;
+       struct scsi_device *sdev;
+       struct qla_hw_data *ha = vha->hw;
+       unsigned long flags;
+
+       ha->host_last_rampdown_time = jiffies;
+
+       if (ha->cfg_lun_q_depth <= vha->host->cmd_per_lun)
+               return;
+
+       if ((ha->cfg_lun_q_depth / 2) < vha->host->cmd_per_lun)
+               ha->cfg_lun_q_depth = vha->host->cmd_per_lun;
+       else
+               ha->cfg_lun_q_depth = ha->cfg_lun_q_depth / 2;
+
+       /*
+        * Geometrically ramp down the queue depth for all devices on this
+        * adapter
+        */
+       spin_lock_irqsave(&ha->vport_slock, flags);
+       list_for_each_entry(vp, &ha->vp_list, list) {
+               shost = vp->host;
+               shost_for_each_device(sdev, shost) {
+                       if (sdev->queue_depth > shost->cmd_per_lun) {
+                               if (sdev->queue_depth < ha->cfg_lun_q_depth)
+                                       continue;
+                               ql_log(ql_log_warn, vp, 0x3031,
+                                   "%ld:%d:%d: Ramping down queue depth to %d",
+                                   vp->host_no, sdev->id, sdev->lun,
+                                   ha->cfg_lun_q_depth);
+                               qla2x00_change_queue_depth(sdev,
+                                   ha->cfg_lun_q_depth, SCSI_QDEPTH_DEFAULT);
+                       }
+               }
+       }
+       spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+       return;
+}
+
+static void
+qla2x00_host_ramp_up_queuedepth(scsi_qla_host_t *vha)
+{
+       scsi_qla_host_t *vp;
+       struct Scsi_Host *shost;
+       struct scsi_device *sdev;
+       struct qla_hw_data *ha = vha->hw;
+       unsigned long flags;
+
+       ha->host_last_rampup_time = jiffies;
+       ha->cfg_lun_q_depth++;
+
+       /*
+        * Linearly ramp up the queue depth for all devices on this
+        * adapter
+        */
+       spin_lock_irqsave(&ha->vport_slock, flags);
+       list_for_each_entry(vp, &ha->vp_list, list) {
+               shost = vp->host;
+               shost_for_each_device(sdev, shost) {
+                       if (sdev->queue_depth > ha->cfg_lun_q_depth)
+                               continue;
+                       qla2x00_change_queue_depth(sdev, ha->cfg_lun_q_depth,
+                           SCSI_QDEPTH_RAMP_UP);
+               }
+       }
+       spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+       return;
+}
+
 /**
  * qla2x00_config_dma_addressing() - Configure OS DMA addressing method.
  * @ha: HA context
@@ -1730,6 +1812,9 @@ qla83xx_iospace_config(struct qla_hw_data *ha)
 
 mqiobase_exit:
        ha->msix_count = ha->max_rsp_queues + 1;
+
+       qlt_83xx_iospace_config(ha);
+
        ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011f,
            "MSIX Count:%d.\n", ha->msix_count);
        return 0;
@@ -2230,6 +2315,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
        ha->init_cb_size = sizeof(init_cb_t);
        ha->link_data_rate = PORT_SPEED_UNKNOWN;
        ha->optrom_size = OPTROM_SIZE_2300;
+       ha->cfg_lun_q_depth = ql2xmaxqdepth;
 
        /* Assign ISP specific operations. */
        if (IS_QLA2100(ha)) {
@@ -2307,6 +2393,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
                ha->mbx_count = MAILBOX_REGISTER_COUNT;
                req_length = REQUEST_ENTRY_CNT_24XX;
                rsp_length = RESPONSE_ENTRY_CNT_2300;
+               ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
                ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
                ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
                ha->gid_list_info_size = 8;
@@ -2338,6 +2425,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
                ha->mbx_count = MAILBOX_REGISTER_COUNT;
                req_length = REQUEST_ENTRY_CNT_24XX;
                rsp_length = RESPONSE_ENTRY_CNT_2300;
+               ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
                ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
                ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
                ha->gid_list_info_size = 8;
@@ -2377,6 +2465,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
        complete(&ha->mbx_cmd_comp);
        init_completion(&ha->mbx_intr_comp);
        init_completion(&ha->dcbx_comp);
+       init_completion(&ha->lb_portup_comp);
 
        set_bit(0, (unsigned long *) ha->vp_idx_map);
 
@@ -2720,6 +2809,9 @@ qla2x00_shutdown(struct pci_dev *pdev)
        scsi_qla_host_t *vha;
        struct qla_hw_data  *ha;
 
+       if (!atomic_read(&pdev->enable_cnt))
+               return;
+
        vha = pci_get_drvdata(pdev);
        ha = vha->hw;
 
@@ -3974,6 +4066,8 @@ qla83xx_force_lock_recovery(scsi_qla_host_t *base_vha)
        uint32_t idc_lck_rcvry_stage_mask = 0x3;
        uint32_t idc_lck_rcvry_owner_mask = 0x3c;
        struct qla_hw_data *ha = base_vha->hw;
+       ql_dbg(ql_dbg_p3p, base_vha, 0xb086,
+           "Trying force recovery of the IDC lock.\n");
 
        rval = qla83xx_rd_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY, &data);
        if (rval)
@@ -4065,6 +4159,7 @@ qla83xx_idc_lock(scsi_qla_host_t *base_vha, uint16_t requester_id)
 {
        uint16_t options = (requester_id << 15) | BIT_6;
        uint32_t data;
+       uint32_t lock_owner;
        struct qla_hw_data *ha = base_vha->hw;
 
        /* IDC-lock implementation using driver-lock/lock-id remote registers */
@@ -4076,8 +4171,11 @@ retry_lock:
                        qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID,
                            ha->portnum);
                } else {
+                       qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID,
+                           &lock_owner);
                        ql_dbg(ql_dbg_p3p, base_vha, 0xb063,
-                           "Failed to acquire IDC lock. retrying...\n");
+                           "Failed to acquire IDC lock, acquired by %d, "
+                           "retrying...\n", lock_owner);
 
                        /* Retry/Perform IDC-Lock recovery */
                        if (qla83xx_idc_lock_recovery(base_vha)
@@ -4605,6 +4703,18 @@ qla2x00_do_dpc(void *data)
                        qla2xxx_flash_npiv_conf(base_vha);
                }
 
+               if (test_and_clear_bit(HOST_RAMP_DOWN_QUEUE_DEPTH,
+                   &base_vha->dpc_flags)) {
+                       /* Prevents simultaneous ramp up and down */
+                       clear_bit(HOST_RAMP_UP_QUEUE_DEPTH,
+                           &base_vha->dpc_flags);
+                       qla2x00_host_ramp_down_queuedepth(base_vha);
+               }
+
+               if (test_and_clear_bit(HOST_RAMP_UP_QUEUE_DEPTH,
+                   &base_vha->dpc_flags))
+                       qla2x00_host_ramp_up_queuedepth(base_vha);
+
                if (!ha->interrupts_on)
                        ha->isp_ops->enable_intrs(ha);
 
@@ -4733,7 +4843,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
                                    cpu_flags);
                                req = ha->req_q_map[0];
                                for (index = 1;
-                                   index < MAX_OUTSTANDING_COMMANDS;
+                                   index < req->num_outstanding_cmds;
                                    index++) {
                                        fc_port_t *sfcp;
 
@@ -4802,7 +4912,9 @@ qla2x00_timer(scsi_qla_host_t *vha)
            test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) ||
            test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) ||
            test_bit(VP_DPC_NEEDED, &vha->dpc_flags) ||
-           test_bit(RELOGIN_NEEDED, &vha->dpc_flags))) {
+           test_bit(RELOGIN_NEEDED, &vha->dpc_flags) ||
+           test_bit(HOST_RAMP_DOWN_QUEUE_DEPTH, &vha->dpc_flags) ||
+           test_bit(HOST_RAMP_UP_QUEUE_DEPTH, &vha->dpc_flags))) {
                ql_dbg(ql_dbg_timer, vha, 0x600b,
                    "isp_abort_needed=%d loop_resync_needed=%d "
                    "fcport_update_needed=%d start_dpc=%d "
@@ -4815,12 +4927,15 @@ qla2x00_timer(scsi_qla_host_t *vha)
                ql_dbg(ql_dbg_timer, vha, 0x600c,
                    "beacon_blink_needed=%d isp_unrecoverable=%d "
                    "fcoe_ctx_reset_needed=%d vp_dpc_needed=%d "
-                   "relogin_needed=%d.\n",
+                   "relogin_needed=%d, host_ramp_down_needed=%d "
+                   "host_ramp_up_needed=%d.\n",
                    test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags),
                    test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags),
                    test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags),
                    test_bit(VP_DPC_NEEDED, &vha->dpc_flags),
-                   test_bit(RELOGIN_NEEDED, &vha->dpc_flags));
+                   test_bit(RELOGIN_NEEDED, &vha->dpc_flags),
+                   test_bit(HOST_RAMP_DOWN_QUEUE_DEPTH, &vha->dpc_flags),
+                   test_bit(HOST_RAMP_UP_QUEUE_DEPTH, &vha->dpc_flags));
                qla2xxx_wake_dpc(vha);
        }
 
index 892a81e..46ef0ac 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2012 QLogic Corporation
+ * Copyright (c)  2003-2013 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
index 32fdc2a..3bef673 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2012 QLogic Corporation
+ * Copyright (c)  2003-2013 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -798,20 +798,8 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
                case FLT_REG_BOOTLOAD_82XX:
                        ha->flt_region_bootload = start;
                        break;
-               case FLT_REG_VPD_82XX:
-                       ha->flt_region_vpd = start;
-                       break;
-               case FLT_REG_FCOE_VPD_0:
-                       if (!IS_QLA8031(ha))
-                               break;
-                       ha->flt_region_vpd_nvram = start;
-                       if (ha->flags.port0)
-                               ha->flt_region_vpd = start;
-                       break;
-               case FLT_REG_FCOE_VPD_1:
-                       if (!IS_QLA8031(ha))
-                               break;
-                       if (!ha->flags.port0)
+               case FLT_REG_VPD_8XXX:
+                       if (IS_CNA_CAPABLE(ha))
                                ha->flt_region_vpd = start;
                        break;
                case FLT_REG_FCOE_NVRAM_0:
index 80f4b84..61b5d8c 100644 (file)
@@ -52,7 +52,7 @@ MODULE_PARM_DESC(qlini_mode,
        "\"disabled\" - initiator mode will never be enabled; "
        "\"enabled\" (default) - initiator mode will always stay enabled.");
 
-static int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
+int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
 
 /*
  * From scsi/fc/fc_fcp.h
@@ -1119,6 +1119,7 @@ static void qlt_send_notify_ack(struct scsi_qla_host *vha,
        nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
        nack->u.isp24.status = ntfy->u.isp24.status;
        nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
+       nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
        nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
        nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
        nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
@@ -1570,7 +1571,7 @@ static inline uint32_t qlt_make_handle(struct scsi_qla_host *vha)
        /* always increment cmd handle */
        do {
                ++h;
-               if (h > MAX_OUTSTANDING_COMMANDS)
+               if (h > DEFAULT_OUTSTANDING_COMMANDS)
                        h = 1; /* 0 is QLA_TGT_NULL_HANDLE */
                if (h == ha->tgt.current_handle) {
                        ql_dbg(ql_dbg_tgt, vha, 0xe04e,
@@ -2441,7 +2442,7 @@ static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
                        return NULL;
                }
                /* handle-1 is actually used */
-               if (unlikely(handle > MAX_OUTSTANDING_COMMANDS)) {
+               if (unlikely(handle > DEFAULT_OUTSTANDING_COMMANDS)) {
                        ql_dbg(ql_dbg_tgt, vha, 0xe052,
                            "qla_target(%d): Wrong handle %x received\n",
                            vha->vp_idx, handle);
@@ -4305,6 +4306,12 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
        if (!QLA_TGT_MODE_ENABLED())
                return 0;
 
+       if (!IS_TGT_MODE_CAPABLE(ha)) {
+               ql_log(ql_log_warn, base_vha, 0xe070,
+                   "This adapter does not support target mode.\n");
+               return 0;
+       }
+
        ql_dbg(ql_dbg_tgt, base_vha, 0xe03b,
            "Registering target for host %ld(%p)", base_vha->host_no, ha);
 
@@ -4666,7 +4673,6 @@ void
 qlt_24xx_process_atio_queue(struct scsi_qla_host *vha)
 {
        struct qla_hw_data *ha = vha->hw;
-       struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
        struct atio_from_isp *pkt;
        int cnt, i;
 
@@ -4694,26 +4700,28 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha)
        }
 
        /* Adjust ring index */
-       WRT_REG_DWORD(&reg->atio_q_out, ha->tgt.atio_ring_index);
+       WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
 }
 
 void
-qlt_24xx_config_rings(struct scsi_qla_host *vha, device_reg_t __iomem *reg)
+qlt_24xx_config_rings(struct scsi_qla_host *vha)
 {
        struct qla_hw_data *ha = vha->hw;
+       if (!QLA_TGT_MODE_ENABLED())
+               return;
 
-/* FIXME: atio_q in/out for ha->mqenable=1..? */
-       if (ha->mqenable) {
-#if 0
-               WRT_REG_DWORD(&reg->isp25mq.atio_q_in, 0);
-               WRT_REG_DWORD(&reg->isp25mq.atio_q_out, 0);
-               RD_REG_DWORD(&reg->isp25mq.atio_q_out);
-#endif
-       } else {
-               /* Setup APTIO registers for target mode */
-               WRT_REG_DWORD(&reg->isp24.atio_q_in, 0);
-               WRT_REG_DWORD(&reg->isp24.atio_q_out, 0);
-               RD_REG_DWORD(&reg->isp24.atio_q_out);
+       WRT_REG_DWORD(ISP_ATIO_Q_IN(vha), 0);
+       WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), 0);
+       RD_REG_DWORD(ISP_ATIO_Q_OUT(vha));
+
+       if (IS_ATIO_MSIX_CAPABLE(ha)) {
+               struct qla_msix_entry *msix = &ha->msix_entries[2];
+               struct init_cb_24xx *icb = (struct init_cb_24xx *)ha->init_cb;
+
+               icb->msix_atio = cpu_to_le16(msix->entry);
+               ql_dbg(ql_dbg_init, vha, 0xf072,
+                   "Registering ICB vector 0x%x for atio queue.\n",
+                   msix->entry);
        }
 }
 
@@ -4796,6 +4804,101 @@ qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha,
        }
 }
 
+void
+qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
+{
+       struct qla_hw_data *ha = vha->hw;
+
+       if (!QLA_TGT_MODE_ENABLED())
+               return;
+
+       if (qla_tgt_mode_enabled(vha)) {
+               if (!ha->tgt.saved_set) {
+                       /* We save only once */
+                       ha->tgt.saved_exchange_count = nv->exchange_count;
+                       ha->tgt.saved_firmware_options_1 =
+                           nv->firmware_options_1;
+                       ha->tgt.saved_firmware_options_2 =
+                           nv->firmware_options_2;
+                       ha->tgt.saved_firmware_options_3 =
+                           nv->firmware_options_3;
+                       ha->tgt.saved_set = 1;
+               }
+
+               nv->exchange_count = __constant_cpu_to_le16(0xFFFF);
+
+               /* Enable target mode */
+               nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_4);
+
+               /* Disable ini mode, if requested */
+               if (!qla_ini_mode_enabled(vha))
+                       nv->firmware_options_1 |=
+                           __constant_cpu_to_le32(BIT_5);
+
+               /* Disable Full Login after LIP */
+               nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
+               /* Enable initial LIP */
+               nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9);
+               /* Enable FC tapes support */
+               nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
+               /* Disable Full Login after LIP */
+               nv->host_p &= __constant_cpu_to_le32(~BIT_10);
+               /* Enable target PRLI control */
+               nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_14);
+       } else {
+               if (ha->tgt.saved_set) {
+                       nv->exchange_count = ha->tgt.saved_exchange_count;
+                       nv->firmware_options_1 =
+                           ha->tgt.saved_firmware_options_1;
+                       nv->firmware_options_2 =
+                           ha->tgt.saved_firmware_options_2;
+                       nv->firmware_options_3 =
+                           ha->tgt.saved_firmware_options_3;
+               }
+               return;
+       }
+
+       /* out-of-order frames reassembly */
+       nv->firmware_options_3 |= BIT_6|BIT_9;
+
+       if (ha->tgt.enable_class_2) {
+               if (vha->flags.init_done)
+                       fc_host_supported_classes(vha->host) =
+                               FC_COS_CLASS2 | FC_COS_CLASS3;
+
+               nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_8);
+       } else {
+               if (vha->flags.init_done)
+                       fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
+
+               nv->firmware_options_2 &= ~__constant_cpu_to_le32(BIT_8);
+       }
+}
+
+void
+qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha,
+       struct init_cb_81xx *icb)
+{
+       struct qla_hw_data *ha = vha->hw;
+
+       if (!QLA_TGT_MODE_ENABLED())
+               return;
+
+       if (ha->tgt.node_name_set) {
+               memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
+               icb->firmware_options_1 |= __constant_cpu_to_le32(BIT_14);
+       }
+}
+
+void
+qlt_83xx_iospace_config(struct qla_hw_data *ha)
+{
+       if (!QLA_TGT_MODE_ENABLED())
+               return;
+
+       ha->msix_count += 1; /* For ATIO Q */
+}
+
 int
 qlt_24xx_process_response_error(struct scsi_qla_host *vha,
        struct sts_entry_24xx *pkt)
@@ -4828,11 +4931,41 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
        if (!QLA_TGT_MODE_ENABLED())
                return;
 
+       if (ha->mqenable || IS_QLA83XX(ha)) {
+               ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
+               ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
+       } else {
+               ISP_ATIO_Q_IN(base_vha) = &ha->iobase->isp24.atio_q_in;
+               ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out;
+       }
+
        mutex_init(&ha->tgt.tgt_mutex);
        mutex_init(&ha->tgt.tgt_host_action_mutex);
        qlt_clear_mode(base_vha);
 }
 
+irqreturn_t
+qla83xx_msix_atio_q(int irq, void *dev_id)
+{
+       struct rsp_que *rsp;
+       scsi_qla_host_t *vha;
+       struct qla_hw_data *ha;
+       unsigned long flags;
+
+       rsp = (struct rsp_que *) dev_id;
+       ha = rsp->hw;
+       vha = pci_get_drvdata(ha->pdev);
+
+       spin_lock_irqsave(&ha->hardware_lock, flags);
+
+       qlt_24xx_process_atio_queue(vha);
+       qla24xx_process_response_queue(vha, rsp);
+
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+       return IRQ_HANDLED;
+}
+
 int
 qlt_mem_alloc(struct qla_hw_data *ha)
 {
index bad7495..ff9ccb9 100644 (file)
@@ -60,8 +60,9 @@
  * multi-complete should come to the tgt driver or be handled there by qla2xxx
  */
 #define CTIO_COMPLETION_HANDLE_MARK    BIT_29
-#if (CTIO_COMPLETION_HANDLE_MARK <= MAX_OUTSTANDING_COMMANDS)
-#error "CTIO_COMPLETION_HANDLE_MARK not larger than MAX_OUTSTANDING_COMMANDS"
+#if (CTIO_COMPLETION_HANDLE_MARK <= DEFAULT_OUTSTANDING_COMMANDS)
+#error "CTIO_COMPLETION_HANDLE_MARK not larger than "
+       "DEFAULT_OUTSTANDING_COMMANDS"
 #endif
 #define HANDLE_IS_CTIO_COMP(h) (h & CTIO_COMPLETION_HANDLE_MARK)
 
@@ -161,7 +162,7 @@ struct imm_ntfy_from_isp {
                        uint16_t srr_rx_id;
                        uint16_t status;
                        uint8_t  status_subcode;
-                       uint8_t  reserved_3;
+                       uint8_t  fw_handle;
                        uint32_t exchange_address;
                        uint32_t srr_rel_offs;
                        uint16_t srr_ui;
@@ -217,7 +218,7 @@ struct nack_to_isp {
                        uint16_t srr_rx_id;
                        uint16_t status;
                        uint8_t  status_subcode;
-                       uint8_t  reserved_3;
+                       uint8_t  fw_handle;
                        uint32_t exchange_address;
                        uint32_t srr_rel_offs;
                        uint16_t srr_ui;
@@ -948,6 +949,7 @@ extern void qlt_update_vp_map(struct scsi_qla_host *, int);
  * is not set. Right now, ha value is ignored.
  */
 #define QLA_TGT_MODE_ENABLED() (ql2x_ini_mode != QLA2XXX_INI_MODE_ENABLED)
+extern int ql2x_ini_mode;
 
 static inline bool qla_tgt_mode_enabled(struct scsi_qla_host *ha)
 {
@@ -985,12 +987,15 @@ extern void qlt_vport_create(struct scsi_qla_host *, struct qla_hw_data *);
 extern void qlt_rff_id(struct scsi_qla_host *, struct ct_sns_req *);
 extern void qlt_init_atio_q_entries(struct scsi_qla_host *);
 extern void qlt_24xx_process_atio_queue(struct scsi_qla_host *);
-extern void qlt_24xx_config_rings(struct scsi_qla_host *,
-       device_reg_t __iomem *);
+extern void qlt_24xx_config_rings(struct scsi_qla_host *);
 extern void qlt_24xx_config_nvram_stage1(struct scsi_qla_host *,
        struct nvram_24xx *);
 extern void qlt_24xx_config_nvram_stage2(struct scsi_qla_host *,
        struct init_cb_24xx *);
+extern void qlt_81xx_config_nvram_stage2(struct scsi_qla_host *,
+       struct init_cb_81xx *);
+extern void qlt_81xx_config_nvram_stage1(struct scsi_qla_host *,
+       struct nvram_81xx *);
 extern int qlt_24xx_process_response_error(struct scsi_qla_host *,
        struct sts_entry_24xx *);
 extern void qlt_modify_vp_config(struct scsi_qla_host *,
@@ -1000,5 +1005,7 @@ extern int qlt_mem_alloc(struct qla_hw_data *);
 extern void qlt_mem_free(struct qla_hw_data *);
 extern void qlt_stop_phase1(struct qla_tgt *);
 extern void qlt_stop_phase2(struct qla_tgt *);
+extern irqreturn_t qla83xx_msix_atio_q(int, void *);
+extern void qlt_83xx_iospace_config(struct qla_hw_data *);
 
 #endif /* __QLA_TARGET_H */
index 49697ca..2b6e478 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2012 QLogic Corporation
+ * Copyright (c)  2003-2013 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
index 81e738d..160d336 100644 (file)
@@ -1420,10 +1420,8 @@ int qla4xxx_get_chap(struct scsi_qla_host *ha, char *username, char *password,
        dma_addr_t chap_dma;
 
        chap_table = dma_pool_alloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma);
-       if (chap_table == NULL) {
-               ret = -ENOMEM;
-               goto exit_get_chap;
-       }
+       if (chap_table == NULL)
+               return -ENOMEM;
 
        chap_size = sizeof(struct ql4_chap_table);
        memset(chap_table, 0, chap_size);
index 270b3cf..16a3a0c 100644 (file)
@@ -201,6 +201,7 @@ enum storvsc_request_type {
 #define SRB_STATUS_AUTOSENSE_VALID     0x80
 #define SRB_STATUS_INVALID_LUN 0x20
 #define SRB_STATUS_SUCCESS     0x01
+#define SRB_STATUS_ABORTED     0x02
 #define SRB_STATUS_ERROR       0x04
 
 /*
@@ -295,6 +296,25 @@ struct storvsc_scan_work {
        uint lun;
 };
 
+static void storvsc_device_scan(struct work_struct *work)
+{
+       struct storvsc_scan_work *wrk;
+       uint lun;
+       struct scsi_device *sdev;
+
+       wrk = container_of(work, struct storvsc_scan_work, work);
+       lun = wrk->lun;
+
+       sdev = scsi_device_lookup(wrk->host, 0, 0, lun);
+       if (!sdev)
+               goto done;
+       scsi_rescan_device(&sdev->sdev_gendev);
+       scsi_device_put(sdev);
+
+done:
+       kfree(wrk);
+}
+
 static void storvsc_bus_scan(struct work_struct *work)
 {
        struct storvsc_scan_work *wrk;
@@ -467,6 +487,7 @@ static struct scatterlist *create_bounce_buffer(struct scatterlist *sgl,
        if (!bounce_sgl)
                return NULL;
 
+       sg_init_table(bounce_sgl, num_pages);
        for (i = 0; i < num_pages; i++) {
                page_buf = alloc_page(GFP_ATOMIC);
                if (!page_buf)
@@ -760,6 +781,66 @@ cleanup:
        return ret;
 }
 
+static void storvsc_handle_error(struct vmscsi_request *vm_srb,
+                               struct scsi_cmnd *scmnd,
+                               struct Scsi_Host *host,
+                               u8 asc, u8 ascq)
+{
+       struct storvsc_scan_work *wrk;
+       void (*process_err_fn)(struct work_struct *work);
+       bool do_work = false;
+
+       switch (vm_srb->srb_status) {
+       case SRB_STATUS_ERROR:
+               /*
+                * If there is an error; offline the device since all
+                * If there is an error, offline the device since all
+                * error recovery strategies would have already been
+                * deployed on the host side. However, if the command
+                * was a pass-through command, deal with it appropriately.
+               switch (scmnd->cmnd[0]) {
+               case ATA_16:
+               case ATA_12:
+                       set_host_byte(scmnd, DID_PASSTHROUGH);
+                       break;
+               default:
+                       set_host_byte(scmnd, DID_TARGET_FAILURE);
+               }
+               break;
+       case SRB_STATUS_INVALID_LUN:
+               do_work = true;
+               process_err_fn = storvsc_remove_lun;
+               break;
+       case (SRB_STATUS_ABORTED | SRB_STATUS_AUTOSENSE_VALID):
+               if ((asc == 0x2a) && (ascq == 0x9)) {
+                       do_work = true;
+                       process_err_fn = storvsc_device_scan;
+                       /*
+                        * Retry the I/O that triggered this.
+                        */
+                       set_host_byte(scmnd, DID_REQUEUE);
+               }
+               break;
+       }
+
+       if (!do_work)
+               return;
+
+       /*
+        * We need to schedule work to process this error.
+        */
+       wrk = kmalloc(sizeof(struct storvsc_scan_work), GFP_ATOMIC);
+       if (!wrk) {
+               set_host_byte(scmnd, DID_TARGET_FAILURE);
+               return;
+       }
+
+       wrk->host = host;
+       wrk->lun = vm_srb->lun;
+       INIT_WORK(&wrk->work, process_err_fn);
+       schedule_work(&wrk->work);
+}
+
 
 static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request)
 {
@@ -768,8 +849,13 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request)
        void (*scsi_done_fn)(struct scsi_cmnd *);
        struct scsi_sense_hdr sense_hdr;
        struct vmscsi_request *vm_srb;
-       struct storvsc_scan_work *wrk;
        struct stor_mem_pools *memp = scmnd->device->hostdata;
+       struct Scsi_Host *host;
+       struct storvsc_device *stor_dev;
+       struct hv_device *dev = host_dev->dev;
+
+       stor_dev = get_in_stor_device(dev);
+       host = stor_dev->host;
 
        vm_srb = &cmd_request->vstor_packet.vm_srb;
        if (cmd_request->bounce_sgl_count) {
@@ -782,55 +868,18 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request)
                                        cmd_request->bounce_sgl_count);
        }
 
-       /*
-        * If there is an error; offline the device since all
-        * error recovery strategies would have already been
-        * deployed on the host side. However, if the command
-        * were a pass-through command deal with it appropriately.
-        */
        scmnd->result = vm_srb->scsi_status;
 
-       if (vm_srb->srb_status == SRB_STATUS_ERROR) {
-               switch (scmnd->cmnd[0]) {
-               case ATA_16:
-               case ATA_12:
-                       set_host_byte(scmnd, DID_PASSTHROUGH);
-                       break;
-               default:
-                       set_host_byte(scmnd, DID_TARGET_FAILURE);
-               }
-       }
-
-
-       /*
-        * If the LUN is invalid; remove the device.
-        */
-       if (vm_srb->srb_status == SRB_STATUS_INVALID_LUN) {
-               struct storvsc_device *stor_dev;
-               struct hv_device *dev = host_dev->dev;
-               struct Scsi_Host *host;
-
-               stor_dev = get_in_stor_device(dev);
-               host = stor_dev->host;
-
-               wrk = kmalloc(sizeof(struct storvsc_scan_work),
-                               GFP_ATOMIC);
-               if (!wrk) {
-                       scmnd->result = DID_TARGET_FAILURE << 16;
-               } else {
-                       wrk->host = host;
-                       wrk->lun = vm_srb->lun;
-                       INIT_WORK(&wrk->work, storvsc_remove_lun);
-                       schedule_work(&wrk->work);
-               }
-       }
-
        if (scmnd->result) {
                if (scsi_normalize_sense(scmnd->sense_buffer,
                                SCSI_SENSE_BUFFERSIZE, &sense_hdr))
                        scsi_print_sense_hdr("storvsc", &sense_hdr);
        }
 
+       if (vm_srb->srb_status != SRB_STATUS_SUCCESS)
+               storvsc_handle_error(vm_srb, scmnd, host, sense_hdr.asc,
+                                        sense_hdr.ascq);
+
        scsi_set_resid(scmnd,
                cmd_request->data_buffer.len -
                vm_srb->data_transfer_length);
@@ -1155,6 +1204,8 @@ static int storvsc_device_configure(struct scsi_device *sdevice)
 
        blk_queue_bounce_limit(sdevice->request_queue, BLK_BOUNCE_ANY);
 
+       sdevice->no_write_same = 1;
+
        return 0;
 }
 
@@ -1237,6 +1288,8 @@ static bool storvsc_scsi_cmd_ok(struct scsi_cmnd *scmnd)
        u8 scsi_op = scmnd->cmnd[0];
 
        switch (scsi_op) {
+       /* the host does not handle WRITE_SAME, log accidental usage */
+       case WRITE_SAME:
        /*
         * smartd sends this command and the host does not handle
         * this. So, don't send it.
index 8f27f9d..0371047 100644 (file)
@@ -2,48 +2,58 @@
 # Kernel configuration file for the UFS Host Controller
 #
 # This code is based on drivers/scsi/ufs/Kconfig
-# Copyright (C) 2011  Samsung Samsung India Software Operations
+# Copyright (C) 2011-2013 Samsung India Software Operations
+#
+# Authors:
+#      Santosh Yaraganavi <santosh.sy@samsung.com>
+#      Vinayak Holikatti <h.vinayak@samsung.com>
 #
-# Santosh Yaraganavi <santosh.sy@samsung.com>
-# Vinayak Holikatti <h.vinayak@samsung.com>
-
 # This program is free software; you can redistribute it and/or
 # modify it under the terms of the GNU General Public License
 # as published by the Free Software Foundation; either version 2
 # of the License, or (at your option) any later version.
-
+# See the COPYING file in the top-level directory or visit
+# <http://www.gnu.org/licenses/gpl-2.0.html>
+#
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 # GNU General Public License for more details.
+#
+# This program is provided "AS IS" and "WITH ALL FAULTS" and
+# without warranty of any kind. You are solely responsible for
+# determining the appropriateness of using and distributing
+# the program and assume all risks associated with your exercise
+# of rights with respect to the program, including but not limited
+# to infringement of third party rights, the risks and costs of
+# program errors, damage to or loss of data, programs or equipment,
+# and unavailability or interruption of operations. Under no
+# circumstances will the contributor of this Program be liable for
+# any damages of any kind arising from your use or distribution of
+# this program.
 
-# NO WARRANTY
-# THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
-# CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
-# LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
-# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
-# solely responsible for determining the appropriateness of using and
-# distributing the Program and assumes all risks associated with its
-# exercise of rights under this Agreement, including but not limited to
-# the risks and costs of program errors, damage to or loss of data,
-# programs or equipment, and unavailability or interruption of operations.
-
-# DISCLAIMER OF LIABILITY
-# NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
-# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
-# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
-# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
-# HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+config SCSI_UFSHCD
+       tristate "Universal Flash Storage Controller Driver Core"
+       depends on SCSI
+       ---help---
+         This selects support for UFS devices in Linux. Say Y and make
+         sure that you know the name of your UFS host adapter (the card
+         inside your computer that "speaks" the UFS protocol, also
+         called UFS Host Controller), because you will be asked for it.
+         The module will be called ufshcd.
 
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
-# USA.
+         To compile this driver as a module, choose M here and read
+         <file:Documentation/scsi/ufs.txt>.
+         However, do not compile this as a module if your root file system
+         (the one containing the directory /) is located on a UFS device.
 
-config SCSI_UFSHCD
-       tristate "Universal Flash Storage host controller driver"
-       depends on PCI && SCSI
+config SCSI_UFSHCD_PCI
+       tristate "PCI bus based UFS Controller support"
+       depends on SCSI_UFSHCD && PCI
        ---help---
-       This is a generic driver which supports PCIe UFS Host controllers.
+         This selects the PCI UFS Host Controller Interface. Select this if
+         you have a UFS Host Controller with a PCI Interface.
+
+         If you have a controller with this interface, say Y or M here.
+
+         If unsure, say N.
index adf7895..9eda0df 100644 (file)
@@ -1,2 +1,3 @@
 # UFSHCD makefile
 obj-$(CONFIG_SCSI_UFSHCD) += ufshcd.o
+obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o
index b207529..139bc06 100644 (file)
@@ -2,45 +2,35 @@
  * Universal Flash Storage Host controller driver
  *
  * This code is based on drivers/scsi/ufs/ufs.h
- * Copyright (C) 2011-2012 Samsung India Software Operations
+ * Copyright (C) 2011-2013 Samsung India Software Operations
  *
- * Santosh Yaraganavi <santosh.sy@samsung.com>
- * Vinayak Holikatti <h.vinayak@samsung.com>
+ * Authors:
+ *     Santosh Yaraganavi <santosh.sy@samsung.com>
+ *     Vinayak Holikatti <h.vinayak@samsung.com>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
  * as published by the Free Software Foundation; either version 2
  * of the License, or (at your option) any later version.
+ * See the COPYING file in the top-level directory or visit
+ * <http://www.gnu.org/licenses/gpl-2.0.html>
  *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * NO WARRANTY
- * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
- * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
- * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
- * solely responsible for determining the appropriateness of using and
- * distributing the Program and assumes all risks associated with its
- * exercise of rights under this Agreement, including but not limited to
- * the risks and costs of program errors, damage to or loss of data,
- * programs or equipment, and unavailability or interruption of operations.
-
- * DISCLAIMER OF LIABILITY
- * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
- * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
- * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
- * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
-
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
- * USA.
+ * This program is provided "AS IS" and "WITH ALL FAULTS" and
+ * without warranty of any kind. You are solely responsible for
+ * determining the appropriateness of using and distributing
+ * the program and assume all risks associated with your exercise
+ * of rights with respect to the program, including but not limited
+ * to infringement of third party rights, the risks and costs of
+ * program errors, damage to or loss of data, programs or equipment,
+ * and unavailability or interruption of operations. Under no
+ * circumstances will the contributor of this Program be liable for
+ * any damages of any kind arising from your use or distribution of
+ * this program.
  */
 
 #ifndef _UFS_H
diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c
new file mode 100644 (file)
index 0000000..5cb1d75
--- /dev/null
@@ -0,0 +1,211 @@
+/*
+ * Universal Flash Storage Host controller PCI glue driver
+ *
+ * This code is based on drivers/scsi/ufs/ufshcd-pci.c
+ * Copyright (C) 2011-2013 Samsung India Software Operations
+ *
+ * Authors:
+ *     Santosh Yaraganavi <santosh.sy@samsung.com>
+ *     Vinayak Holikatti <h.vinayak@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * See the COPYING file in the top-level directory or visit
+ * <http://www.gnu.org/licenses/gpl-2.0.html>
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * This program is provided "AS IS" and "WITH ALL FAULTS" and
+ * without warranty of any kind. You are solely responsible for
+ * determining the appropriateness of using and distributing
+ * the program and assume all risks associated with your exercise
+ * of rights with respect to the program, including but not limited
+ * to infringement of third party rights, the risks and costs of
+ * program errors, damage to or loss of data, programs or equipment,
+ * and unavailability or interruption of operations. Under no
+ * circumstances will the contributor of this Program be liable for
+ * any damages of any kind arising from your use or distribution of
+ * this program.
+ */
+
+#include "ufshcd.h"
+#include <linux/pci.h>
+
+#ifdef CONFIG_PM
+/**
+ * ufshcd_pci_suspend - suspend power management function
+ * @pdev: pointer to PCI device handle
+ * @state: power state
+ *
+ * Returns -ENOSYS
+ */
+static int ufshcd_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+       /*
+        * TODO:
+        * 1. Call ufshcd_suspend
+        * 2. Do bus specific power management
+        */
+
+       return -ENOSYS;
+}
+
+/**
+ * ufshcd_pci_resume - resume power management function
+ * @pdev: pointer to PCI device handle
+ *
+ * Returns -ENOSYS
+ */
+static int ufshcd_pci_resume(struct pci_dev *pdev)
+{
+       /*
+        * TODO:
+        * 1. Call ufshcd_resume.
+        * 2. Do bus specific wake up
+        */
+
+       return -ENOSYS;
+}
+#endif /* CONFIG_PM */
+
+/**
+ * ufshcd_pci_shutdown - main function to put the controller in reset state
+ * @pdev: pointer to PCI device handle
+ */
+static void ufshcd_pci_shutdown(struct pci_dev *pdev)
+{
+       ufshcd_hba_stop((struct ufs_hba *)pci_get_drvdata(pdev));
+}
+
+/**
+ * ufshcd_pci_remove - de-allocate PCI/SCSI host and host memory space
+ *             data structure memory
+ * @pdev - pointer to PCI handle
+ */
+static void ufshcd_pci_remove(struct pci_dev *pdev)
+{
+       struct ufs_hba *hba = pci_get_drvdata(pdev);
+
+       disable_irq(pdev->irq);
+       free_irq(pdev->irq, hba);
+       ufshcd_remove(hba);
+       pci_release_regions(pdev);
+       pci_set_drvdata(pdev, NULL);
+       pci_clear_master(pdev);
+       pci_disable_device(pdev);
+}
+
+/**
+ * ufshcd_set_dma_mask - Set dma mask based on the controller
+ *                      addressing capability
+ * @pdev: PCI device structure
+ *
+ * Returns 0 for success, non-zero for failure
+ */
+static int ufshcd_set_dma_mask(struct pci_dev *pdev)
+{
+       int err;
+
+       if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
+               && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
+               return 0;
+       err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+       if (!err)
+               err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+       return err;
+}
+
+/**
+ * ufshcd_pci_probe - probe routine of the driver
+ * @pdev: pointer to PCI device handle
+ * @id: PCI device id
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int
+ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+       struct ufs_hba *hba;
+       void __iomem *mmio_base;
+       int err;
+
+       err = pci_enable_device(pdev);
+       if (err) {
+               dev_err(&pdev->dev, "pci_enable_device failed\n");
+               goto out_error;
+       }
+
+       pci_set_master(pdev);
+
+
+       err = pci_request_regions(pdev, UFSHCD);
+       if (err < 0) {
+               dev_err(&pdev->dev, "request regions failed\n");
+               goto out_disable;
+       }
+
+       mmio_base = pci_ioremap_bar(pdev, 0);
+       if (!mmio_base) {
+               dev_err(&pdev->dev, "memory map failed\n");
+               err = -ENOMEM;
+               goto out_release_regions;
+       }
+
+       err = ufshcd_set_dma_mask(pdev);
+       if (err) {
+               dev_err(&pdev->dev, "set dma mask failed\n");
+               goto out_iounmap;
+       }
+
+       err = ufshcd_init(&pdev->dev, &hba, mmio_base, pdev->irq);
+       if (err) {
+               dev_err(&pdev->dev, "Initialization failed\n");
+               goto out_iounmap;
+       }
+
+       pci_set_drvdata(pdev, hba);
+
+       return 0;
+
+out_iounmap:
+       iounmap(mmio_base);
+out_release_regions:
+       pci_release_regions(pdev);
+out_disable:
+       pci_clear_master(pdev);
+       pci_disable_device(pdev);
+out_error:
+       return err;
+}
+
+static DEFINE_PCI_DEVICE_TABLE(ufshcd_pci_tbl) = {
+       { PCI_VENDOR_ID_SAMSUNG, 0xC00C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+       { }     /* terminate list */
+};
+
+MODULE_DEVICE_TABLE(pci, ufshcd_pci_tbl);
+
+static struct pci_driver ufshcd_pci_driver = {
+       .name = UFSHCD,
+       .id_table = ufshcd_pci_tbl,
+       .probe = ufshcd_pci_probe,
+       .remove = ufshcd_pci_remove,
+       .shutdown = ufshcd_pci_shutdown,
+#ifdef CONFIG_PM
+       .suspend = ufshcd_pci_suspend,
+       .resume = ufshcd_pci_resume,
+#endif
+};
+
+module_pci_driver(ufshcd_pci_driver);
+
+MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
+MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
+MODULE_DESCRIPTION("UFS host controller PCI glue driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(UFSHCD_DRIVER_VERSION);
index 91a4046..60fd40c 100644 (file)
@@ -1,77 +1,39 @@
 /*
- * Universal Flash Storage Host controller driver
+ * Universal Flash Storage Host controller driver Core
  *
  * This code is based on drivers/scsi/ufs/ufshcd.c
- * Copyright (C) 2011-2012 Samsung India Software Operations
+ * Copyright (C) 2011-2013 Samsung India Software Operations
  *
- * Santosh Yaraganavi <santosh.sy@samsung.com>
- * Vinayak Holikatti <h.vinayak@samsung.com>
+ * Authors:
+ *     Santosh Yaraganavi <santosh.sy@samsung.com>
+ *     Vinayak Holikatti <h.vinayak@samsung.com>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
  * as published by the Free Software Foundation; either version 2
  * of the License, or (at your option) any later version.
+ * See the COPYING file in the top-level directory or visit
+ * <http://www.gnu.org/licenses/gpl-2.0.html>
  *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * NO WARRANTY
- * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
- * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
- * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
- * solely responsible for determining the appropriateness of using and
- * distributing the Program and assumes all risks associated with its
- * exercise of rights under this Agreement, including but not limited to
- * the risks and costs of program errors, damage to or loss of data,
- * programs or equipment, and unavailability or interruption of operations.
-
- * DISCLAIMER OF LIABILITY
- * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
- * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
- * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
- * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
-
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
- * USA.
+ * This program is provided "AS IS" and "WITH ALL FAULTS" and
+ * without warranty of any kind. You are solely responsible for
+ * determining the appropriateness of using and distributing
+ * the program and assume all risks associated with your exercise
+ * of rights with respect to the program, including but not limited
+ * to infringement of third party rights, the risks and costs of
+ * program errors, damage to or loss of data, programs or equipment,
+ * and unavailability or interruption of operations. Under no
+ * circumstances will the contributor of this Program be liable for
+ * any damages of any kind arising from your use or distribution of
+ * this program.
  */
 
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/workqueue.h>
-#include <linux/errno.h>
-#include <linux/types.h>
-#include <linux/wait.h>
-#include <linux/bitops.h>
-
-#include <asm/irq.h>
-#include <asm/byteorder.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_host.h>
-#include <scsi/scsi_tcq.h>
-#include <scsi/scsi_dbg.h>
-#include <scsi/scsi_eh.h>
-
-#include "ufs.h"
-#include "ufshci.h"
-
-#define UFSHCD "ufshcd"
-#define UFSHCD_DRIVER_VERSION "0.1"
+#include "ufshcd.h"
 
 enum {
        UFSHCD_MAX_CHANNEL      = 0,
@@ -101,121 +63,6 @@ enum {
        INT_AGGR_CONFIG,
 };
 
-/**
- * struct uic_command - UIC command structure
- * @command: UIC command
- * @argument1: UIC command argument 1
- * @argument2: UIC command argument 2
- * @argument3: UIC command argument 3
- * @cmd_active: Indicate if UIC command is outstanding
- * @result: UIC command result
- */
-struct uic_command {
-       u32 command;
-       u32 argument1;
-       u32 argument2;
-       u32 argument3;
-       int cmd_active;
-       int result;
-};
-
-/**
- * struct ufs_hba - per adapter private structure
- * @mmio_base: UFSHCI base register address
- * @ucdl_base_addr: UFS Command Descriptor base address
- * @utrdl_base_addr: UTP Transfer Request Descriptor base address
- * @utmrdl_base_addr: UTP Task Management Descriptor base address
- * @ucdl_dma_addr: UFS Command Descriptor DMA address
- * @utrdl_dma_addr: UTRDL DMA address
- * @utmrdl_dma_addr: UTMRDL DMA address
- * @host: Scsi_Host instance of the driver
- * @pdev: PCI device handle
- * @lrb: local reference block
- * @outstanding_tasks: Bits representing outstanding task requests
- * @outstanding_reqs: Bits representing outstanding transfer requests
- * @capabilities: UFS Controller Capabilities
- * @nutrs: Transfer Request Queue depth supported by controller
- * @nutmrs: Task Management Queue depth supported by controller
- * @active_uic_cmd: handle of active UIC command
- * @ufshcd_tm_wait_queue: wait queue for task management
- * @tm_condition: condition variable for task management
- * @ufshcd_state: UFSHCD states
- * @int_enable_mask: Interrupt Mask Bits
- * @uic_workq: Work queue for UIC completion handling
- * @feh_workq: Work queue for fatal controller error handling
- * @errors: HBA errors
- */
-struct ufs_hba {
-       void __iomem *mmio_base;
-
-       /* Virtual memory reference */
-       struct utp_transfer_cmd_desc *ucdl_base_addr;
-       struct utp_transfer_req_desc *utrdl_base_addr;
-       struct utp_task_req_desc *utmrdl_base_addr;
-
-       /* DMA memory reference */
-       dma_addr_t ucdl_dma_addr;
-       dma_addr_t utrdl_dma_addr;
-       dma_addr_t utmrdl_dma_addr;
-
-       struct Scsi_Host *host;
-       struct pci_dev *pdev;
-
-       struct ufshcd_lrb *lrb;
-
-       unsigned long outstanding_tasks;
-       unsigned long outstanding_reqs;
-
-       u32 capabilities;
-       int nutrs;
-       int nutmrs;
-       u32 ufs_version;
-
-       struct uic_command active_uic_cmd;
-       wait_queue_head_t ufshcd_tm_wait_queue;
-       unsigned long tm_condition;
-
-       u32 ufshcd_state;
-       u32 int_enable_mask;
-
-       /* Work Queues */
-       struct work_struct uic_workq;
-       struct work_struct feh_workq;
-
-       /* HBA Errors */
-       u32 errors;
-};
-
-/**
- * struct ufshcd_lrb - local reference block
- * @utr_descriptor_ptr: UTRD address of the command
- * @ucd_cmd_ptr: UCD address of the command
- * @ucd_rsp_ptr: Response UPIU address for this command
- * @ucd_prdt_ptr: PRDT address of the command
- * @cmd: pointer to SCSI command
- * @sense_buffer: pointer to sense buffer address of the SCSI command
- * @sense_bufflen: Length of the sense buffer
- * @scsi_status: SCSI status of the command
- * @command_type: SCSI, UFS, Query.
- * @task_tag: Task tag of the command
- * @lun: LUN of the command
- */
-struct ufshcd_lrb {
-       struct utp_transfer_req_desc *utr_descriptor_ptr;
-       struct utp_upiu_cmd *ucd_cmd_ptr;
-       struct utp_upiu_rsp *ucd_rsp_ptr;
-       struct ufshcd_sg_entry *ucd_prdt_ptr;
-
-       struct scsi_cmnd *cmd;
-       u8 *sense_buffer;
-       unsigned int sense_bufflen;
-       int scsi_status;
-
-       int command_type;
-       int task_tag;
-       unsigned int lun;
-};
-
 /**
  * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
  * @hba - Pointer to adapter instance
@@ -335,21 +182,21 @@ static inline void ufshcd_free_hba_memory(struct ufs_hba *hba)
 
        if (hba->utmrdl_base_addr) {
                utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
-               dma_free_coherent(&hba->pdev->dev, utmrdl_size,
+               dma_free_coherent(hba->dev, utmrdl_size,
                                  hba->utmrdl_base_addr, hba->utmrdl_dma_addr);
        }
 
        if (hba->utrdl_base_addr) {
                utrdl_size =
                (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
-               dma_free_coherent(&hba->pdev->dev, utrdl_size,
+               dma_free_coherent(hba->dev, utrdl_size,
                                  hba->utrdl_base_addr, hba->utrdl_dma_addr);
        }
 
        if (hba->ucdl_base_addr) {
                ucdl_size =
                (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
-               dma_free_coherent(&hba->pdev->dev, ucdl_size,
+               dma_free_coherent(hba->dev, ucdl_size,
                                  hba->ucdl_base_addr, hba->ucdl_dma_addr);
        }
 }
@@ -428,15 +275,6 @@ static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
                REG_UTP_TRANSFER_REQ_LIST_RUN_STOP));
 }
 
-/**
- * ufshcd_hba_stop - Send controller to reset state
- * @hba: per adapter instance
- */
-static inline void ufshcd_hba_stop(struct ufs_hba *hba)
-{
-       writel(CONTROLLER_DISABLE, (hba->mmio_base + REG_CONTROLLER_ENABLE));
-}
-
 /**
  * ufshcd_hba_start - Start controller initialization sequence
  * @hba: per adapter instance
@@ -724,7 +562,7 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
 
        /* Allocate memory for UTP command descriptors */
        ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
-       hba->ucdl_base_addr = dma_alloc_coherent(&hba->pdev->dev,
+       hba->ucdl_base_addr = dma_alloc_coherent(hba->dev,
                                                 ucdl_size,
                                                 &hba->ucdl_dma_addr,
                                                 GFP_KERNEL);
@@ -737,7 +575,7 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
         */
        if (!hba->ucdl_base_addr ||
            WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
-               dev_err(&hba->pdev->dev,
+               dev_err(hba->dev,
                        "Command Descriptor Memory allocation failed\n");
                goto out;
        }
@@ -747,13 +585,13 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
         * UFSHCI requires 1024 byte alignment of UTRD
         */
        utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
-       hba->utrdl_base_addr = dma_alloc_coherent(&hba->pdev->dev,
+       hba->utrdl_base_addr = dma_alloc_coherent(hba->dev,
                                                  utrdl_size,
                                                  &hba->utrdl_dma_addr,
                                                  GFP_KERNEL);
        if (!hba->utrdl_base_addr ||
            WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
-               dev_err(&hba->pdev->dev,
+               dev_err(hba->dev,
                        "Transfer Descriptor Memory allocation failed\n");
                goto out;
        }
@@ -763,13 +601,13 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
         * UFSHCI requires 1024 byte alignment of UTMRD
         */
        utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
-       hba->utmrdl_base_addr = dma_alloc_coherent(&hba->pdev->dev,
+       hba->utmrdl_base_addr = dma_alloc_coherent(hba->dev,
                                                   utmrdl_size,
                                                   &hba->utmrdl_dma_addr,
                                                   GFP_KERNEL);
        if (!hba->utmrdl_base_addr ||
            WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
-               dev_err(&hba->pdev->dev,
+               dev_err(hba->dev,
                "Task Management Descriptor Memory allocation failed\n");
                goto out;
        }
@@ -777,7 +615,7 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
        /* Allocate memory for local reference block */
        hba->lrb = kcalloc(hba->nutrs, sizeof(struct ufshcd_lrb), GFP_KERNEL);
        if (!hba->lrb) {
-               dev_err(&hba->pdev->dev, "LRB Memory allocation failed\n");
+               dev_err(hba->dev, "LRB Memory allocation failed\n");
                goto out;
        }
        return 0;
@@ -867,7 +705,7 @@ static int ufshcd_dme_link_startup(struct ufs_hba *hba)
        /* check if controller is ready to accept UIC commands */
        if (((readl(hba->mmio_base + REG_CONTROLLER_STATUS)) &
            UIC_COMMAND_READY) == 0x0) {
-               dev_err(&hba->pdev->dev,
+               dev_err(hba->dev,
                        "Controller not ready"
                        " to accept UIC commands\n");
                return -EIO;
@@ -912,7 +750,7 @@ static int ufshcd_make_hba_operational(struct ufs_hba *hba)
        /* check if device present */
        reg = readl((hba->mmio_base + REG_CONTROLLER_STATUS));
        if (!ufshcd_is_device_present(reg)) {
-               dev_err(&hba->pdev->dev, "cc: Device not present\n");
+               dev_err(hba->dev, "cc: Device not present\n");
                err = -ENXIO;
                goto out;
        }
@@ -924,7 +762,7 @@ static int ufshcd_make_hba_operational(struct ufs_hba *hba)
        if (!(ufshcd_get_lists_status(reg))) {
                ufshcd_enable_run_stop_reg(hba);
        } else {
-               dev_err(&hba->pdev->dev,
+               dev_err(hba->dev,
                        "Host controller not ready to process requests");
                err = -EIO;
                goto out;
@@ -1005,7 +843,7 @@ static int ufshcd_hba_enable(struct ufs_hba *hba)
                if (retry) {
                        retry--;
                } else {
-                       dev_err(&hba->pdev->dev,
+                       dev_err(hba->dev,
                                "Controller enable failed\n");
                        return -EIO;
                }
@@ -1084,7 +922,7 @@ static int ufshcd_do_reset(struct ufs_hba *hba)
 
        /* start the initialization process */
        if (ufshcd_initialize_hba(hba)) {
-               dev_err(&hba->pdev->dev,
+               dev_err(hba->dev,
                        "Reset: Controller initialization failed\n");
                return FAILED;
        }
@@ -1167,7 +1005,7 @@ static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index)
                        task_result = SUCCESS;
        } else {
                task_result = FAILED;
-               dev_err(&hba->pdev->dev,
+               dev_err(hba->dev,
                        "trc: Invalid ocs = %x\n", ocs_value);
        }
        spin_unlock_irqrestore(hba->host->host_lock, flags);
@@ -1281,7 +1119,7 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
                /* check if the returned transfer response is valid */
                result = ufshcd_is_valid_req_rsp(lrbp->ucd_rsp_ptr);
                if (result) {
-                       dev_err(&hba->pdev->dev,
+                       dev_err(hba->dev,
                                "Invalid response = %x\n", result);
                        break;
                }
@@ -1310,7 +1148,7 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
        case OCS_FATAL_ERROR:
        default:
                result |= DID_ERROR << 16;
-               dev_err(&hba->pdev->dev,
+               dev_err(hba->dev,
                "OCS error from controller = %x\n", ocs);
                break;
        } /* end of switch */
@@ -1374,7 +1212,7 @@ static void ufshcd_uic_cc_handler (struct work_struct *work)
            !(ufshcd_get_uic_cmd_result(hba))) {
 
                if (ufshcd_make_hba_operational(hba))
-                       dev_err(&hba->pdev->dev,
+                       dev_err(hba->dev,
                                "cc: hba not operational state\n");
                return;
        }
@@ -1509,7 +1347,7 @@ ufshcd_issue_tm_cmd(struct ufs_hba *hba,
        free_slot = ufshcd_get_tm_free_slot(hba);
        if (free_slot >= hba->nutmrs) {
                spin_unlock_irqrestore(host->host_lock, flags);
-               dev_err(&hba->pdev->dev, "Task management queue full\n");
+               dev_err(hba->dev, "Task management queue full\n");
                err = FAILED;
                goto out;
        }
@@ -1552,7 +1390,7 @@ ufshcd_issue_tm_cmd(struct ufs_hba *hba,
                                         &hba->tm_condition) != 0),
                                         60 * HZ);
        if (!err) {
-               dev_err(&hba->pdev->dev,
+               dev_err(hba->dev,
                        "Task management command timed-out\n");
                err = FAILED;
                goto out;
@@ -1687,24 +1525,14 @@ static struct scsi_host_template ufshcd_driver_template = {
        .can_queue              = UFSHCD_CAN_QUEUE,
 };
 
-/**
- * ufshcd_shutdown - main function to put the controller in reset state
- * @pdev: pointer to PCI device handle
- */
-static void ufshcd_shutdown(struct pci_dev *pdev)
-{
-       ufshcd_hba_stop((struct ufs_hba *)pci_get_drvdata(pdev));
-}
-
-#ifdef CONFIG_PM
 /**
  * ufshcd_suspend - suspend power management function
- * @pdev: pointer to PCI device handle
+ * @hba: per adapter instance
  * @state: power state
  *
  * Returns -ENOSYS
  */
-static int ufshcd_suspend(struct pci_dev *pdev, pm_message_t state)
+int ufshcd_suspend(struct ufs_hba *hba, pm_message_t state)
 {
        /*
         * TODO:
@@ -1717,14 +1545,15 @@ static int ufshcd_suspend(struct pci_dev *pdev, pm_message_t state)
 
        return -ENOSYS;
 }
+EXPORT_SYMBOL_GPL(ufshcd_suspend);
 
 /**
  * ufshcd_resume - resume power management function
- * @pdev: pointer to PCI device handle
+ * @hba: per adapter instance
  *
  * Returns -ENOSYS
  */
-static int ufshcd_resume(struct pci_dev *pdev)
+int ufshcd_resume(struct ufs_hba *hba)
 {
        /*
         * TODO:
@@ -1737,7 +1566,7 @@ static int ufshcd_resume(struct pci_dev *pdev)
 
        return -ENOSYS;
 }
-#endif /* CONFIG_PM */
+EXPORT_SYMBOL_GPL(ufshcd_resume);
 
 /**
  * ufshcd_hba_free - free allocated memory for
@@ -1748,107 +1577,67 @@ static void ufshcd_hba_free(struct ufs_hba *hba)
 {
        iounmap(hba->mmio_base);
        ufshcd_free_hba_memory(hba);
-       pci_release_regions(hba->pdev);
 }
 
 /**
- * ufshcd_remove - de-allocate PCI/SCSI host and host memory space
+ * ufshcd_remove - de-allocate SCSI host and host memory space
  *             data structure memory
- * @pdev - pointer to PCI handle
+ * @hba - per adapter instance
  */
-static void ufshcd_remove(struct pci_dev *pdev)
+void ufshcd_remove(struct ufs_hba *hba)
 {
-       struct ufs_hba *hba = pci_get_drvdata(pdev);
-
        /* disable interrupts */
        ufshcd_int_config(hba, UFSHCD_INT_DISABLE);
-       free_irq(pdev->irq, hba);
 
        ufshcd_hba_stop(hba);
        ufshcd_hba_free(hba);
 
        scsi_remove_host(hba->host);
        scsi_host_put(hba->host);
-       pci_set_drvdata(pdev, NULL);
-       pci_clear_master(pdev);
-       pci_disable_device(pdev);
-}
-
-/**
- * ufshcd_set_dma_mask - Set dma mask based on the controller
- *                      addressing capability
- * @pdev: PCI device structure
- *
- * Returns 0 for success, non-zero for failure
- */
-static int ufshcd_set_dma_mask(struct ufs_hba *hba)
-{
-       int err;
-       u64 dma_mask;
-
-       /*
-        * If controller supports 64 bit addressing mode, then set the DMA
-        * mask to 64-bit, else set the DMA mask to 32-bit
-        */
-       if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT)
-               dma_mask = DMA_BIT_MASK(64);
-       else
-               dma_mask = DMA_BIT_MASK(32);
-
-       err = pci_set_dma_mask(hba->pdev, dma_mask);
-       if (err)
-               return err;
-
-       err = pci_set_consistent_dma_mask(hba->pdev, dma_mask);
-
-       return err;
 }
+EXPORT_SYMBOL_GPL(ufshcd_remove);
 
 /**
- * ufshcd_probe - probe routine of the driver
- * @pdev: pointer to PCI device handle
- * @id: PCI device id
- *
+ * ufshcd_init - Driver initialization routine
+ * @dev: pointer to device handle
+ * @hba_handle: driver private handle
+ * @mmio_base: base register address
+ * @irq: Interrupt line of device
  * Returns 0 on success, non-zero value on failure
  */
-static int ufshcd_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle,
+                void __iomem *mmio_base, unsigned int irq)
 {
        struct Scsi_Host *host;
        struct ufs_hba *hba;
        int err;
 
-       err = pci_enable_device(pdev);
-       if (err) {
-               dev_err(&pdev->dev, "pci_enable_device failed\n");
+       if (!dev) {
+               dev_err(dev,
+               "Invalid memory reference: dev is NULL\n");
+               err = -ENODEV;
                goto out_error;
        }
 
-       pci_set_master(pdev);
+       if (!mmio_base) {
+               dev_err(dev,
+               "Invalid memory reference: mmio_base is NULL\n");
+               err = -ENODEV;
+               goto out_error;
+       }
 
        host = scsi_host_alloc(&ufshcd_driver_template,
                                sizeof(struct ufs_hba));
        if (!host) {
-               dev_err(&pdev->dev, "scsi_host_alloc failed\n");
+               dev_err(dev, "scsi_host_alloc failed\n");
                err = -ENOMEM;
-               goto out_disable;
+               goto out_error;
        }
        hba = shost_priv(host);
-
-       err = pci_request_regions(pdev, UFSHCD);
-       if (err < 0) {
-               dev_err(&pdev->dev, "request regions failed\n");
-               goto out_host_put;
-       }
-
-       hba->mmio_base = pci_ioremap_bar(pdev, 0);
-       if (!hba->mmio_base) {
-               dev_err(&pdev->dev, "memory map failed\n");
-               err = -ENOMEM;
-               goto out_release_regions;
-       }
-
        hba->host = host;
-       hba->pdev = pdev;
+       hba->dev = dev;
+       hba->mmio_base = mmio_base;
+       hba->irq = irq;
 
        /* Read capabilities registers */
        ufshcd_hba_capabilities(hba);
@@ -1856,17 +1645,11 @@ static int ufshcd_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        /* Get UFS version supported by the controller */
        hba->ufs_version = ufshcd_get_ufs_version(hba);
 
-       err = ufshcd_set_dma_mask(hba);
-       if (err) {
-               dev_err(&pdev->dev, "set dma mask failed\n");
-               goto out_iounmap;
-       }
-
        /* Allocate memory for host memory space */
        err = ufshcd_memory_alloc(hba);
        if (err) {
-               dev_err(&pdev->dev, "Memory allocation failed\n");
-               goto out_iounmap;
+               dev_err(hba->dev, "Memory allocation failed\n");
+               goto out_disable;
        }
 
        /* Configure LRB */
@@ -1888,76 +1671,50 @@ static int ufshcd_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        INIT_WORK(&hba->feh_workq, ufshcd_fatal_err_handler);
 
        /* IRQ registration */
-       err = request_irq(pdev->irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
+       err = request_irq(irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
        if (err) {
-               dev_err(&pdev->dev, "request irq failed\n");
+               dev_err(hba->dev, "request irq failed\n");
                goto out_lrb_free;
        }
 
        /* Enable SCSI tag mapping */
        err = scsi_init_shared_tag_map(host, host->can_queue);
        if (err) {
-               dev_err(&pdev->dev, "init shared queue failed\n");
+               dev_err(hba->dev, "init shared queue failed\n");
                goto out_free_irq;
        }
 
-       pci_set_drvdata(pdev, hba);
-
-       err = scsi_add_host(host, &pdev->dev);
+       err = scsi_add_host(host, hba->dev);
        if (err) {
-               dev_err(&pdev->dev, "scsi_add_host failed\n");
+               dev_err(hba->dev, "scsi_add_host failed\n");
                goto out_free_irq;
        }
 
        /* Initialization routine */
        err = ufshcd_initialize_hba(hba);
        if (err) {
-               dev_err(&pdev->dev, "Initialization failed\n");
-               goto out_free_irq;
+               dev_err(hba->dev, "Initialization failed\n");
+               goto out_remove_scsi_host;
        }
+       *hba_handle = hba;
 
        return 0;
 
+out_remove_scsi_host:
+       scsi_remove_host(hba->host);
 out_free_irq:
-       free_irq(pdev->irq, hba);
+       free_irq(irq, hba);
 out_lrb_free:
        ufshcd_free_hba_memory(hba);
-out_iounmap:
-       iounmap(hba->mmio_base);
-out_release_regions:
-       pci_release_regions(pdev);
-out_host_put:
-       scsi_host_put(host);
 out_disable:
-       pci_clear_master(pdev);
-       pci_disable_device(pdev);
+       scsi_host_put(host);
 out_error:
        return err;
 }
+EXPORT_SYMBOL_GPL(ufshcd_init);
 
-static DEFINE_PCI_DEVICE_TABLE(ufshcd_pci_tbl) = {
-       { PCI_VENDOR_ID_SAMSUNG, 0xC00C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
-       { }     /* terminate list */
-};
-
-MODULE_DEVICE_TABLE(pci, ufshcd_pci_tbl);
-
-static struct pci_driver ufshcd_pci_driver = {
-       .name = UFSHCD,
-       .id_table = ufshcd_pci_tbl,
-       .probe = ufshcd_probe,
-       .remove = ufshcd_remove,
-       .shutdown = ufshcd_shutdown,
-#ifdef CONFIG_PM
-       .suspend = ufshcd_suspend,
-       .resume = ufshcd_resume,
-#endif
-};
-
-module_pci_driver(ufshcd_pci_driver);
-
-MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>, "
-             "Vinayak Holikatti <h.vinayak@samsung.com>");
-MODULE_DESCRIPTION("Generic UFS host controller driver");
+MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
+MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
+MODULE_DESCRIPTION("Generic UFS host controller driver Core");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(UFSHCD_DRIVER_VERSION);
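
With the PCI specifics split out, ufshcd_init() and ufshcd_remove() become the only entry points a bus glue driver needs: the glue owns device enabling, register mapping, IRQ discovery and bus-specific setup such as the DMA mask, then hands the core a struct device, an MMIO base and an IRQ. Below is a minimal sketch of such a glue probe/remove pair; it is a hypothetical platform glue for illustration only, not the ufshcd-pci driver added by this series.

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include "ufshcd.h"

/* Hypothetical platform glue; only the hand-off to the UFS core is real API. */
static int ufs_glue_probe(struct platform_device *pdev)
{
	struct ufs_hba *hba;
	struct resource *res;
	void __iomem *mmio;
	int irq, err;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	mmio = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!mmio)
		return -ENOMEM;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	/* Bus-specific setup (e.g. the DMA mask) now stays in the glue. */
	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (!err)
		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (err)
		return err;

	/* Core allocates the Scsi_Host and LRBs, requests the IRQ and
	 * brings up the host controller. */
	err = ufshcd_init(&pdev->dev, &hba, mmio, irq);
	if (err)
		return err;

	platform_set_drvdata(pdev, hba);
	return 0;
}

static int ufs_glue_remove(struct platform_device *pdev)
{
	ufshcd_remove(platform_get_drvdata(pdev));
	return 0;
}
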
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
new file mode 100644 (file)
index 0000000..6b99a42
--- /dev/null
@@ -0,0 +1,202 @@
+/*
+ * Universal Flash Storage Host controller driver
+ *
+ * This code is based on drivers/scsi/ufs/ufshcd.h
+ * Copyright (C) 2011-2013 Samsung India Software Operations
+ *
+ * Authors:
+ *     Santosh Yaraganavi <santosh.sy@samsung.com>
+ *     Vinayak Holikatti <h.vinayak@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * See the COPYING file in the top-level directory or visit
+ * <http://www.gnu.org/licenses/gpl-2.0.html>
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * This program is provided "AS IS" and "WITH ALL FAULTS" and
+ * without warranty of any kind. You are solely responsible for
+ * determining the appropriateness of using and distributing
+ * the program and assume all risks associated with your exercise
+ * of rights with respect to the program, including but not limited
+ * to infringement of third party rights, the risks and costs of
+ * program errors, damage to or loss of data, programs or equipment,
+ * and unavailability or interruption of operations. Under no
+ * circumstances will the contributor of this Program be liable for
+ * any damages of any kind arising from your use or distribution of
+ * this program.
+ */
+
+#ifndef _UFSHCD_H
+#define _UFSHCD_H
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <linux/bitops.h>
+#include <linux/pm_runtime.h>
+#include <linux/clk.h>
+
+#include <asm/irq.h>
+#include <asm/byteorder.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_eh.h>
+
+#include "ufs.h"
+#include "ufshci.h"
+
+#define UFSHCD "ufshcd"
+#define UFSHCD_DRIVER_VERSION "0.2"
+
+/**
+ * struct uic_command - UIC command structure
+ * @command: UIC command
+ * @argument1: UIC command argument 1
+ * @argument2: UIC command argument 2
+ * @argument3: UIC command argument 3
+ * @cmd_active: Indicate if UIC command is outstanding
+ * @result: UIC command result
+ */
+struct uic_command {
+       u32 command;
+       u32 argument1;
+       u32 argument2;
+       u32 argument3;
+       int cmd_active;
+       int result;
+};
+
+/**
+ * struct ufshcd_lrb - local reference block
+ * @utr_descriptor_ptr: UTRD address of the command
+ * @ucd_cmd_ptr: UCD address of the command
+ * @ucd_rsp_ptr: Response UPIU address for this command
+ * @ucd_prdt_ptr: PRDT address of the command
+ * @cmd: pointer to SCSI command
+ * @sense_buffer: pointer to sense buffer address of the SCSI command
+ * @sense_bufflen: Length of the sense buffer
+ * @scsi_status: SCSI status of the command
+ * @command_type: SCSI, UFS, Query.
+ * @task_tag: Task tag of the command
+ * @lun: LUN of the command
+ */
+struct ufshcd_lrb {
+       struct utp_transfer_req_desc *utr_descriptor_ptr;
+       struct utp_upiu_cmd *ucd_cmd_ptr;
+       struct utp_upiu_rsp *ucd_rsp_ptr;
+       struct ufshcd_sg_entry *ucd_prdt_ptr;
+
+       struct scsi_cmnd *cmd;
+       u8 *sense_buffer;
+       unsigned int sense_bufflen;
+       int scsi_status;
+
+       int command_type;
+       int task_tag;
+       unsigned int lun;
+};
+
+
+/**
+ * struct ufs_hba - per adapter private structure
+ * @mmio_base: UFSHCI base register address
+ * @ucdl_base_addr: UFS Command Descriptor base address
+ * @utrdl_base_addr: UTP Transfer Request Descriptor base address
+ * @utmrdl_base_addr: UTP Task Management Descriptor base address
+ * @ucdl_dma_addr: UFS Command Descriptor DMA address
+ * @utrdl_dma_addr: UTRDL DMA address
+ * @utmrdl_dma_addr: UTMRDL DMA address
+ * @host: Scsi_Host instance of the driver
+ * @dev: device handle
+ * @lrb: local reference block
+ * @outstanding_tasks: Bits representing outstanding task requests
+ * @outstanding_reqs: Bits representing outstanding transfer requests
+ * @capabilities: UFS Controller Capabilities
+ * @nutrs: Transfer Request Queue depth supported by controller
+ * @nutmrs: Task Management Queue depth supported by controller
+ * @ufs_version: UFS Version to which controller complies
+ * @irq: Irq number of the controller
+ * @active_uic_cmd: handle of active UIC command
+ * @ufshcd_tm_wait_queue: wait queue for task management
+ * @tm_condition: condition variable for task management
+ * @ufshcd_state: UFSHCD states
+ * @int_enable_mask: Interrupt Mask Bits
+ * @uic_workq: Work queue for UIC completion handling
+ * @feh_workq: Work queue for fatal controller error handling
+ * @errors: HBA errors
+ */
+struct ufs_hba {
+       void __iomem *mmio_base;
+
+       /* Virtual memory reference */
+       struct utp_transfer_cmd_desc *ucdl_base_addr;
+       struct utp_transfer_req_desc *utrdl_base_addr;
+       struct utp_task_req_desc *utmrdl_base_addr;
+
+       /* DMA memory reference */
+       dma_addr_t ucdl_dma_addr;
+       dma_addr_t utrdl_dma_addr;
+       dma_addr_t utmrdl_dma_addr;
+
+       struct Scsi_Host *host;
+       struct device *dev;
+
+       struct ufshcd_lrb *lrb;
+
+       unsigned long outstanding_tasks;
+       unsigned long outstanding_reqs;
+
+       u32 capabilities;
+       int nutrs;
+       int nutmrs;
+       u32 ufs_version;
+       unsigned int irq;
+
+       struct uic_command active_uic_cmd;
+       wait_queue_head_t ufshcd_tm_wait_queue;
+       unsigned long tm_condition;
+
+       u32 ufshcd_state;
+       u32 int_enable_mask;
+
+       /* Work Queues */
+       struct work_struct uic_workq;
+       struct work_struct feh_workq;
+
+       /* HBA Errors */
+       u32 errors;
+};
+
+int ufshcd_init(struct device *, struct ufs_hba ** , void __iomem * ,
+                       unsigned int);
+void ufshcd_remove(struct ufs_hba *);
+
+/**
+ * ufshcd_hba_stop - Send controller to reset state
+ * @hba: per adapter instance
+ */
+static inline void ufshcd_hba_stop(struct ufs_hba *hba)
+{
+       writel(CONTROLLER_DISABLE, (hba->mmio_base + REG_CONTROLLER_ENABLE));
+}
+
+#endif /* End of Header */
index 6e3510f..0c16484 100644 (file)
@@ -2,45 +2,35 @@
  * Universal Flash Storage Host controller driver
  *
  * This code is based on drivers/scsi/ufs/ufshci.h
- * Copyright (C) 2011-2012 Samsung India Software Operations
+ * Copyright (C) 2011-2013 Samsung India Software Operations
  *
- * Santosh Yaraganavi <santosh.sy@samsung.com>
- * Vinayak Holikatti <h.vinayak@samsung.com>
+ * Authors:
+ *     Santosh Yaraganavi <santosh.sy@samsung.com>
+ *     Vinayak Holikatti <h.vinayak@samsung.com>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
  * as published by the Free Software Foundation; either version 2
  * of the License, or (at your option) any later version.
+ * See the COPYING file in the top-level directory or visit
+ * <http://www.gnu.org/licenses/gpl-2.0.html>
  *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * NO WARRANTY
- * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
- * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
- * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
- * solely responsible for determining the appropriateness of using and
- * distributing the Program and assumes all risks associated with its
- * exercise of rights under this Agreement, including but not limited to
- * the risks and costs of program errors, damage to or loss of data,
- * programs or equipment, and unavailability or interruption of operations.
-
- * DISCLAIMER OF LIABILITY
- * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
- * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
- * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
- * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
-
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
- * USA.
+ * This program is provided "AS IS" and "WITH ALL FAULTS" and
+ * without warranty of any kind. You are solely responsible for
+ * determining the appropriateness of using and distributing
+ * the program and assume all risks associated with your exercise
+ * of rights with respect to the program, including but not limited
+ * to infringement of third party rights, the risks and costs of
+ * program errors, damage to or loss of data, programs or equipment,
+ * and unavailability or interruption of operations. Under no
+ * circumstances will the contributor of this Program be liable for
+ * any damages of any kind arising from your use or distribution of
+ * this program.
  */
 
 #ifndef _UFSHCI_H
index f2b9491..562ff9d 100644 (file)
@@ -1,4 +1 @@
-header-y += scsi_netlink.h
-header-y += scsi_netlink_fc.h
-header-y += scsi_bsg_fc.h
 header-y += fc/
index 5660381..e69de29 100644 (file)
@@ -1,4 +0,0 @@
-header-y += fc_els.h
-header-y += fc_fs.h
-header-y += fc_gs.h
-header-y += fc_ns.h
index 604cb9b..7e23148 100644 (file)
@@ -34,7 +34,8 @@ struct fcoe_sysfs_function_template {
        void (*get_fcoe_ctlr_symb_err)(struct fcoe_ctlr_device *);
        void (*get_fcoe_ctlr_err_block)(struct fcoe_ctlr_device *);
        void (*get_fcoe_ctlr_fcs_error)(struct fcoe_ctlr_device *);
-       void (*get_fcoe_ctlr_mode)(struct fcoe_ctlr_device *);
+       void (*set_fcoe_ctlr_mode)(struct fcoe_ctlr_device *);
+       int  (*set_fcoe_ctlr_enabled)(struct fcoe_ctlr_device *);
        void (*get_fcoe_fcf_selected)(struct fcoe_fcf_device *);
        void (*get_fcoe_fcf_vlan_id)(struct fcoe_fcf_device *);
 };
@@ -48,6 +49,12 @@ enum fip_conn_type {
        FIP_CONN_TYPE_VN2VN,
 };
 
+enum ctlr_enabled_state {
+       FCOE_CTLR_ENABLED,
+       FCOE_CTLR_DISABLED,
+       FCOE_CTLR_UNUSED,
+};
+
 struct fcoe_ctlr_device {
        u32                             id;
 
@@ -64,6 +71,8 @@ struct fcoe_ctlr_device {
        int                             fcf_dev_loss_tmo;
        enum fip_conn_type              mode;
 
+       enum ctlr_enabled_state         enabled;
+
        /* expected in host order for displaying */
        struct fcoe_fc_els_lesb         lesb;
 };
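
The new enabled state together with the set_fcoe_ctlr_enabled() template hook lets fcoe_sysfs ask an LLD to bring a controller up or down without destroying it. A hedged sketch of how a driver might back the hook follows; the my_fcoe_* names and private-data layout are hypothetical, while fcoe_ctlr_device_priv() and fcoe_ctlr_set_fip_mode() are assumed to be the existing helpers.

#include <scsi/fcoe_sysfs.h>

struct my_fcoe_interface;				/* hypothetical per-port state */
int my_fcoe_enable(struct my_fcoe_interface *fcoe);	/* hypothetical helpers */
int my_fcoe_disable(struct my_fcoe_interface *fcoe);

/* Hypothetical LLD implementation of the new enable/disable hook. */
static int my_fcoe_set_ctlr_enabled(struct fcoe_ctlr_device *ctlr_dev)
{
	struct my_fcoe_interface *fcoe = fcoe_ctlr_device_priv(ctlr_dev);

	switch (ctlr_dev->enabled) {
	case FCOE_CTLR_ENABLED:
		return my_fcoe_enable(fcoe);	/* restart FIP/link on the port */
	case FCOE_CTLR_DISABLED:
		return my_fcoe_disable(fcoe);	/* quiesce, but keep the instance */
	case FCOE_CTLR_UNUSED:
	default:
		return -ENOTSUPP;		/* state not tracked by this driver */
	}
}

static struct fcoe_sysfs_function_template my_fcoe_sysfs_templ = {
	.set_fcoe_ctlr_mode	= fcoe_ctlr_set_fip_mode,
	.set_fcoe_ctlr_enabled	= my_fcoe_set_ctlr_enabled,
	/* get_* statistics callbacks omitted for brevity */
};
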
index 8742d85..4427393 100644 (file)
@@ -260,6 +260,9 @@ void __fcoe_get_lesb(struct fc_lport *lport, struct fc_els_lesb *fc_lesb,
                     struct net_device *netdev);
 void fcoe_wwn_to_str(u64 wwn, char *buf, int len);
 int fcoe_validate_vport_create(struct fc_vport *vport);
+int fcoe_link_speed_update(struct fc_lport *);
+void fcoe_get_lesb(struct fc_lport *, struct fc_els_lesb *);
+void fcoe_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev);
 
 /**
  * is_fip_mode() - returns true if FIP mode selected.
@@ -289,8 +292,11 @@ static inline bool is_fip_mode(struct fcoe_ctlr *fip)
  * @attached:  whether this transport is already attached
  * @list:      list linkage to all attached transports
  * @match:     handler to allow the transport driver to match up a given netdev
+ * @alloc:      handler to allocate per-instance FCoE structures
+ *             (no discovery or login)
  * @create:    handler to sysfs entry of create for FCoE instances
- * @destroy:   handler to sysfs entry of destroy for FCoE instances
+ * @destroy:    handler to delete per-instance FCoE structures
+ *             (frees all memory)
  * @enable:    handler to sysfs entry of enable for FCoE instances
  * @disable:   handler to sysfs entry of disable for FCoE instances
  */
@@ -299,6 +305,7 @@ struct fcoe_transport {
        bool attached;
        struct list_head list;
        bool (*match) (struct net_device *device);
+       int (*alloc) (struct net_device *device);
        int (*create) (struct net_device *device, enum fip_state fip_mode);
        int (*destroy) (struct net_device *device);
        int (*enable) (struct net_device *device);
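
The new alloc handler separates "allocate the per-netdev FCoE structures" from create, which also starts discovery and login; fcoe_sysfs can therefore instantiate a controller first and attach it to the fabric later. A sketch of a transport ops table using the hook is below; the handlers are hypothetical, fields of struct fcoe_transport not shown in this hunk are omitted, and the table is registered with fcoe_transport_attach() as before.

/* Hypothetical transport; only the ops wiring is of interest here. */
static struct fcoe_transport my_fcoe_transport = {
	.attached = false,
	.list	  = LIST_HEAD_INIT(my_fcoe_transport.list),
	.match	  = my_fcoe_match,
	.alloc	  = my_fcoe_alloc,	/* per-netdev structures only, no FIP yet */
	.create	  = my_fcoe_create,	/* alloc + start discovery and login */
	.destroy  = my_fcoe_destroy,	/* tear everything down and free it */
	.enable	  = my_fcoe_enable,
	.disable  = my_fcoe_disable,
};

static int __init my_fcoe_init(void)
{
	return fcoe_transport_attach(&my_fcoe_transport);
}
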
@@ -347,7 +354,20 @@ struct fcoe_port {
        struct timer_list     timer;
        struct work_struct    destroy_work;
        u8                    data_src_addr[ETH_ALEN];
+       struct net_device * (*get_netdev)(const struct fc_lport *lport);
 };
+
+/**
+ * fcoe_get_netdev() - Return the net device associated with a local port
+ * @lport: The local port to get the net device from
+ */
+static inline struct net_device *fcoe_get_netdev(const struct fc_lport *lport)
+{
+       struct fcoe_port *port = ((struct fcoe_port *)lport_priv(lport));
+
+       return (port->get_netdev) ? port->get_netdev(lport) : NULL;
+}
+
 void fcoe_clean_pending_queue(struct fc_lport *);
 void fcoe_check_wait_queue(struct fc_lport *lport, struct sk_buff *skb);
 void fcoe_queue_timer(ulong lport);
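
fcoe_port now carries a get_netdev() callback, so code shared between transports can recover the underlying net_device from an fc_lport without knowing which LLD owns it. A hedged sketch of how an LLD might supply the hook is below; my_fcoe_interface and the port->priv layout are assumptions for illustration.

/* Hypothetical accessor backing port->get_netdev. */
static struct net_device *my_fcoe_netdev(const struct fc_lport *lport)
{
	struct fcoe_port *port = lport_priv(lport);
	struct my_fcoe_interface *fcoe = port->priv;	/* assumed private data */

	return fcoe->netdev;
}

/* Filled in once, when the LLD allocates the lport. */
static void my_fcoe_set_netdev_hook(struct fc_lport *lport)
{
	struct fcoe_port *port = lport_priv(lport);

	port->get_netdev = my_fcoe_netdev;
}

Shared code can then call fcoe_get_netdev(lport) and, per the inline above, gets NULL when a transport does not provide the hook.
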
@@ -356,7 +376,7 @@ int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen,
 
 /* FCoE Sysfs helpers */
 void fcoe_fcf_get_selected(struct fcoe_fcf_device *);
-void fcoe_ctlr_get_fip_mode(struct fcoe_ctlr_device *);
+void fcoe_ctlr_set_fip_mode(struct fcoe_ctlr_device *);
 
 /**
  * struct netdev_list
@@ -372,4 +392,12 @@ struct fcoe_netdev_mapping {
 int fcoe_transport_attach(struct fcoe_transport *ft);
 int fcoe_transport_detach(struct fcoe_transport *ft);
 
+/* sysfs store handler for ctrl_control interface */
+ssize_t fcoe_ctlr_create_store(struct bus_type *bus,
+                              const char *buf, size_t count);
+ssize_t fcoe_ctlr_destroy_store(struct bus_type *bus,
+                               const char *buf, size_t count);
+
 #endif /* _LIBFCOE_H */
+
+
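
fcoe_ctlr_create_store() and fcoe_ctlr_destroy_store() have the standard bus_attribute store signature, so they can be exposed as write-only files on the fcoe bus; writing an interface name then creates or destroys a controller instance. A sketch of one possible wiring follows; the attribute names and helper below are illustrative, not necessarily the in-tree code.

/* Sketch: expose the store handlers as write-only files on the fcoe bus. */
static BUS_ATTR(ctlr_create, S_IWUSR, NULL, fcoe_ctlr_create_store);
static BUS_ATTR(ctlr_destroy, S_IWUSR, NULL, fcoe_ctlr_destroy_store);

static int fcoe_bus_add_control_files(struct bus_type *bus)
{
	int err;

	err = bus_create_file(bus, &bus_attr_ctlr_create);
	if (err)
		return err;
	return bus_create_file(bus, &bus_attr_ctlr_destroy);
}
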
index 4908480..2b6956e 100644 (file)
@@ -873,7 +873,7 @@ static inline unsigned int scsi_host_dif_capable(struct Scsi_Host *shost, unsign
                                       SHOST_DIF_TYPE2_PROTECTION,
                                       SHOST_DIF_TYPE3_PROTECTION };
 
-       if (target_type > SHOST_DIF_TYPE3_PROTECTION)
+       if (target_type >= ARRAY_SIZE(cap))
                return 0;
 
        return shost->prot_capabilities & cap[target_type] ? target_type : 0;
@@ -887,7 +887,7 @@ static inline unsigned int scsi_host_dix_capable(struct Scsi_Host *shost, unsign
                                       SHOST_DIX_TYPE2_PROTECTION,
                                       SHOST_DIX_TYPE3_PROTECTION };
 
-       if (target_type > SHOST_DIX_TYPE3_PROTECTION)
+       if (target_type >= ARRAY_SIZE(cap))
                return 0;
 
        return shost->prot_capabilities & cap[target_type];
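
For context, ARRAY_SIZE() is the natural bound here: cap[] is indexed by the DIF/DIX type number, while the SHOST_*_PROTECTION constants it contains are single-bit capability flags whose numeric values have nothing to do with valid indices. The old comparison against a flag value therefore let an out-of-range target_type slip through and read past the end of cap[].

	/* assumed values, for illustration only:
	 *   SHOST_DIF_TYPE3_PROTECTION == 1 << 2 == 4   (a capability bit)
	 *   ARRAY_SIZE(cap)            == 4             (valid indices 0..3)
	 * so "target_type > SHOST_DIF_TYPE3_PROTECTION" accepted
	 * target_type == 4 and read cap[4], one past the end; bounding the
	 * index by ARRAY_SIZE(cap) checks exactly what is being indexed.
	 */
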
index 29a87dd..75746d5 100644 (file)
@@ -1,2 +1,5 @@
 # UAPI Header export list
 header-y += fc/
+header-y += scsi_bsg_fc.h
+header-y += scsi_netlink.h
+header-y += scsi_netlink_fc.h
index aafaa5a..5ead9fa 100644 (file)
@@ -1 +1,5 @@
 # UAPI Header export list
+header-y += fc_els.h
+header-y += fc_fs.h
+header-y += fc_gs.h
+header-y += fc_ns.h