Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
author Linus Torvalds <torvalds@linux-foundation.org>
Sat, 5 Nov 2011 22:32:53 +0000 (15:32 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sat, 5 Nov 2011 22:32:53 +0000 (15:32 -0700)
* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (45 commits)
  [SCSI] Fix block queue and elevator memory leak in scsi_alloc_sdev
  [SCSI] scsi_dh_alua: Fix the time interval for alua rtpg commands
  [SCSI] scsi_transport_iscsi: Fix documentation of parameter
  [SCSI] mv_sas: OCZ RevoDrive3 & zDrive R4 support
  [SCSI] libfc: improve flogi retries to avoid lport getting stuck
  [SCSI] libfc: avoid exchanges collision during lport reset
  [SCSI] libfc: fix checking FC_TYPE_BLS
  [SCSI] edd: Treat "XPRS" host bus type the same as "PCI"
  [SCSI] isci: override max_concurr_spinup oem parameter with max(oem, user)
  [SCSI] isci: revert bcn filtering
  [SCSI] isci: Fix hard reset timeout conditions.
  [SCSI] isci: No need to manage the pending reset bit on pending requests.
  [SCSI] isci: Remove redundant isci_request.ttype field.
  [SCSI] isci: Fix task management for SMP, SATA and on dev remove.
  [SCSI] isci: No task_done callbacks in error handler paths.
  [SCSI] isci: Handle task request timeouts correctly.
  [SCSI] isci: Fix tag leak in tasks and terminated requests.
  [SCSI] isci: Immediately fail I/O to removed devices.
  [SCSI] isci: Lookup device references through requests in completions.
  [SCSI] ipr: add definitions for additional adapter
  ...

45 files changed:
drivers/firmware/edd.c
drivers/scsi/bnx2fc/bnx2fc.h
drivers/scsi/bnx2fc/bnx2fc_els.c
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
drivers/scsi/bnx2fc/bnx2fc_io.c
drivers/scsi/device_handler/scsi_dh.c
drivers/scsi/device_handler/scsi_dh_alua.c
drivers/scsi/fcoe/fcoe.c
drivers/scsi/hosts.c
drivers/scsi/hpsa.c
drivers/scsi/hpsa.h
drivers/scsi/hpsa_cmd.h
drivers/scsi/ipr.c
drivers/scsi/ipr.h
drivers/scsi/isci/host.c
drivers/scsi/isci/init.c
drivers/scsi/isci/port.c
drivers/scsi/isci/port.h
drivers/scsi/isci/probe_roms.h
drivers/scsi/isci/remote_device.c
drivers/scsi/isci/remote_device.h
drivers/scsi/isci/request.c
drivers/scsi/isci/request.h
drivers/scsi/isci/task.c
drivers/scsi/isci/task.h
drivers/scsi/libfc/fc_exch.c
drivers/scsi/libfc/fc_lport.c
drivers/scsi/mpt2sas/mpi/mpi2.h
drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
drivers/scsi/mpt2sas/mpt2sas_base.c
drivers/scsi/mpt2sas/mpt2sas_base.h
drivers/scsi/mpt2sas/mpt2sas_config.c
drivers/scsi/mpt2sas/mpt2sas_ctl.c
drivers/scsi/mpt2sas/mpt2sas_scsih.c
drivers/scsi/mvsas/mv_init.c
drivers/scsi/pmcraid.c
drivers/scsi/qla2xxx/qla_isr.c
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_scan.c
drivers/scsi/scsi_transport_iscsi.c
drivers/scsi/sd.c
drivers/scsi/sd.h
drivers/scsi/st.c
include/linux/pci_ids.h

drivers/firmware/edd.c
index f1b7f65..e229576 100644
@@ -151,7 +151,8 @@ edd_show_host_bus(struct edd_device *edev, char *buf)
                p += scnprintf(p, left, "\tbase_address: %x\n",
                             info->params.interface_path.isa.base_address);
        } else if (!strncmp(info->params.host_bus_type, "PCIX", 4) ||
-                  !strncmp(info->params.host_bus_type, "PCI", 3)) {
+                  !strncmp(info->params.host_bus_type, "PCI", 3) ||
+                  !strncmp(info->params.host_bus_type, "XPRS", 4)) {
                p += scnprintf(p, left,
                             "\t%02x:%02x.%d  channel: %u\n",
                             info->params.interface_path.pci.bus,
@@ -159,7 +160,6 @@ edd_show_host_bus(struct edd_device *edev, char *buf)
                             info->params.interface_path.pci.function,
                             info->params.interface_path.pci.channel);
        } else if (!strncmp(info->params.host_bus_type, "IBND", 4) ||
-                  !strncmp(info->params.host_bus_type, "XPRS", 4) ||
                   !strncmp(info->params.host_bus_type, "HTPT", 4)) {
                p += scnprintf(p, left,
                             "\tTBD: %llx\n",
@@ -668,7 +668,7 @@ edd_get_pci_dev(struct edd_device *edev)
 {
        struct edd_info *info = edd_dev_get_info(edev);
 
-       if (edd_dev_is_type(edev, "PCI")) {
+       if (edd_dev_is_type(edev, "PCI") || edd_dev_is_type(edev, "XPRS")) {
                return pci_get_bus_and_slot(info->params.interface_path.pci.bus,
                                     PCI_DEVFN(info->params.interface_path.pci.slot,
                                               info->params.interface_path.pci.
drivers/scsi/bnx2fc/bnx2fc.h
index 63de1c7..049ea90 100644
@@ -62,7 +62,7 @@
 #include "bnx2fc_constants.h"
 
 #define BNX2FC_NAME            "bnx2fc"
-#define BNX2FC_VERSION         "1.0.8"
+#define BNX2FC_VERSION         "1.0.9"
 
 #define PFX                    "bnx2fc: "
 
 #define REC_RETRY_COUNT                        1
 #define BNX2FC_NUM_ERR_BITS            63
 
+#define BNX2FC_RELOGIN_WAIT_TIME       200
+#define BNX2FC_RELOGIN_WAIT_CNT                10
+
 /* bnx2fc driver uses only one instance of fcoe_percpu_s */
 extern struct fcoe_percpu_s bnx2fc_global;
 
drivers/scsi/bnx2fc/bnx2fc_els.c
index fd382fe..ce0ce3e 100644
@@ -268,17 +268,6 @@ void bnx2fc_srr_compl(struct bnx2fc_els_cb_arg *cb_arg)
 
        orig_io_req = cb_arg->aborted_io_req;
        srr_req = cb_arg->io_req;
-       if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags)) {
-               BNX2FC_IO_DBG(srr_req, "srr_compl: xid - 0x%x completed",
-                       orig_io_req->xid);
-               goto srr_compl_done;
-       }
-       if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
-               BNX2FC_IO_DBG(srr_req, "rec abts in prog "
-                      "orig_io - 0x%x\n",
-                       orig_io_req->xid);
-               goto srr_compl_done;
-       }
        if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &srr_req->req_flags)) {
                /* SRR timedout */
                BNX2FC_IO_DBG(srr_req, "srr timed out, abort "
@@ -290,6 +279,12 @@ void bnx2fc_srr_compl(struct bnx2fc_els_cb_arg *cb_arg)
                                "failed. issue cleanup\n");
                        bnx2fc_initiate_cleanup(srr_req);
                }
+               if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags) ||
+                   test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
+                       BNX2FC_IO_DBG(srr_req, "srr_compl:xid 0x%x flags = %lx",
+                                     orig_io_req->xid, orig_io_req->req_flags);
+                       goto srr_compl_done;
+               }
                orig_io_req->srr_retry++;
                if (orig_io_req->srr_retry <= SRR_RETRY_COUNT) {
                        struct bnx2fc_rport *tgt = orig_io_req->tgt;
@@ -311,6 +306,12 @@ void bnx2fc_srr_compl(struct bnx2fc_els_cb_arg *cb_arg)
                }
                goto srr_compl_done;
        }
+       if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags) ||
+           test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
+               BNX2FC_IO_DBG(srr_req, "srr_compl:xid - 0x%x flags = %lx",
+                             orig_io_req->xid, orig_io_req->req_flags);
+               goto srr_compl_done;
+       }
        mp_req = &(srr_req->mp_req);
        fc_hdr = &(mp_req->resp_fc_hdr);
        resp_len = mp_req->resp_len;
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 85bcc4b..8c6156a 100644
@@ -22,7 +22,7 @@ DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
 
 #define DRV_MODULE_NAME                "bnx2fc"
 #define DRV_MODULE_VERSION     BNX2FC_VERSION
-#define DRV_MODULE_RELDATE     "Oct 02, 2011"
+#define DRV_MODULE_RELDATE     "Oct 21, 2011"
 
 
 static char version[] __devinitdata =
drivers/scsi/bnx2fc/bnx2fc_io.c
index 0c64d18..84a78af 100644
@@ -1103,7 +1103,10 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
        struct fc_rport_libfc_priv *rp = rport->dd_data;
        struct bnx2fc_cmd *io_req;
        struct fc_lport *lport;
+       struct fc_rport_priv *rdata;
        struct bnx2fc_rport *tgt;
+       int logo_issued;
+       int wait_cnt = 0;
        int rc = FAILED;
 
 
@@ -1192,8 +1195,40 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
        } else {
                printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
                                "already in abts processing\n", io_req->xid);
+               if (cancel_delayed_work(&io_req->timeout_work))
+                       kref_put(&io_req->refcount,
+                                bnx2fc_cmd_release); /* drop timer hold */
+               bnx2fc_initiate_cleanup(io_req);
+
+               spin_unlock_bh(&tgt->tgt_lock);
+
+               wait_for_completion(&io_req->tm_done);
+
+               spin_lock_bh(&tgt->tgt_lock);
+               io_req->wait_for_comp = 0;
+               rdata = io_req->tgt->rdata;
+               logo_issued = test_and_set_bit(BNX2FC_FLAG_EXPL_LOGO,
+                                              &tgt->flags);
                kref_put(&io_req->refcount, bnx2fc_cmd_release);
                spin_unlock_bh(&tgt->tgt_lock);
+
+               if (!logo_issued) {
+                       BNX2FC_IO_DBG(io_req, "Expl logo - tgt flags = 0x%lx\n",
+                                     tgt->flags);
+                       mutex_lock(&lport->disc.disc_mutex);
+                       lport->tt.rport_logoff(rdata);
+                       mutex_unlock(&lport->disc.disc_mutex);
+                       do {
+                               msleep(BNX2FC_RELOGIN_WAIT_TIME);
+                               /*
+                                * If session not recovered, let SCSI-ml
+                                * escalate error recovery.
+                                */
+                               if (wait_cnt++ > BNX2FC_RELOGIN_WAIT_CNT)
+                                       return FAILED;
+                       } while (!test_bit(BNX2FC_FLAG_SESSION_READY,
+                                          &tgt->flags));
+               }
                return SUCCESS;
        }
        if (rc == FAILED) {
@@ -1275,6 +1310,8 @@ void bnx2fc_process_cleanup_compl(struct bnx2fc_cmd *io_req,
                   io_req->refcount.refcount.counter, io_req->cmd_type);
        bnx2fc_scsi_done(io_req, DID_ERROR);
        kref_put(&io_req->refcount, bnx2fc_cmd_release);
+       if (io_req->wait_for_comp)
+               complete(&io_req->tm_done);
 }
 
 void bnx2fc_process_abts_compl(struct bnx2fc_cmd *io_req,
drivers/scsi/device_handler/scsi_dh.c
index 7c05fd9..339ea23 100644
@@ -441,7 +441,15 @@ int scsi_dh_activate(struct request_queue *q, activate_complete fn, void *data)
 
        spin_lock_irqsave(q->queue_lock, flags);
        sdev = q->queuedata;
-       if (sdev && sdev->scsi_dh_data)
+       if (!sdev) {
+               spin_unlock_irqrestore(q->queue_lock, flags);
+               err = SCSI_DH_NOSYS;
+               if (fn)
+                       fn(data, err);
+               return err;
+       }
+
+       if (sdev->scsi_dh_data)
                scsi_dh = sdev->scsi_dh_data->scsi_dh;
        dev = get_device(&sdev->sdev_gendev);
        if (!scsi_dh || !dev ||
drivers/scsi/device_handler/scsi_dh_alua.c
index 627f4b5..fe4df2d 100644
@@ -507,7 +507,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h)
        int len, k, off, valid_states = 0;
        unsigned char *ucp;
        unsigned err;
-       unsigned long expiry, interval = 1;
+       unsigned long expiry, interval = 1000;
 
        expiry = round_jiffies_up(jiffies + ALUA_FAILOVER_TIMEOUT);
  retry:
@@ -734,6 +734,7 @@ static int alua_bus_attach(struct scsi_device *sdev)
        spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
        sdev->scsi_dh_data = scsi_dh_data;
        spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
+       sdev_printk(KERN_NOTICE, sdev, "%s: Attached\n", ALUA_DH_NAME);
 
        return 0;
 
drivers/scsi/fcoe/fcoe.c
index 61384ee..cefbe44 100644
@@ -2347,14 +2347,11 @@ static void fcoe_flogi_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
                goto done;
 
        mac = fr_cb(fp)->granted_mac;
-       if (is_zero_ether_addr(mac)) {
-               /* pre-FIP */
-               if (fcoe_ctlr_recv_flogi(fip, lport, fp)) {
-                       fc_frame_free(fp);
-                       return;
-               }
-       }
-       fcoe_update_src_mac(lport, mac);
+       /* pre-FIP */
+       if (is_zero_ether_addr(mac))
+               fcoe_ctlr_recv_flogi(fip, lport, fp);
+       if (!is_zero_ether_addr(mac))
+               fcoe_update_src_mac(lport, mac);
 done:
        fc_lport_flogi_resp(seq, fp, lport);
 }
drivers/scsi/hosts.c
index 4f7a582..351dc0b 100644
@@ -286,6 +286,7 @@ static void scsi_host_dev_release(struct device *dev)
 {
        struct Scsi_Host *shost = dev_to_shost(dev);
        struct device *parent = dev->parent;
+       struct request_queue *q;
 
        scsi_proc_hostdir_rm(shost->hostt);
 
@@ -293,9 +294,11 @@ static void scsi_host_dev_release(struct device *dev)
                kthread_stop(shost->ehandler);
        if (shost->work_q)
                destroy_workqueue(shost->work_q);
-       if (shost->uspace_req_q) {
-               kfree(shost->uspace_req_q->queuedata);
-               scsi_free_queue(shost->uspace_req_q);
+       q = shost->uspace_req_q;
+       if (q) {
+               kfree(q->queuedata);
+               q->queuedata = NULL;
+               scsi_free_queue(q);
        }
 
        scsi_destroy_command_freelist(shost);
drivers/scsi/hpsa.c
index bbdc9f9..e76107b 100644
@@ -48,6 +48,7 @@
 #include <linux/bitmap.h>
 #include <linux/atomic.h>
 #include <linux/kthread.h>
+#include <linux/jiffies.h>
 #include "hpsa_cmd.h"
 #include "hpsa.h"
 
@@ -127,6 +128,10 @@ static struct board_type products[] = {
 
 static int number_of_controllers;
 
+static struct list_head hpsa_ctlr_list = LIST_HEAD_INIT(hpsa_ctlr_list);
+static spinlock_t lockup_detector_lock;
+static struct task_struct *hpsa_lockup_detector;
+
 static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
 static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
 static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg);
@@ -484,6 +489,7 @@ static struct scsi_host_template hpsa_driver_template = {
 #endif
        .sdev_attrs = hpsa_sdev_attrs,
        .shost_attrs = hpsa_shost_attrs,
+       .max_sectors = 8192,
 };
 
 
@@ -566,16 +572,16 @@ static int hpsa_find_target_lun(struct ctlr_info *h,
         * assumes h->devlock is held
         */
        int i, found = 0;
-       DECLARE_BITMAP(lun_taken, HPSA_MAX_SCSI_DEVS_PER_HBA);
+       DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);
 
-       memset(&lun_taken[0], 0, HPSA_MAX_SCSI_DEVS_PER_HBA >> 3);
+       memset(&lun_taken[0], 0, HPSA_MAX_DEVICES >> 3);
 
        for (i = 0; i < h->ndevices; i++) {
                if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
                        set_bit(h->dev[i]->target, lun_taken);
        }
 
-       for (i = 0; i < HPSA_MAX_SCSI_DEVS_PER_HBA; i++) {
+       for (i = 0; i < HPSA_MAX_DEVICES; i++) {
                if (!test_bit(i, lun_taken)) {
                        /* *bus = 1; */
                        *target = i;
@@ -598,7 +604,7 @@ static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
        unsigned char addr1[8], addr2[8];
        struct hpsa_scsi_dev_t *sd;
 
-       if (n >= HPSA_MAX_SCSI_DEVS_PER_HBA) {
+       if (n >= HPSA_MAX_DEVICES) {
                dev_err(&h->pdev->dev, "too many devices, some will be "
                        "inaccessible.\n");
                return -1;
@@ -673,7 +679,7 @@ static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
        struct hpsa_scsi_dev_t *removed[], int *nremoved)
 {
        /* assumes h->devlock is held */
-       BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA);
+       BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
        removed[*nremoved] = h->dev[entry];
        (*nremoved)++;
 
@@ -702,7 +708,7 @@ static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
        int i;
        struct hpsa_scsi_dev_t *sd;
 
-       BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA);
+       BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
 
        sd = h->dev[entry];
        removed[*nremoved] = h->dev[entry];
@@ -814,10 +820,8 @@ static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
        int nadded, nremoved;
        struct Scsi_Host *sh = NULL;
 
-       added = kzalloc(sizeof(*added) * HPSA_MAX_SCSI_DEVS_PER_HBA,
-               GFP_KERNEL);
-       removed = kzalloc(sizeof(*removed) * HPSA_MAX_SCSI_DEVS_PER_HBA,
-               GFP_KERNEL);
+       added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL);
+       removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL);
 
        if (!added || !removed) {
                dev_warn(&h->pdev->dev, "out of memory in "
@@ -1338,6 +1342,22 @@ static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
        wait_for_completion(&wait);
 }
 
+static void hpsa_scsi_do_simple_cmd_core_if_no_lockup(struct ctlr_info *h,
+       struct CommandList *c)
+{
+       unsigned long flags;
+
+       /* If controller lockup detected, fake a hardware error. */
+       spin_lock_irqsave(&h->lock, flags);
+       if (unlikely(h->lockup_detected)) {
+               spin_unlock_irqrestore(&h->lock, flags);
+               c->err_info->CommandStatus = CMD_HARDWARE_ERR;
+       } else {
+               spin_unlock_irqrestore(&h->lock, flags);
+               hpsa_scsi_do_simple_cmd_core(h, c);
+       }
+}
+
 static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
        struct CommandList *c, int data_direction)
 {
@@ -1735,7 +1755,6 @@ static int add_msa2xxx_enclosure_device(struct ctlr_info *h,
        if (is_scsi_rev_5(h))
                return 0; /* p1210m doesn't need to do this. */
 
-#define MAX_MSA2XXX_ENCLOSURES 32
        if (*nmsa2xxx_enclosures >= MAX_MSA2XXX_ENCLOSURES) {
                dev_warn(&h->pdev->dev, "Maximum number of MSA2XXX "
                        "enclosures exceeded.  Check your hardware "
@@ -1846,8 +1865,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
        int raid_ctlr_position;
        DECLARE_BITMAP(lunzerobits, HPSA_MAX_TARGETS_PER_CTLR);
 
-       currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_SCSI_DEVS_PER_HBA,
-               GFP_KERNEL);
+       currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
        physdev_list = kzalloc(reportlunsize, GFP_KERNEL);
        logdev_list = kzalloc(reportlunsize, GFP_KERNEL);
        tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
@@ -1870,6 +1888,13 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
 
        /* Allocate the per device structures */
        for (i = 0; i < ndevs_to_allocate; i++) {
+               if (i >= HPSA_MAX_DEVICES) {
+                       dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded."
+                               "  %d devices ignored.\n", HPSA_MAX_DEVICES,
+                               ndevs_to_allocate - HPSA_MAX_DEVICES);
+                       break;
+               }
+
                currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
                if (!currentsd[i]) {
                        dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
@@ -1956,7 +1981,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
                default:
                        break;
                }
-               if (ncurrent >= HPSA_MAX_SCSI_DEVS_PER_HBA)
+               if (ncurrent >= HPSA_MAX_DEVICES)
                        break;
        }
        adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent);
@@ -2048,8 +2073,14 @@ static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
        }
        memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
 
-       /* Need a lock as this is being allocated from the pool */
        spin_lock_irqsave(&h->lock, flags);
+       if (unlikely(h->lockup_detected)) {
+               spin_unlock_irqrestore(&h->lock, flags);
+               cmd->result = DID_ERROR << 16;
+               done(cmd);
+               return 0;
+       }
+       /* Need a lock as this is being allocated from the pool */
        c = cmd_alloc(h);
        spin_unlock_irqrestore(&h->lock, flags);
        if (c == NULL) {                        /* trouble... */
@@ -2601,7 +2632,7 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
                c->SG[0].Len = iocommand.buf_size;
                c->SG[0].Ext = 0; /* we are not chaining*/
        }
-       hpsa_scsi_do_simple_cmd_core(h, c);
+       hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
        if (iocommand.buf_size > 0)
                hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
        check_ioctl_unit_attention(h, c);
@@ -2724,7 +2755,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
                        c->SG[i].Ext = 0;
                }
        }
-       hpsa_scsi_do_simple_cmd_core(h, c);
+       hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
        if (sg_used)
                hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
        check_ioctl_unit_attention(h, c);
@@ -2872,6 +2903,8 @@ static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
                        c->Request.Timeout = 0;
                        c->Request.CDB[0] = BMIC_WRITE;
                        c->Request.CDB[6] = BMIC_CACHE_FLUSH;
+                       c->Request.CDB[7] = (size >> 8) & 0xFF;
+                       c->Request.CDB[8] = size & 0xFF;
                        break;
                case TEST_UNIT_READY:
                        c->Request.CDBLen = 6;
@@ -3091,6 +3124,7 @@ static irqreturn_t hpsa_intx_discard_completions(int irq, void *dev_id)
        if (interrupt_not_for_us(h))
                return IRQ_NONE;
        spin_lock_irqsave(&h->lock, flags);
+       h->last_intr_timestamp = get_jiffies_64();
        while (interrupt_pending(h)) {
                raw_tag = get_next_completion(h);
                while (raw_tag != FIFO_EMPTY)
@@ -3110,6 +3144,7 @@ static irqreturn_t hpsa_msix_discard_completions(int irq, void *dev_id)
                return IRQ_NONE;
 
        spin_lock_irqsave(&h->lock, flags);
+       h->last_intr_timestamp = get_jiffies_64();
        raw_tag = get_next_completion(h);
        while (raw_tag != FIFO_EMPTY)
                raw_tag = next_command(h);
@@ -3126,6 +3161,7 @@ static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id)
        if (interrupt_not_for_us(h))
                return IRQ_NONE;
        spin_lock_irqsave(&h->lock, flags);
+       h->last_intr_timestamp = get_jiffies_64();
        while (interrupt_pending(h)) {
                raw_tag = get_next_completion(h);
                while (raw_tag != FIFO_EMPTY) {
@@ -3146,6 +3182,7 @@ static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id)
        u32 raw_tag;
 
        spin_lock_irqsave(&h->lock, flags);
+       h->last_intr_timestamp = get_jiffies_64();
        raw_tag = get_next_completion(h);
        while (raw_tag != FIFO_EMPTY) {
                if (hpsa_tag_contains_index(raw_tag))
@@ -4090,6 +4127,149 @@ static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
        kfree(h);
 }
 
+static void remove_ctlr_from_lockup_detector_list(struct ctlr_info *h)
+{
+       assert_spin_locked(&lockup_detector_lock);
+       if (!hpsa_lockup_detector)
+               return;
+       if (h->lockup_detected)
+               return; /* already stopped the lockup detector */
+       list_del(&h->lockup_list);
+}
+
+/* Called when controller lockup detected. */
+static void fail_all_cmds_on_list(struct ctlr_info *h, struct list_head *list)
+{
+       struct CommandList *c = NULL;
+
+       assert_spin_locked(&h->lock);
+       /* Mark all outstanding commands as failed and complete them. */
+       while (!list_empty(list)) {
+               c = list_entry(list->next, struct CommandList, list);
+               c->err_info->CommandStatus = CMD_HARDWARE_ERR;
+               finish_cmd(c, c->Header.Tag.lower);
+       }
+}
+
+static void controller_lockup_detected(struct ctlr_info *h)
+{
+       unsigned long flags;
+
+       assert_spin_locked(&lockup_detector_lock);
+       remove_ctlr_from_lockup_detector_list(h);
+       h->access.set_intr_mask(h, HPSA_INTR_OFF);
+       spin_lock_irqsave(&h->lock, flags);
+       h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
+       spin_unlock_irqrestore(&h->lock, flags);
+       dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x\n",
+                       h->lockup_detected);
+       pci_disable_device(h->pdev);
+       spin_lock_irqsave(&h->lock, flags);
+       fail_all_cmds_on_list(h, &h->cmpQ);
+       fail_all_cmds_on_list(h, &h->reqQ);
+       spin_unlock_irqrestore(&h->lock, flags);
+}
+
+#define HEARTBEAT_SAMPLE_INTERVAL (10 * HZ)
+#define HEARTBEAT_CHECK_MINIMUM_INTERVAL (HEARTBEAT_SAMPLE_INTERVAL / 2)
+
+static void detect_controller_lockup(struct ctlr_info *h)
+{
+       u64 now;
+       u32 heartbeat;
+       unsigned long flags;
+
+       assert_spin_locked(&lockup_detector_lock);
+       now = get_jiffies_64();
+       /* If we've received an interrupt recently, we're ok. */
+       if (time_after64(h->last_intr_timestamp +
+                               (HEARTBEAT_CHECK_MINIMUM_INTERVAL), now))
+               return;
+
+       /*
+        * If we've already checked the heartbeat recently, we're ok.
+        * This could happen if someone sends us a signal. We
+        * otherwise don't care about signals in this thread.
+        */
+       if (time_after64(h->last_heartbeat_timestamp +
+                               (HEARTBEAT_CHECK_MINIMUM_INTERVAL), now))
+               return;
+
+       /* If heartbeat has not changed since we last looked, we're not ok. */
+       spin_lock_irqsave(&h->lock, flags);
+       heartbeat = readl(&h->cfgtable->HeartBeat);
+       spin_unlock_irqrestore(&h->lock, flags);
+       if (h->last_heartbeat == heartbeat) {
+               controller_lockup_detected(h);
+               return;
+       }
+
+       /* We're ok. */
+       h->last_heartbeat = heartbeat;
+       h->last_heartbeat_timestamp = now;
+}
+
+static int detect_controller_lockup_thread(void *notused)
+{
+       struct ctlr_info *h;
+       unsigned long flags;
+
+       while (1) {
+               struct list_head *this, *tmp;
+
+               schedule_timeout_interruptible(HEARTBEAT_SAMPLE_INTERVAL);
+               if (kthread_should_stop())
+                       break;
+               spin_lock_irqsave(&lockup_detector_lock, flags);
+               list_for_each_safe(this, tmp, &hpsa_ctlr_list) {
+                       h = list_entry(this, struct ctlr_info, lockup_list);
+                       detect_controller_lockup(h);
+               }
+               spin_unlock_irqrestore(&lockup_detector_lock, flags);
+       }
+       return 0;
+}
+
+static void add_ctlr_to_lockup_detector_list(struct ctlr_info *h)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&lockup_detector_lock, flags);
+       list_add_tail(&h->lockup_list, &hpsa_ctlr_list);
+       spin_unlock_irqrestore(&lockup_detector_lock, flags);
+}
+
+static void start_controller_lockup_detector(struct ctlr_info *h)
+{
+       /* Start the lockup detector thread if not already started */
+       if (!hpsa_lockup_detector) {
+               spin_lock_init(&lockup_detector_lock);
+               hpsa_lockup_detector =
+                       kthread_run(detect_controller_lockup_thread,
+                                               NULL, "hpsa");
+       }
+       if (!hpsa_lockup_detector) {
+               dev_warn(&h->pdev->dev,
+                       "Could not start lockup detector thread\n");
+               return;
+       }
+       add_ctlr_to_lockup_detector_list(h);
+}
+
+static void stop_controller_lockup_detector(struct ctlr_info *h)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&lockup_detector_lock, flags);
+       remove_ctlr_from_lockup_detector_list(h);
+       /* If the list of ctlr's to monitor is empty, stop the thread */
+       if (list_empty(&hpsa_ctlr_list)) {
+               kthread_stop(hpsa_lockup_detector);
+               hpsa_lockup_detector = NULL;
+       }
+       spin_unlock_irqrestore(&lockup_detector_lock, flags);
+}
+
 static int __devinit hpsa_init_one(struct pci_dev *pdev,
                                    const struct pci_device_id *ent)
 {
@@ -4127,7 +4307,6 @@ reinit_after_soft_reset:
                return -ENOMEM;
 
        h->pdev = pdev;
-       h->busy_initializing = 1;
        h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
        INIT_LIST_HEAD(&h->cmpQ);
        INIT_LIST_HEAD(&h->reqQ);
@@ -4236,7 +4415,7 @@ reinit_after_soft_reset:
 
        hpsa_hba_inquiry(h);
        hpsa_register_scsi(h);  /* hook ourselves into SCSI subsystem */
-       h->busy_initializing = 0;
+       start_controller_lockup_detector(h);
        return 1;
 
 clean4:
@@ -4245,7 +4424,6 @@ clean4:
        free_irq(h->intr[h->intr_mode], h);
 clean2:
 clean1:
-       h->busy_initializing = 0;
        kfree(h);
        return rc;
 }
@@ -4300,10 +4478,11 @@ static void __devexit hpsa_remove_one(struct pci_dev *pdev)
        struct ctlr_info *h;
 
        if (pci_get_drvdata(pdev) == NULL) {
-               dev_err(&pdev->dev, "unable to remove device \n");
+               dev_err(&pdev->dev, "unable to remove device\n");
                return;
        }
        h = pci_get_drvdata(pdev);
+       stop_controller_lockup_detector(h);
        hpsa_unregister_scsi(h);        /* unhook from SCSI subsystem */
        hpsa_shutdown(pdev);
        iounmap(h->vaddr);
drivers/scsi/hpsa.h
index 7f53cea..91edafb 100644
@@ -95,8 +95,6 @@ struct ctlr_info {
        unsigned long           *cmd_pool_bits;
        int                     nr_allocs;
        int                     nr_frees;
-       int                     busy_initializing;
-       int                     busy_scanning;
        int                     scan_finished;
        spinlock_t              scan_lock;
        wait_queue_head_t       scan_wait_queue;
@@ -104,8 +102,7 @@ struct ctlr_info {
        struct Scsi_Host *scsi_host;
        spinlock_t devlock; /* to protect hba[ctlr]->dev[];  */
        int ndevices; /* number of used elements in .dev[] array. */
-#define HPSA_MAX_SCSI_DEVS_PER_HBA 256
-       struct hpsa_scsi_dev_t *dev[HPSA_MAX_SCSI_DEVS_PER_HBA];
+       struct hpsa_scsi_dev_t *dev[HPSA_MAX_DEVICES];
        /*
         * Performant mode tables.
         */
@@ -124,6 +121,11 @@ struct ctlr_info {
        unsigned char reply_pool_wraparound;
        u32 *blockFetchTable;
        unsigned char *hba_inquiry_data;
+       u64 last_intr_timestamp;
+       u32 last_heartbeat;
+       u64 last_heartbeat_timestamp;
+       u32 lockup_detected;
+       struct list_head lockup_list;
 };
 #define HPSA_ABORT_MSG 0
 #define HPSA_DEVICE_RESET_MSG 1
drivers/scsi/hpsa_cmd.h
index 55d741b..3fd4715 100644
@@ -123,8 +123,11 @@ union u64bit {
 
 /* FIXME this is a per controller value (barf!) */
 #define HPSA_MAX_TARGETS_PER_CTLR 16
-#define HPSA_MAX_LUN 256
+#define HPSA_MAX_LUN 1024
 #define HPSA_MAX_PHYS_LUN 1024
+#define MAX_MSA2XXX_ENCLOSURES 32
+#define HPSA_MAX_DEVICES (HPSA_MAX_PHYS_LUN + HPSA_MAX_LUN + \
+       MAX_MSA2XXX_ENCLOSURES + 1) /* + 1 is for the controller itself */
 
 /* SCSI-3 Commands */
 #pragma pack(1)
drivers/scsi/ipr.c
index 73e24b4..fd860d9 100644
@@ -9122,6 +9122,8 @@ static struct pci_device_id ipr_pci_table[] __devinitdata = {
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
+       { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
+               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
drivers/scsi/ipr.h
index 6d257e0..ac84736 100644
@@ -82,6 +82,7 @@
 
 #define IPR_SUBS_DEV_ID_57B4    0x033B
 #define IPR_SUBS_DEV_ID_57B2    0x035F
+#define IPR_SUBS_DEV_ID_57C3    0x0353
 #define IPR_SUBS_DEV_ID_57C4    0x0354
 #define IPR_SUBS_DEV_ID_57C6    0x0357
 #define IPR_SUBS_DEV_ID_57CC    0x035C
drivers/scsi/isci/host.c
index f07f30f..e7fe9c4 100644
@@ -1350,7 +1350,7 @@ static void isci_user_parameters_get(struct sci_user_parameters *u)
        u->stp_max_occupancy_timeout = stp_max_occ_to;
        u->ssp_max_occupancy_timeout = ssp_max_occ_to;
        u->no_outbound_task_timeout = no_outbound_task_to;
-       u->max_number_concurrent_device_spin_up = max_concurr_spinup;
+       u->max_concurr_spinup = max_concurr_spinup;
 }
 
 static void sci_controller_initial_state_enter(struct sci_base_state_machine *sm)
@@ -1661,7 +1661,7 @@ static void sci_controller_set_default_config_parameters(struct isci_host *ihost
        ihost->oem_parameters.controller.mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;
 
        /* Default to APC mode. */
-       ihost->oem_parameters.controller.max_concurrent_dev_spin_up = 1;
+       ihost->oem_parameters.controller.max_concurr_spin_up = 1;
 
        /* Default to no SSC operation. */
        ihost->oem_parameters.controller.do_enable_ssc = false;
@@ -1787,7 +1787,8 @@ int sci_oem_parameters_validate(struct sci_oem_params *oem)
        } else
                return -EINVAL;
 
-       if (oem->controller.max_concurrent_dev_spin_up > MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT)
+       if (oem->controller.max_concurr_spin_up > MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT ||
+           oem->controller.max_concurr_spin_up < 1)
                return -EINVAL;
 
        return 0;
@@ -1810,6 +1811,16 @@ static enum sci_status sci_oem_parameters_set(struct isci_host *ihost)
        return SCI_FAILURE_INVALID_STATE;
 }
 
+static u8 max_spin_up(struct isci_host *ihost)
+{
+       if (ihost->user_parameters.max_concurr_spinup)
+               return min_t(u8, ihost->user_parameters.max_concurr_spinup,
+                            MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT);
+       else
+               return min_t(u8, ihost->oem_parameters.controller.max_concurr_spin_up,
+                            MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT);
+}
+
 static void power_control_timeout(unsigned long data)
 {
        struct sci_timer *tmr = (struct sci_timer *)data;
@@ -1839,8 +1850,7 @@ static void power_control_timeout(unsigned long data)
                if (iphy == NULL)
                        continue;
 
-               if (ihost->power_control.phys_granted_power >=
-                   ihost->oem_parameters.controller.max_concurrent_dev_spin_up)
+               if (ihost->power_control.phys_granted_power >= max_spin_up(ihost))
                        break;
 
                ihost->power_control.requesters[i] = NULL;
@@ -1865,8 +1875,7 @@ void sci_controller_power_control_queue_insert(struct isci_host *ihost,
 {
        BUG_ON(iphy == NULL);
 
-       if (ihost->power_control.phys_granted_power <
-           ihost->oem_parameters.controller.max_concurrent_dev_spin_up) {
+       if (ihost->power_control.phys_granted_power < max_spin_up(ihost)) {
                ihost->power_control.phys_granted_power++;
                sci_phy_consume_power_handler(iphy);
 
drivers/scsi/isci/init.c
index 43fe840..a97edab 100644
@@ -118,7 +118,7 @@ unsigned char phy_gen = 3;
 module_param(phy_gen, byte, 0);
 MODULE_PARM_DESC(phy_gen, "PHY generation (1: 1.5Gbps 2: 3.0Gbps 3: 6.0Gbps)");
 
-unsigned char max_concurr_spinup = 1;
+unsigned char max_concurr_spinup;
 module_param(max_concurr_spinup, byte, 0);
 MODULE_PARM_DESC(max_concurr_spinup, "Max concurrent device spinup");
 
drivers/scsi/isci/port.c
index 8e59c88..ac7f277 100644
@@ -145,48 +145,15 @@ static void sci_port_bcn_enable(struct isci_port *iport)
        }
 }
 
-/* called under sci_lock to stabilize phy:port associations */
-void isci_port_bcn_enable(struct isci_host *ihost, struct isci_port *iport)
-{
-       int i;
-
-       clear_bit(IPORT_BCN_BLOCKED, &iport->flags);
-       wake_up(&ihost->eventq);
-
-       if (!test_and_clear_bit(IPORT_BCN_PENDING, &iport->flags))
-               return;
-
-       for (i = 0; i < ARRAY_SIZE(iport->phy_table); i++) {
-               struct isci_phy *iphy = iport->phy_table[i];
-
-               if (!iphy)
-                       continue;
-
-               ihost->sas_ha.notify_port_event(&iphy->sas_phy,
-                                               PORTE_BROADCAST_RCVD);
-               break;
-       }
-}
-
 static void isci_port_bc_change_received(struct isci_host *ihost,
                                         struct isci_port *iport,
                                         struct isci_phy *iphy)
 {
-       if (iport && test_bit(IPORT_BCN_BLOCKED, &iport->flags)) {
-               dev_dbg(&ihost->pdev->dev,
-                       "%s: disabled BCN; isci_phy = %p, sas_phy = %p\n",
-                       __func__, iphy, &iphy->sas_phy);
-               set_bit(IPORT_BCN_PENDING, &iport->flags);
-               atomic_inc(&iport->event);
-               wake_up(&ihost->eventq);
-       } else {
-               dev_dbg(&ihost->pdev->dev,
-                       "%s: isci_phy = %p, sas_phy = %p\n",
-                       __func__, iphy, &iphy->sas_phy);
+       dev_dbg(&ihost->pdev->dev,
+               "%s: isci_phy = %p, sas_phy = %p\n",
+               __func__, iphy, &iphy->sas_phy);
 
-               ihost->sas_ha.notify_port_event(&iphy->sas_phy,
-                                               PORTE_BROADCAST_RCVD);
-       }
+       ihost->sas_ha.notify_port_event(&iphy->sas_phy, PORTE_BROADCAST_RCVD);
        sci_port_bcn_enable(iport);
 }
 
@@ -278,9 +245,6 @@ static void isci_port_link_down(struct isci_host *isci_host,
                /* check to see if this is the last phy on this port. */
                if (isci_phy->sas_phy.port &&
                    isci_phy->sas_phy.port->num_phys == 1) {
-                       atomic_inc(&isci_port->event);
-                       isci_port_bcn_enable(isci_host, isci_port);
-
                        /* change the state for all devices on this port.  The
                         * next task sent to this device will be returned as
                         * SAS_TASK_UNDELIVERED, and the scsi mid layer will
@@ -350,6 +314,34 @@ static void isci_port_stop_complete(struct isci_host *ihost,
        dev_dbg(&ihost->pdev->dev, "Port stop complete\n");
 }
 
+
+static bool is_port_ready_state(enum sci_port_states state)
+{
+       switch (state) {
+       case SCI_PORT_READY:
+       case SCI_PORT_SUB_WAITING:
+       case SCI_PORT_SUB_OPERATIONAL:
+       case SCI_PORT_SUB_CONFIGURING:
+               return true;
+       default:
+               return false;
+       }
+}
+
+/* flag dummy rnc hanling when exiting a ready state */
+static void port_state_machine_change(struct isci_port *iport,
+                                     enum sci_port_states state)
+{
+       struct sci_base_state_machine *sm = &iport->sm;
+       enum sci_port_states old_state = sm->current_state_id;
+
+       if (is_port_ready_state(old_state) && !is_port_ready_state(state))
+               iport->ready_exit = true;
+
+       sci_change_state(sm, state);
+       iport->ready_exit = false;
+}
+
 /**
  * isci_port_hard_reset_complete() - This function is called by the sci core
  *    when the hard reset complete notification has been received.
@@ -368,6 +360,26 @@ static void isci_port_hard_reset_complete(struct isci_port *isci_port,
        /* Save the status of the hard reset from the port. */
        isci_port->hard_reset_status = completion_status;
 
+       if (completion_status != SCI_SUCCESS) {
+
+               /* The reset failed.  The port state is now SCI_PORT_FAILED. */
+               if (isci_port->active_phy_mask == 0) {
+
+                       /* Generate the link down now to the host, since it
+                        * was intercepted by the hard reset state machine when
+                        * it really happened.
+                        */
+                       isci_port_link_down(isci_port->isci_host,
+                                           &isci_port->isci_host->phys[
+                                                  isci_port->last_active_phy],
+                                           isci_port);
+               }
+               /* Advance the port state so that link state changes will be
+               * noticed.
+               */
+               port_state_machine_change(isci_port, SCI_PORT_SUB_WAITING);
+
+       }
        complete_all(&isci_port->hard_reset_complete);
 }
 
@@ -657,6 +669,8 @@ void sci_port_deactivate_phy(struct isci_port *iport, struct isci_phy *iphy,
        struct isci_host *ihost = iport->owning_controller;
 
        iport->active_phy_mask &= ~(1 << iphy->phy_index);
+       if (!iport->active_phy_mask)
+               iport->last_active_phy = iphy->phy_index;
 
        iphy->max_negotiated_speed = SAS_LINK_RATE_UNKNOWN;
 
@@ -683,33 +697,6 @@ static void sci_port_invalid_link_up(struct isci_port *iport, struct isci_phy *i
        }
 }
 
-static bool is_port_ready_state(enum sci_port_states state)
-{
-       switch (state) {
-       case SCI_PORT_READY:
-       case SCI_PORT_SUB_WAITING:
-       case SCI_PORT_SUB_OPERATIONAL:
-       case SCI_PORT_SUB_CONFIGURING:
-               return true;
-       default:
-               return false;
-       }
-}
-
-/* flag dummy rnc hanling when exiting a ready state */
-static void port_state_machine_change(struct isci_port *iport,
-                                     enum sci_port_states state)
-{
-       struct sci_base_state_machine *sm = &iport->sm;
-       enum sci_port_states old_state = sm->current_state_id;
-
-       if (is_port_ready_state(old_state) && !is_port_ready_state(state))
-               iport->ready_exit = true;
-
-       sci_change_state(sm, state);
-       iport->ready_exit = false;
-}
-
 /**
  * sci_port_general_link_up_handler - phy can be assigned to port?
  * @sci_port: sci_port object for which has a phy that has gone link up.
@@ -1622,7 +1609,8 @@ void sci_port_construct(struct isci_port *iport, u8 index,
        iport->logical_port_index  = SCIC_SDS_DUMMY_PORT;
        iport->physical_port_index = index;
        iport->active_phy_mask     = 0;
-       iport->ready_exit             = false;
+       iport->last_active_phy     = 0;
+       iport->ready_exit          = false;
 
        iport->owning_controller = ihost;
 
@@ -1648,7 +1636,6 @@ void isci_port_init(struct isci_port *iport, struct isci_host *ihost, int index)
        init_completion(&iport->start_complete);
        iport->isci_host = ihost;
        isci_port_change_state(iport, isci_freed);
-       atomic_set(&iport->event, 0);
 }
 
 /**
@@ -1676,7 +1663,7 @@ int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *ipor
 {
        unsigned long flags;
        enum sci_status status;
-       int idx, ret = TMF_RESP_FUNC_COMPLETE;
+       int ret = TMF_RESP_FUNC_COMPLETE;
 
        dev_dbg(&ihost->pdev->dev, "%s: iport = %p\n",
                __func__, iport);
@@ -1697,8 +1684,13 @@ int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *ipor
                        "%s: iport = %p; hard reset completion\n",
                        __func__, iport);
 
-               if (iport->hard_reset_status != SCI_SUCCESS)
+               if (iport->hard_reset_status != SCI_SUCCESS) {
                        ret = TMF_RESP_FUNC_FAILED;
+
+                       dev_err(&ihost->pdev->dev,
+                               "%s: iport = %p; hard reset failed (0x%x)\n",
+                               __func__, iport, iport->hard_reset_status);
+               }
        } else {
                ret = TMF_RESP_FUNC_FAILED;
 
@@ -1718,18 +1710,6 @@ int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *ipor
                        "%s: iport = %p; hard reset failed "
                        "(0x%x) - driving explicit link fail for all phys\n",
                        __func__, iport, iport->hard_reset_status);
-
-               /* Down all phys in the port. */
-               spin_lock_irqsave(&ihost->scic_lock, flags);
-               for (idx = 0; idx < SCI_MAX_PHYS; ++idx) {
-                       struct isci_phy *iphy = iport->phy_table[idx];
-
-                       if (!iphy)
-                               continue;
-                       sci_phy_stop(iphy);
-                       sci_phy_start(iphy);
-               }
-               spin_unlock_irqrestore(&ihost->scic_lock, flags);
        }
        return ret;
 }
drivers/scsi/isci/port.h
index b50ecd4..cb5ffbc 100644
@@ -77,7 +77,6 @@ enum isci_status {
 
 /**
  * struct isci_port - isci direct attached sas port object
- * @event: counts bcns and port stop events (for bcn filtering)
  * @ready_exit: several states constitute 'ready'. When exiting ready we
  *              need to take extra port-teardown actions that are
  *              skipped when exiting to another 'ready' state.
@@ -92,10 +91,6 @@ enum isci_status {
  */
 struct isci_port {
        enum isci_status status;
-       #define IPORT_BCN_BLOCKED 0
-       #define IPORT_BCN_PENDING 1
-       unsigned long flags;
-       atomic_t event;
        struct isci_host *isci_host;
        struct asd_sas_port sas_port;
        struct list_head remote_dev_list;
@@ -109,6 +104,7 @@ struct isci_port {
        u8 logical_port_index;
        u8 physical_port_index;
        u8 active_phy_mask;
+       u8 last_active_phy;
        u16 reserved_rni;
        u16 reserved_tag;
        u32 started_request_count;
drivers/scsi/isci/probe_roms.h
index dc007e6..2c75248 100644
@@ -112,7 +112,7 @@ struct sci_user_parameters {
         * This field specifies the maximum number of direct attached devices
         * that can have power supplied to them simultaneously.
         */
-       u8 max_number_concurrent_device_spin_up;
+       u8 max_concurr_spinup;
 
        /**
         * This field specifies the number of seconds to allow a phy to consume
@@ -219,7 +219,7 @@ struct sci_bios_oem_param_block_hdr {
 struct sci_oem_params {
        struct {
                uint8_t mode_type;
-               uint8_t max_concurrent_dev_spin_up;
+               uint8_t max_concurr_spin_up;
                uint8_t do_enable_ssc;
                uint8_t reserved;
        } controller;
drivers/scsi/isci/remote_device.c
index fbf9ce2..b207cd3 100644
@@ -1438,88 +1438,3 @@ int isci_remote_device_found(struct domain_device *domain_dev)
 
        return status == SCI_SUCCESS ? 0 : -ENODEV;
 }
-/**
- * isci_device_is_reset_pending() - This function will check if there is any
- *    pending reset condition on the device.
- * @request: This parameter is the isci_device object.
- *
- * true if there is a reset pending for the device.
- */
-bool isci_device_is_reset_pending(
-       struct isci_host *isci_host,
-       struct isci_remote_device *isci_device)
-{
-       struct isci_request *isci_request;
-       struct isci_request *tmp_req;
-       bool reset_is_pending = false;
-       unsigned long flags;
-
-       dev_dbg(&isci_host->pdev->dev,
-               "%s: isci_device = %p\n", __func__, isci_device);
-
-       spin_lock_irqsave(&isci_host->scic_lock, flags);
-
-       /* Check for reset on all pending requests. */
-       list_for_each_entry_safe(isci_request, tmp_req,
-                                &isci_device->reqs_in_process, dev_node) {
-               dev_dbg(&isci_host->pdev->dev,
-                       "%s: isci_device = %p request = %p\n",
-                       __func__, isci_device, isci_request);
-
-               if (isci_request->ttype == io_task) {
-                       struct sas_task *task = isci_request_access_task(
-                               isci_request);
-
-                       spin_lock(&task->task_state_lock);
-                       if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET)
-                               reset_is_pending = true;
-                       spin_unlock(&task->task_state_lock);
-               }
-       }
-
-       spin_unlock_irqrestore(&isci_host->scic_lock, flags);
-
-       dev_dbg(&isci_host->pdev->dev,
-               "%s: isci_device = %p reset_is_pending = %d\n",
-               __func__, isci_device, reset_is_pending);
-
-       return reset_is_pending;
-}
-
-/**
- * isci_device_clear_reset_pending() - This function will clear if any pending
- *    reset condition flags on the device.
- * @request: This parameter is the isci_device object.
- *
- * true if there is a reset pending for the device.
- */
-void isci_device_clear_reset_pending(struct isci_host *ihost, struct isci_remote_device *idev)
-{
-       struct isci_request *isci_request;
-       struct isci_request *tmp_req;
-       unsigned long flags = 0;
-
-       dev_dbg(&ihost->pdev->dev, "%s: idev=%p, ihost=%p\n",
-                __func__, idev, ihost);
-
-       spin_lock_irqsave(&ihost->scic_lock, flags);
-
-       /* Clear reset pending on all pending requests. */
-       list_for_each_entry_safe(isci_request, tmp_req,
-                                &idev->reqs_in_process, dev_node) {
-               dev_dbg(&ihost->pdev->dev, "%s: idev = %p request = %p\n",
-                        __func__, idev, isci_request);
-
-               if (isci_request->ttype == io_task) {
-
-                       unsigned long flags2;
-                       struct sas_task *task = isci_request_access_task(
-                               isci_request);
-
-                       spin_lock_irqsave(&task->task_state_lock, flags2);
-                       task->task_state_flags &= ~SAS_TASK_NEED_DEV_RESET;
-                       spin_unlock_irqrestore(&task->task_state_lock, flags2);
-               }
-       }
-       spin_unlock_irqrestore(&ihost->scic_lock, flags);
-}
drivers/scsi/isci/remote_device.h
index e1747ea..483ee50 100644
@@ -132,10 +132,7 @@ void isci_remote_device_nuke_requests(struct isci_host *ihost,
                                      struct isci_remote_device *idev);
 void isci_remote_device_gone(struct domain_device *domain_dev);
 int isci_remote_device_found(struct domain_device *domain_dev);
-bool isci_device_is_reset_pending(struct isci_host *ihost,
-                                 struct isci_remote_device *idev);
-void isci_device_clear_reset_pending(struct isci_host *ihost,
-                                    struct isci_remote_device *idev);
+
 /**
  * sci_remote_device_stop() - This method will stop both transmission and
  *    reception of link activity for the supplied remote device.  This method
drivers/scsi/isci/request.c
index 565a9f0..192cb48 100644
@@ -191,7 +191,7 @@ static void sci_task_request_build_ssp_task_iu(struct isci_request *ireq)
 
        task_iu->task_func = isci_tmf->tmf_code;
        task_iu->task_tag =
-               (ireq->ttype == tmf_task) ?
+               (test_bit(IREQ_TMF, &ireq->flags)) ?
                isci_tmf->io_tag :
                SCI_CONTROLLER_INVALID_IO_TAG;
 }
@@ -516,7 +516,7 @@ sci_io_request_construct_sata(struct isci_request *ireq,
        struct domain_device *dev = ireq->target_device->domain_dev;
 
        /* check for management protocols */
-       if (ireq->ttype == tmf_task) {
+       if (test_bit(IREQ_TMF, &ireq->flags)) {
                struct isci_tmf *tmf = isci_request_access_tmf(ireq);
 
                if (tmf->tmf_code == isci_tmf_sata_srst_high ||
@@ -632,7 +632,7 @@ enum sci_status sci_task_request_construct_sata(struct isci_request *ireq)
        enum sci_status status = SCI_SUCCESS;
 
        /* check for management protocols */
-       if (ireq->ttype == tmf_task) {
+       if (test_bit(IREQ_TMF, &ireq->flags)) {
                struct isci_tmf *tmf = isci_request_access_tmf(ireq);
 
                if (tmf->tmf_code == isci_tmf_sata_srst_high ||
@@ -2630,14 +2630,8 @@ static void isci_task_save_for_upper_layer_completion(
        switch (task_notification_selection) {
 
        case isci_perform_normal_io_completion:
-
                /* Normal notification (task_done) */
-               dev_dbg(&host->pdev->dev,
-                       "%s: Normal - task = %p, response=%d (%d), status=%d (%d)\n",
-                       __func__,
-                       task,
-                       task->task_status.resp, response,
-                       task->task_status.stat, status);
+
                /* Add to the completed list. */
                list_add(&request->completed_node,
                         &host->requests_to_complete);
@@ -2650,13 +2644,6 @@ static void isci_task_save_for_upper_layer_completion(
                /* No notification to libsas because this request is
                 * already in the abort path.
                 */
-               dev_dbg(&host->pdev->dev,
-                        "%s: Aborted - task = %p, response=%d (%d), status=%d (%d)\n",
-                        __func__,
-                        task,
-                        task->task_status.resp, response,
-                        task->task_status.stat, status);
-
                /* Wake up whatever process was waiting for this
                 * request to complete.
                 */
@@ -2673,30 +2660,22 @@ static void isci_task_save_for_upper_layer_completion(
 
        case isci_perform_error_io_completion:
                /* Use sas_task_abort */
-               dev_dbg(&host->pdev->dev,
-                        "%s: Error - task = %p, response=%d (%d), status=%d (%d)\n",
-                        __func__,
-                        task,
-                        task->task_status.resp, response,
-                        task->task_status.stat, status);
                /* Add to the aborted list. */
                list_add(&request->completed_node,
                         &host->requests_to_errorback);
                break;
 
        default:
-               dev_dbg(&host->pdev->dev,
-                        "%s: Unknown - task = %p, response=%d (%d), status=%d (%d)\n",
-                        __func__,
-                        task,
-                        task->task_status.resp, response,
-                        task->task_status.stat, status);
-
                /* Add to the error to libsas list. */
                list_add(&request->completed_node,
                         &host->requests_to_errorback);
                break;
        }
+       dev_dbg(&host->pdev->dev,
+               "%s: %d - task = %p, response=%d (%d), status=%d (%d)\n",
+               __func__, task_notification_selection, task,
+               (task) ? task->task_status.resp : 0, response,
+               (task) ? task->task_status.stat : 0, status);
 }
 
 static void isci_process_stp_response(struct sas_task *task, struct dev_to_host_fis *fis)
@@ -2728,9 +2707,9 @@ static void isci_request_io_request_complete(struct isci_host *ihost,
        struct sas_task *task = isci_request_access_task(request);
        struct ssp_response_iu *resp_iu;
        unsigned long task_flags;
-       struct isci_remote_device *idev = isci_lookup_device(task->dev);
-       enum service_response response       = SAS_TASK_UNDELIVERED;
-       enum exec_status status         = SAS_ABORTED_TASK;
+       struct isci_remote_device *idev = request->target_device;
+       enum service_response response = SAS_TASK_UNDELIVERED;
+       enum exec_status status = SAS_ABORTED_TASK;
        enum isci_request_status request_status;
        enum isci_completion_selection complete_to_host
                = isci_perform_normal_io_completion;
@@ -3061,7 +3040,6 @@ static void isci_request_io_request_complete(struct isci_host *ihost,
 
        /* complete the io request to the core. */
        sci_controller_complete_io(ihost, request->target_device, request);
-       isci_put_device(idev);
 
        /* set terminated handle so it cannot be completed or
         * terminated again, and to cause any calls into abort
@@ -3080,7 +3058,7 @@ static void sci_request_started_state_enter(struct sci_base_state_machine *sm)
        /* XXX as hch said always creating an internal sas_task for tmf
         * requests would simplify the driver
         */
-       task = ireq->ttype == io_task ? isci_request_access_task(ireq) : NULL;
+       task = (test_bit(IREQ_TMF, &ireq->flags)) ? NULL : isci_request_access_task(ireq);
 
        /* all unaccelerated request types (non ssp or ncq) handled with
         * substates
@@ -3564,7 +3542,7 @@ static struct isci_request *isci_io_request_from_tag(struct isci_host *ihost,
 
        ireq = isci_request_from_tag(ihost, tag);
        ireq->ttype_ptr.io_task_ptr = task;
-       ireq->ttype = io_task;
+       clear_bit(IREQ_TMF, &ireq->flags);
        task->lldd_task = ireq;
 
        return ireq;
@@ -3578,7 +3556,7 @@ struct isci_request *isci_tmf_request_from_tag(struct isci_host *ihost,
 
        ireq = isci_request_from_tag(ihost, tag);
        ireq->ttype_ptr.tmf_task_ptr = isci_tmf;
-       ireq->ttype = tmf_task;
+       set_bit(IREQ_TMF, &ireq->flags);
 
        return ireq;
 }
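
For reference, the request-type bookkeeping in the request.c hunks above is now carried entirely by the IREQ_TMF bit in isci_request.flags (set_bit()/clear_bit() when the request is built, test_bit() at the call sites), replacing the ttype enum removed in the request.h hunk that follows. A minimal sketch of that convention; the ireq_is_tmf() helper is illustrative only and not part of this series:

        /* Sketch only -- not introduced by this patch set.  Assumes the
         * driver's existing <linux/bitops.h> and <linux/types.h> includes.
         */
        static inline bool ireq_is_tmf(const struct isci_request *ireq)
        {
                return test_bit(IREQ_TMF, &ireq->flags);
        }
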
index f720b97..be38933 100644 (file)
@@ -77,11 +77,6 @@ enum isci_request_status {
        dead        = 0x07
 };
 
-enum task_type {
-       io_task  = 0,
-       tmf_task = 1
-};
-
 enum sci_request_protocol {
        SCIC_NO_PROTOCOL,
        SCIC_SMP_PROTOCOL,
@@ -116,7 +111,6 @@ struct isci_request {
        #define IREQ_ACTIVE 3
        unsigned long flags;
        /* XXX kill ttype and ttype_ptr, allocate full sas_task */
-       enum task_type ttype;
        union ttype_ptr_union {
                struct sas_task *io_task_ptr;   /* When ttype==io_task  */
                struct isci_tmf *tmf_task_ptr;  /* When ttype==tmf_task */
index e2d9418..66ad3dc 100644 (file)
@@ -212,16 +212,27 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
                                        task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
                                        spin_unlock_irqrestore(&task->task_state_lock, flags);
 
-                                       /* Indicate QUEUE_FULL so that the scsi
-                                       * midlayer retries. if the request
-                                       * failed for remote device reasons,
-                                       * it gets returned as
-                                       * SAS_TASK_UNDELIVERED next time
-                                       * through.
-                                       */
-                                       isci_task_refuse(ihost, task,
-                                                        SAS_TASK_COMPLETE,
-                                                        SAS_QUEUE_FULL);
+                                       if (test_bit(IDEV_GONE, &idev->flags)) {
+
+                                               /* Indicate that the device
+                                                * is gone.
+                                                */
+                                               isci_task_refuse(ihost, task,
+                                                       SAS_TASK_UNDELIVERED,
+                                                       SAS_DEVICE_UNKNOWN);
+                                       } else {
+                                               /* Indicate QUEUE_FULL so that
+                                                * the scsi midlayer retries.
+                                                * If the request failed for
+                                                * remote device reasons, it
+                                                * gets returned as
+                                                * SAS_TASK_UNDELIVERED next
+                                                * time through.
+                                                */
+                                               isci_task_refuse(ihost, task,
+                                                       SAS_TASK_COMPLETE,
+                                                       SAS_QUEUE_FULL);
+                                       }
                                }
                        }
                }
@@ -243,7 +254,7 @@ static enum sci_status isci_sata_management_task_request_build(struct isci_reque
        struct isci_tmf *isci_tmf;
        enum sci_status status;
 
-       if (tmf_task != ireq->ttype)
+       if (!test_bit(IREQ_TMF, &ireq->flags))
                return SCI_FAILURE;
 
        isci_tmf = isci_request_access_tmf(ireq);
@@ -327,6 +338,60 @@ static struct isci_request *isci_task_request_build(struct isci_host *ihost,
        return ireq;
 }
 
+/**
+ * isci_request_mark_zombie() - This function must be called with scic_lock held.
+ */
+static void isci_request_mark_zombie(struct isci_host *ihost, struct isci_request *ireq)
+{
+       struct completion *tmf_completion = NULL;
+       struct completion *req_completion;
+
+       /* Set the request state to "dead". */
+       ireq->status = dead;
+
+       req_completion = ireq->io_request_completion;
+       ireq->io_request_completion = NULL;
+
+       if (test_bit(IREQ_TMF, &ireq->flags)) {
+               /* Break links with the TMF request. */
+               struct isci_tmf *tmf = isci_request_access_tmf(ireq);
+
+               /* In the case where a task request is dying,
+                * the thread waiting on the complete will sit and
+                * timeout unless we wake it now.  Since the TMF
+                * has a default error status, complete it here
+                * to wake the waiting thread.
+                */
+               if (tmf) {
+                       tmf_completion = tmf->complete;
+                       tmf->complete = NULL;
+                       dev_dbg(&ihost->pdev->dev, "%s: tmf_code %d, managed tag %#x\n",
+                               __func__, tmf->tmf_code, tmf->io_tag);
+               }
+               ireq->ttype_ptr.tmf_task_ptr = NULL;
+       } else {
+               /* Break links with the sas_task - the callback is done
+                * elsewhere.
+                */
+               struct sas_task *task = isci_request_access_task(ireq);
+
+               if (task)
+                       task->lldd_task = NULL;
+
+               ireq->ttype_ptr.io_task_ptr = NULL;
+       }
+
+       dev_warn(&ihost->pdev->dev, "task context unrecoverable (tag: %#x)\n",
+                ireq->io_tag);
+
+       /* Don't force waiting threads to timeout. */
+       if (req_completion)
+               complete(req_completion);
+
+       if (tmf_completion != NULL)
+               complete(tmf_completion);
+}
+
 static int isci_task_execute_tmf(struct isci_host *ihost,
                                 struct isci_remote_device *idev,
                                 struct isci_tmf *tmf, unsigned long timeout_ms)
@@ -364,6 +429,7 @@ static int isci_task_execute_tmf(struct isci_host *ihost,
 
        /* Assign the pointer to the TMF's completion kernel wait structure. */
        tmf->complete = &completion;
+       tmf->status = SCI_FAILURE_TIMEOUT;
 
        ireq = isci_task_request_build(ihost, idev, tag, tmf);
        if (!ireq)
@@ -399,18 +465,35 @@ static int isci_task_execute_tmf(struct isci_host *ihost,
                                               msecs_to_jiffies(timeout_ms));
 
        if (timeleft == 0) {
+               /* The TMF did not complete - this could be because
+                * of an unplug.  Terminate the TMF request now.
+                */
                spin_lock_irqsave(&ihost->scic_lock, flags);
 
                if (tmf->cb_state_func != NULL)
-                       tmf->cb_state_func(isci_tmf_timed_out, tmf, tmf->cb_data);
+                       tmf->cb_state_func(isci_tmf_timed_out, tmf,
+                                          tmf->cb_data);
 
-               sci_controller_terminate_request(ihost,
-                                                 idev,
-                                                 ireq);
+               sci_controller_terminate_request(ihost, idev, ireq);
 
                spin_unlock_irqrestore(&ihost->scic_lock, flags);
 
-               wait_for_completion(tmf->complete);
+               timeleft = wait_for_completion_timeout(
+                       &completion,
+                       msecs_to_jiffies(ISCI_TERMINATION_TIMEOUT_MSEC));
+
+               if (!timeleft) {
+                       /* Strange condition - the termination of the TMF
+                        * request timed-out.
+                        */
+                       spin_lock_irqsave(&ihost->scic_lock, flags);
+
+                       /* If the TMF status has not changed, kill it. */
+                       if (tmf->status == SCI_FAILURE_TIMEOUT)
+                               isci_request_mark_zombie(ihost, ireq);
+
+                       spin_unlock_irqrestore(&ihost->scic_lock, flags);
+               }
        }
 
        isci_print_tmf(tmf);
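
The timeout handling above is a bounded-wait-then-escalate sequence: wait for the TMF to complete; if it does not, terminate the request and wait once more; only if that second wait also expires (and the TMF status is still the default SCI_FAILURE_TIMEOUT) is the request marked a zombie. A condensed sketch of that flow using the same variables as the function above, with the scic_lock acquisition and the cb_state_func notification omitted:

        /* Condensed sketch of the escalation above; not a drop-in replacement. */
        if (!wait_for_completion_timeout(&completion,
                                         msecs_to_jiffies(timeout_ms))) {
                sci_controller_terminate_request(ihost, idev, ireq);

                if (!wait_for_completion_timeout(&completion,
                                msecs_to_jiffies(ISCI_TERMINATION_TIMEOUT_MSEC)) &&
                    tmf->status == SCI_FAILURE_TIMEOUT)
                        isci_request_mark_zombie(ihost, ireq);
        }
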
@@ -501,48 +584,17 @@ static enum isci_request_status isci_task_validate_request_to_abort(
        return old_state;
 }
 
-/**
-* isci_request_cleanup_completed_loiterer() - This function will take care of
-*    the final cleanup on any request which has been explicitly terminated.
-* @isci_host: This parameter specifies the ISCI host object
-* @isci_device: This is the device to which the request is pending.
-* @isci_request: This parameter specifies the terminated request object.
-* @task: This parameter is the libsas I/O request.
-*/
-static void isci_request_cleanup_completed_loiterer(
-       struct isci_host          *isci_host,
-       struct isci_remote_device *isci_device,
-       struct isci_request       *isci_request,
-       struct sas_task           *task)
+static int isci_request_is_dealloc_managed(enum isci_request_status stat)
 {
-       unsigned long flags;
-
-       dev_dbg(&isci_host->pdev->dev,
-               "%s: isci_device=%p, request=%p, task=%p\n",
-               __func__, isci_device, isci_request, task);
-
-       if (task != NULL) {
-
-               spin_lock_irqsave(&task->task_state_lock, flags);
-               task->lldd_task = NULL;
-
-               task->task_state_flags &= ~SAS_TASK_NEED_DEV_RESET;
-
-               isci_set_task_doneflags(task);
-
-               /* If this task is not in the abort path, call task_done. */
-               if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
-
-                       spin_unlock_irqrestore(&task->task_state_lock, flags);
-                       task->task_done(task);
-               } else
-                       spin_unlock_irqrestore(&task->task_state_lock, flags);
-       }
-
-       if (isci_request != NULL) {
-               spin_lock_irqsave(&isci_host->scic_lock, flags);
-               list_del_init(&isci_request->dev_node);
-               spin_unlock_irqrestore(&isci_host->scic_lock, flags);
+       switch (stat) {
+       case aborted:
+       case aborting:
+       case terminating:
+       case completed:
+       case dead:
+               return true;
+       default:
+               return false;
        }
 }
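
isci_request_is_dealloc_managed() gives the terminate path and the completion path one answer to who frees a request: once it has reached any of the states listed above, deallocation belongs to the terminate/cleanup code. A short usage sketch mirroring the call site later in this file (the scic_lock the driver holds around this block is omitted here):

        /* Sketch of the cleanup decision; see isci_terminate_request_core(). */
        if (isci_request_is_dealloc_managed(isci_request->status)) {
                isci_free_tag(ihost, isci_request->io_tag);
                isci_request_change_state(isci_request, unallocated);
                list_del_init(&isci_request->dev_node);
        }
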
 
@@ -563,11 +615,9 @@ static void isci_terminate_request_core(struct isci_host *ihost,
        enum sci_status status      = SCI_SUCCESS;
        bool was_terminated         = false;
        bool needs_cleanup_handling = false;
-       enum isci_request_status request_status;
        unsigned long     flags;
        unsigned long     termination_completed = 1;
        struct completion *io_request_completion;
-       struct sas_task   *task;
 
        dev_dbg(&ihost->pdev->dev,
                "%s: device = %p; request = %p\n",
@@ -577,10 +627,6 @@ static void isci_terminate_request_core(struct isci_host *ihost,
 
        io_request_completion = isci_request->io_request_completion;
 
-       task = (isci_request->ttype == io_task)
-               ? isci_request_access_task(isci_request)
-               : NULL;
-
        /* Note that we are not going to control
         * the target to abort the request.
         */
@@ -619,42 +665,27 @@ static void isci_terminate_request_core(struct isci_host *ihost,
                                __func__, isci_request, io_request_completion);
 
                        /* Wait here for the request to complete. */
-                       #define TERMINATION_TIMEOUT_MSEC 500
                        termination_completed
                                = wait_for_completion_timeout(
                                   io_request_completion,
-                                  msecs_to_jiffies(TERMINATION_TIMEOUT_MSEC));
+                                  msecs_to_jiffies(ISCI_TERMINATION_TIMEOUT_MSEC));
 
                        if (!termination_completed) {
 
                                /* The request to terminate has timed out.  */
-                               spin_lock_irqsave(&ihost->scic_lock,
-                                                 flags);
+                               spin_lock_irqsave(&ihost->scic_lock, flags);
 
                                /* Check for state changes. */
-                               if (!test_bit(IREQ_TERMINATED, &isci_request->flags)) {
+                               if (!test_bit(IREQ_TERMINATED,
+                                             &isci_request->flags)) {
 
                                        /* The best we can do is to have the
                                         * request die a silent death if it
                                         * ever really completes.
-                                        *
-                                        * Set the request state to "dead",
-                                        * and clear the task pointer so that
-                                        * an actual completion event callback
-                                        * doesn't do anything.
                                         */
-                                       isci_request->status = dead;
-                                       isci_request->io_request_completion
-                                               = NULL;
-
-                                       if (isci_request->ttype == io_task) {
-
-                                               /* Break links with the
-                                               * sas_task.
-                                               */
-                                               isci_request->ttype_ptr.io_task_ptr
-                                                       = NULL;
-                                       }
+                                       isci_request_mark_zombie(ihost,
+                                                                isci_request);
+                                       needs_cleanup_handling = true;
                                } else
                                        termination_completed = 1;
 
@@ -691,29 +722,28 @@ static void isci_terminate_request_core(struct isci_host *ihost,
                         * needs to be detached and freed here.
                         */
                        spin_lock_irqsave(&isci_request->state_lock, flags);
-                       request_status = isci_request->status;
-
-                       if ((isci_request->ttype == io_task) /* TMFs are in their own thread */
-                           && ((request_status == aborted)
-                               || (request_status == aborting)
-                               || (request_status == terminating)
-                               || (request_status == completed)
-                               || (request_status == dead)
-                               )
-                           ) {
-
-                               /* The completion routine won't free a request in
-                                * the aborted/aborting/etc. states, so we do
-                                * it here.
-                                */
-                               needs_cleanup_handling = true;
-                       }
+
+                       needs_cleanup_handling
+                               = isci_request_is_dealloc_managed(
+                                       isci_request->status);
+
                        spin_unlock_irqrestore(&isci_request->state_lock, flags);
 
                }
-               if (needs_cleanup_handling)
-                       isci_request_cleanup_completed_loiterer(
-                               ihost, idev, isci_request, task);
+               if (needs_cleanup_handling) {
+
+                       dev_dbg(&ihost->pdev->dev,
+                               "%s: cleanup isci_device=%p, request=%p\n",
+                               __func__, idev, isci_request);
+
+                       if (isci_request != NULL) {
+                               spin_lock_irqsave(&ihost->scic_lock, flags);
+                               isci_free_tag(ihost, isci_request->io_tag);
+                               isci_request_change_state(isci_request, unallocated);
+                               list_del_init(&isci_request->dev_node);
+                               spin_unlock_irqrestore(&ihost->scic_lock, flags);
+                       }
+               }
        }
 }
 
@@ -772,7 +802,9 @@ void isci_terminate_pending_requests(struct isci_host *ihost,
                dev_dbg(&ihost->pdev->dev,
                         "%s: idev=%p request=%p; task=%p old_state=%d\n",
                         __func__, idev, ireq,
-                       ireq->ttype == io_task ? isci_request_access_task(ireq) : NULL,
+                       (!test_bit(IREQ_TMF, &ireq->flags)
+                               ? isci_request_access_task(ireq)
+                               : NULL),
                        old_state);
 
                /* If the old_state is started:
@@ -889,22 +921,14 @@ int isci_task_lu_reset(struct domain_device *domain_device, u8 *lun)
                "%s: domain_device=%p, isci_host=%p; isci_device=%p\n",
                 __func__, domain_device, isci_host, isci_device);
 
-       if (isci_device)
-               set_bit(IDEV_EH, &isci_device->flags);
+       if (!isci_device) {
+               /* If the device is gone, stop the escalations. */
+               dev_dbg(&isci_host->pdev->dev, "%s: No dev\n", __func__);
 
-       /* If there is a device reset pending on any request in the
-        * device's list, fail this LUN reset request in order to
-        * escalate to the device reset.
-        */
-       if (!isci_device ||
-           isci_device_is_reset_pending(isci_host, isci_device)) {
-               dev_dbg(&isci_host->pdev->dev,
-                        "%s: No dev (%p), or "
-                        "RESET PENDING: domain_device=%p\n",
-                        __func__, isci_device, domain_device);
-               ret = TMF_RESP_FUNC_FAILED;
+               ret = TMF_RESP_FUNC_COMPLETE;
                goto out;
        }
+       set_bit(IDEV_EH, &isci_device->flags);
 
        /* Send the task management part of the reset. */
        if (sas_protocol_ata(domain_device->tproto)) {
@@ -1013,7 +1037,7 @@ int isci_task_abort_task(struct sas_task *task)
        struct isci_tmf           tmf;
        int                       ret = TMF_RESP_FUNC_FAILED;
        unsigned long             flags;
-       bool                      any_dev_reset = false;
+       int                       perform_termination = 0;
 
        /* Get the isci_request reference from the task.  Note that
         * this check does not depend on the pending request list
@@ -1035,89 +1059,34 @@ int isci_task_abort_task(struct sas_task *task)
        spin_unlock_irqrestore(&isci_host->scic_lock, flags);
 
        dev_dbg(&isci_host->pdev->dev,
-               "%s: task = %p\n", __func__, task);
-
-       if (!isci_device || !old_request)
-               goto out;
-
-       set_bit(IDEV_EH, &isci_device->flags);
-
-       /* This version of the driver will fail abort requests for
-        * SATA/STP.  Failing the abort request this way will cause the
-        * SCSI error handler thread to escalate to LUN reset
-        */
-       if (sas_protocol_ata(task->task_proto)) {
-               dev_dbg(&isci_host->pdev->dev,
-                           " task %p is for a STP/SATA device;"
-                           " returning TMF_RESP_FUNC_FAILED\n"
-                           " to cause a LUN reset...\n", task);
-               goto out;
-       }
+               "%s: dev = %p, task = %p, old_request == %p\n",
+               __func__, isci_device, task, old_request);
 
-       dev_dbg(&isci_host->pdev->dev,
-               "%s: old_request == %p\n", __func__, old_request);
-
-       any_dev_reset = isci_device_is_reset_pending(isci_host, isci_device);
-
-       spin_lock_irqsave(&task->task_state_lock, flags);
-
-       any_dev_reset = any_dev_reset || (task->task_state_flags & SAS_TASK_NEED_DEV_RESET);
+       if (isci_device)
+               set_bit(IDEV_EH, &isci_device->flags);
 
-       /* If the extraction of the request reference from the task
-        * failed, then the request has been completed (or if there is a
-        * pending reset then this abort request function must be failed
-        * in order to escalate to the target reset).
+       /* Device reset conditions signalled in task_state_flags are the
+        * responsibility of libsas to observe at the start of the error
+        * handler thread.
         */
-       if ((old_request == NULL) || any_dev_reset) {
-
-               /* If the device reset task flag is set, fail the task
-                * management request.  Otherwise, the original request
-                * has completed.
-                */
-               if (any_dev_reset) {
-
-                       /* Turn off the task's DONE to make sure this
-                        * task is escalated to a target reset.
-                        */
-                       task->task_state_flags &= ~SAS_TASK_STATE_DONE;
-
-                       /* Make the reset happen as soon as possible. */
-                       task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
-
-                       spin_unlock_irqrestore(&task->task_state_lock, flags);
-
-                       /* Fail the task management request in order to
-                        * escalate to the target reset.
-                        */
-                       ret = TMF_RESP_FUNC_FAILED;
-
-                       dev_dbg(&isci_host->pdev->dev,
-                               "%s: Failing task abort in order to "
-                               "escalate to target reset because\n"
-                               "SAS_TASK_NEED_DEV_RESET is set for "
-                               "task %p on dev %p\n",
-                               __func__, task, isci_device);
-
-
-               } else {
-                       /* The request has already completed and there
-                        * is nothing to do here other than to set the task
-                        * done bit, and indicate that the task abort function
-                        * was sucessful.
-                        */
-                       isci_set_task_doneflags(task);
-
-                       spin_unlock_irqrestore(&task->task_state_lock, flags);
+       if (!isci_device || !old_request) {
+               /* The request has already completed and there
+                * is nothing to do here other than to set the task
+                * done bit, and indicate that the task abort function
+                * was successful.
+                */
+               spin_lock_irqsave(&task->task_state_lock, flags);
+               task->task_state_flags |= SAS_TASK_STATE_DONE;
+               task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
+                                           SAS_TASK_STATE_PENDING);
+               spin_unlock_irqrestore(&task->task_state_lock, flags);
 
-                       ret = TMF_RESP_FUNC_COMPLETE;
+               ret = TMF_RESP_FUNC_COMPLETE;
 
-                       dev_dbg(&isci_host->pdev->dev,
-                               "%s: abort task not needed for %p\n",
-                               __func__, task);
-               }
+               dev_dbg(&isci_host->pdev->dev,
+                       "%s: abort task not needed for %p\n",
+                       __func__, task);
                goto out;
-       } else {
-               spin_unlock_irqrestore(&task->task_state_lock, flags);
        }
 
        spin_lock_irqsave(&isci_host->scic_lock, flags);
@@ -1146,24 +1115,44 @@ int isci_task_abort_task(struct sas_task *task)
                goto out;
        }
        if (task->task_proto == SAS_PROTOCOL_SMP ||
+           sas_protocol_ata(task->task_proto) ||
            test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags)) {
 
                spin_unlock_irqrestore(&isci_host->scic_lock, flags);
 
                dev_dbg(&isci_host->pdev->dev,
-                       "%s: SMP request (%d)"
+                       "%s: %s request"
                        " or complete_in_target (%d), thus no TMF\n",
-                       __func__, (task->task_proto == SAS_PROTOCOL_SMP),
+                       __func__,
+                       ((task->task_proto == SAS_PROTOCOL_SMP)
+                               ? "SMP"
+                               : (sas_protocol_ata(task->task_proto)
+                                       ? "SATA/STP"
+                                       : "<other>")
+                        ),
                        test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags));
 
-               /* Set the state on the task. */
-               isci_task_all_done(task);
-
-               ret = TMF_RESP_FUNC_COMPLETE;
+               if (test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags)) {
+                       spin_lock_irqsave(&task->task_state_lock, flags);
+                       task->task_state_flags |= SAS_TASK_STATE_DONE;
+                       task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
+                                                   SAS_TASK_STATE_PENDING);
+                       spin_unlock_irqrestore(&task->task_state_lock, flags);
+                       ret = TMF_RESP_FUNC_COMPLETE;
+               } else {
+                       spin_lock_irqsave(&task->task_state_lock, flags);
+                       task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
+                                                   SAS_TASK_STATE_PENDING);
+                       spin_unlock_irqrestore(&task->task_state_lock, flags);
+               }
 
-               /* Stopping and SMP devices are not sent a TMF, and are not
-                * reset, but the outstanding I/O request is terminated below.
+               /* STP and SMP devices are not sent a TMF, but the
+                * outstanding I/O request is terminated below.  This is
+                * because SATA/STP and SMP discovery path timeouts directly
+                * call the abort task interface for cleanup.
                 */
+               perform_termination = 1;
+
        } else {
                /* Fill in the tmf structure */
                isci_task_build_abort_task_tmf(&tmf, isci_tmf_ssp_task_abort,
@@ -1172,22 +1161,24 @@ int isci_task_abort_task(struct sas_task *task)
 
                spin_unlock_irqrestore(&isci_host->scic_lock, flags);
 
-               #define ISCI_ABORT_TASK_TIMEOUT_MS 500 /* half second timeout. */
+               #define ISCI_ABORT_TASK_TIMEOUT_MS 500 /* 1/2 second timeout */
                ret = isci_task_execute_tmf(isci_host, isci_device, &tmf,
                                            ISCI_ABORT_TASK_TIMEOUT_MS);
 
-               if (ret != TMF_RESP_FUNC_COMPLETE)
+               if (ret == TMF_RESP_FUNC_COMPLETE)
+                       perform_termination = 1;
+               else
                        dev_dbg(&isci_host->pdev->dev,
-                               "%s: isci_task_send_tmf failed\n",
-                               __func__);
+                               "%s: isci_task_send_tmf failed\n", __func__);
        }
-       if (ret == TMF_RESP_FUNC_COMPLETE) {
+       if (perform_termination) {
                set_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags);
 
                /* Clean up the request on our side, and wait for the aborted
                 * I/O to complete.
                 */
-               isci_terminate_request_core(isci_host, isci_device, old_request);
+               isci_terminate_request_core(isci_host, isci_device,
+                                           old_request);
        }
 
        /* Make sure we do not leave a reference to aborted_io_completion */
@@ -1288,7 +1279,8 @@ isci_task_request_complete(struct isci_host *ihost,
                           enum sci_task_status completion_status)
 {
        struct isci_tmf *tmf = isci_request_access_tmf(ireq);
-       struct completion *tmf_complete;
+       struct completion *tmf_complete = NULL;
+       struct completion *request_complete = ireq->io_request_completion;
 
        dev_dbg(&ihost->pdev->dev,
                "%s: request = %p, status=%d\n",
@@ -1296,255 +1288,53 @@ isci_task_request_complete(struct isci_host *ihost,
 
        isci_request_change_state(ireq, completed);
 
-       tmf->status = completion_status;
        set_bit(IREQ_COMPLETE_IN_TARGET, &ireq->flags);
 
-       if (tmf->proto == SAS_PROTOCOL_SSP) {
-               memcpy(&tmf->resp.resp_iu,
-                      &ireq->ssp.rsp,
-                      SSP_RESP_IU_MAX_SIZE);
-       } else if (tmf->proto == SAS_PROTOCOL_SATA) {
-               memcpy(&tmf->resp.d2h_fis,
-                      &ireq->stp.rsp,
-                      sizeof(struct dev_to_host_fis));
+       if (tmf) {
+               tmf->status = completion_status;
+
+               if (tmf->proto == SAS_PROTOCOL_SSP) {
+                       memcpy(&tmf->resp.resp_iu,
+                              &ireq->ssp.rsp,
+                              SSP_RESP_IU_MAX_SIZE);
+               } else if (tmf->proto == SAS_PROTOCOL_SATA) {
+                       memcpy(&tmf->resp.d2h_fis,
+                              &ireq->stp.rsp,
+                              sizeof(struct dev_to_host_fis));
+               }
+               /* PRINT_TMF( ((struct isci_tmf *)request->task)); */
+               tmf_complete = tmf->complete;
        }
-
-       /* PRINT_TMF( ((struct isci_tmf *)request->task)); */
-       tmf_complete = tmf->complete;
-
        sci_controller_complete_io(ihost, ireq->target_device, ireq);
        /* set the 'terminated' flag handle to make sure it cannot be terminated
         *  or completed again.
         */
        set_bit(IREQ_TERMINATED, &ireq->flags);
 
-       isci_request_change_state(ireq, unallocated);
-       list_del_init(&ireq->dev_node);
-
-       /* The task management part completes last. */
-       complete(tmf_complete);
-}
-
-static void isci_smp_task_timedout(unsigned long _task)
-{
-       struct sas_task *task = (void *) _task;
-       unsigned long flags;
-
-       spin_lock_irqsave(&task->task_state_lock, flags);
-       if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
-               task->task_state_flags |= SAS_TASK_STATE_ABORTED;
-       spin_unlock_irqrestore(&task->task_state_lock, flags);
-
-       complete(&task->completion);
-}
-
-static void isci_smp_task_done(struct sas_task *task)
-{
-       if (!del_timer(&task->timer))
-               return;
-       complete(&task->completion);
-}
-
-static int isci_smp_execute_task(struct isci_host *ihost,
-                                struct domain_device *dev, void *req,
-                                int req_size, void *resp, int resp_size)
-{
-       int res, retry;
-       struct sas_task *task = NULL;
-
-       for (retry = 0; retry < 3; retry++) {
-               task = sas_alloc_task(GFP_KERNEL);
-               if (!task)
-                       return -ENOMEM;
-
-               task->dev = dev;
-               task->task_proto = dev->tproto;
-               sg_init_one(&task->smp_task.smp_req, req, req_size);
-               sg_init_one(&task->smp_task.smp_resp, resp, resp_size);
-
-               task->task_done = isci_smp_task_done;
-
-               task->timer.data = (unsigned long) task;
-               task->timer.function = isci_smp_task_timedout;
-               task->timer.expires = jiffies + 10*HZ;
-               add_timer(&task->timer);
-
-               res = isci_task_execute_task(task, 1, GFP_KERNEL);
-
-               if (res) {
-                       del_timer(&task->timer);
-                       dev_dbg(&ihost->pdev->dev,
-                               "%s: executing SMP task failed:%d\n",
-                               __func__, res);
-                       goto ex_err;
-               }
-
-               wait_for_completion(&task->completion);
-               res = -ECOMM;
-               if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
-                       dev_dbg(&ihost->pdev->dev,
-                               "%s: smp task timed out or aborted\n",
-                               __func__);
-                       isci_task_abort_task(task);
-                       if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
-                               dev_dbg(&ihost->pdev->dev,
-                                       "%s: SMP task aborted and not done\n",
-                                       __func__);
-                               goto ex_err;
-                       }
-               }
-               if (task->task_status.resp == SAS_TASK_COMPLETE &&
-                   task->task_status.stat == SAM_STAT_GOOD) {
-                       res = 0;
-                       break;
-               }
-               if (task->task_status.resp == SAS_TASK_COMPLETE &&
-                     task->task_status.stat == SAS_DATA_UNDERRUN) {
-                       /* no error, but return the number of bytes of
-                       * underrun */
-                       res = task->task_status.residual;
-                       break;
-               }
-               if (task->task_status.resp == SAS_TASK_COMPLETE &&
-                     task->task_status.stat == SAS_DATA_OVERRUN) {
-                       res = -EMSGSIZE;
-                       break;
-               } else {
-                       dev_dbg(&ihost->pdev->dev,
-                               "%s: task to dev %016llx response: 0x%x "
-                               "status 0x%x\n", __func__,
-                               SAS_ADDR(dev->sas_addr),
-                               task->task_status.resp,
-                               task->task_status.stat);
-                       sas_free_task(task);
-                       task = NULL;
-               }
-       }
-ex_err:
-       BUG_ON(retry == 3 && task != NULL);
-       sas_free_task(task);
-       return res;
-}
-
-#define DISCOVER_REQ_SIZE  16
-#define DISCOVER_RESP_SIZE 56
-
-int isci_smp_get_phy_attached_dev_type(struct isci_host *ihost,
-                                      struct domain_device *dev,
-                                      int phy_id, int *adt)
-{
-       struct smp_resp *disc_resp;
-       u8 *disc_req;
-       int res;
-
-       disc_resp = kzalloc(DISCOVER_RESP_SIZE, GFP_KERNEL);
-       if (!disc_resp)
-               return -ENOMEM;
-
-       disc_req = kzalloc(DISCOVER_REQ_SIZE, GFP_KERNEL);
-       if (disc_req) {
-               disc_req[0] = SMP_REQUEST;
-               disc_req[1] = SMP_DISCOVER;
-               disc_req[9] = phy_id;
-       } else {
-               kfree(disc_resp);
-               return -ENOMEM;
-       }
-       res = isci_smp_execute_task(ihost, dev, disc_req, DISCOVER_REQ_SIZE,
-                                   disc_resp, DISCOVER_RESP_SIZE);
-       if (!res) {
-               if (disc_resp->result != SMP_RESP_FUNC_ACC)
-                       res = disc_resp->result;
-               else
-                       *adt = disc_resp->disc.attached_dev_type;
+       /* As soon as something is in the terminate path, deallocation is
+        * managed there.  Note that the final non-managed state of a task
+        * request is "completed".
+        */
+       if ((ireq->status == completed) ||
+           !isci_request_is_dealloc_managed(ireq->status)) {
+               isci_request_change_state(ireq, unallocated);
+               isci_free_tag(ihost, ireq->io_tag);
+               list_del_init(&ireq->dev_node);
        }
-       kfree(disc_req);
-       kfree(disc_resp);
-
-       return res;
-}
-
-static void isci_wait_for_smp_phy_reset(struct isci_remote_device *idev, int phy_num)
-{
-       struct domain_device *dev = idev->domain_dev;
-       struct isci_port *iport = idev->isci_port;
-       struct isci_host *ihost = iport->isci_host;
-       int res, iteration = 0, attached_device_type;
-       #define STP_WAIT_MSECS 25000
-       unsigned long tmo = msecs_to_jiffies(STP_WAIT_MSECS);
-       unsigned long deadline = jiffies + tmo;
-       enum {
-               SMP_PHYWAIT_PHYDOWN,
-               SMP_PHYWAIT_PHYUP,
-               SMP_PHYWAIT_DONE
-       } phy_state = SMP_PHYWAIT_PHYDOWN;
-
-       /* While there is time, wait for the phy to go away and come back */
-       while (time_is_after_jiffies(deadline) && phy_state != SMP_PHYWAIT_DONE) {
-               int event = atomic_read(&iport->event);
-
-               ++iteration;
-
-               tmo = wait_event_timeout(ihost->eventq,
-                                        event != atomic_read(&iport->event) ||
-                                        !test_bit(IPORT_BCN_BLOCKED, &iport->flags),
-                                        tmo);
-               /* link down, stop polling */
-               if (!test_bit(IPORT_BCN_BLOCKED, &iport->flags))
-                       break;
 
-               dev_dbg(&ihost->pdev->dev,
-                       "%s: iport %p, iteration %d,"
-                       " phase %d: time_remaining %lu, bcns = %d\n",
-                       __func__, iport, iteration, phy_state,
-                       tmo, test_bit(IPORT_BCN_PENDING, &iport->flags));
-
-               res = isci_smp_get_phy_attached_dev_type(ihost, dev, phy_num,
-                                                        &attached_device_type);
-               tmo = deadline - jiffies;
-
-               if (res) {
-                       dev_dbg(&ihost->pdev->dev,
-                                "%s: iteration %d, phase %d:"
-                                " SMP error=%d, time_remaining=%lu\n",
-                                __func__, iteration, phy_state, res, tmo);
-                       break;
-               }
-               dev_dbg(&ihost->pdev->dev,
-                       "%s: iport %p, iteration %d,"
-                       " phase %d: time_remaining %lu, bcns = %d, "
-                       "attdevtype = %x\n",
-                       __func__, iport, iteration, phy_state,
-                       tmo, test_bit(IPORT_BCN_PENDING, &iport->flags),
-                       attached_device_type);
-
-               switch (phy_state) {
-               case SMP_PHYWAIT_PHYDOWN:
-                       /* Has the device gone away? */
-                       if (!attached_device_type)
-                               phy_state = SMP_PHYWAIT_PHYUP;
-
-                       break;
-
-               case SMP_PHYWAIT_PHYUP:
-                       /* Has the device come back? */
-                       if (attached_device_type)
-                               phy_state = SMP_PHYWAIT_DONE;
-                       break;
-
-               case SMP_PHYWAIT_DONE:
-                       break;
-               }
+       /* "request_complete" is set if the task was being terminated. */
+       if (request_complete)
+               complete(request_complete);
 
-       }
-       dev_dbg(&ihost->pdev->dev, "%s: done\n",  __func__);
+       /* The task management part completes last. */
+       if (tmf_complete)
+               complete(tmf_complete);
 }
 
 static int isci_reset_device(struct isci_host *ihost,
                             struct isci_remote_device *idev)
 {
        struct sas_phy *phy = sas_find_local_phy(idev->domain_dev);
-       struct isci_port *iport = idev->isci_port;
        enum sci_status status;
        unsigned long flags;
        int rc;
@@ -1564,13 +1354,6 @@ static int isci_reset_device(struct isci_host *ihost,
        }
        spin_unlock_irqrestore(&ihost->scic_lock, flags);
 
-       /* Make sure all pending requests are able to be fully terminated. */
-       isci_device_clear_reset_pending(ihost, idev);
-
-       /* If this is a device on an expander, disable BCN processing. */
-       if (!scsi_is_sas_phy_local(phy))
-               set_bit(IPORT_BCN_BLOCKED, &iport->flags);
-
        rc = sas_phy_reset(phy, true);
 
        /* Terminate in-progress I/O now. */
@@ -1581,21 +1364,6 @@ static int isci_reset_device(struct isci_host *ihost,
        status = sci_remote_device_reset_complete(idev);
        spin_unlock_irqrestore(&ihost->scic_lock, flags);
 
-       /* If this is a device on an expander, bring the phy back up. */
-       if (!scsi_is_sas_phy_local(phy)) {
-               /* A phy reset will cause the device to go away then reappear.
-                * Since libsas will take action on incoming BCNs (eg. remove
-                * a device going through an SMP phy-control driven reset),
-                * we need to wait until the phy comes back up before letting
-                * discovery proceed in libsas.
-                */
-               isci_wait_for_smp_phy_reset(idev, phy->number);
-
-               spin_lock_irqsave(&ihost->scic_lock, flags);
-               isci_port_bcn_enable(ihost, idev->isci_port);
-               spin_unlock_irqrestore(&ihost->scic_lock, flags);
-       }
-
        if (status != SCI_SUCCESS) {
                dev_dbg(&ihost->pdev->dev,
                         "%s: sci_remote_device_reset_complete(%p) "
index 15b18d1..bc78c0a 100644 (file)
@@ -58,6 +58,8 @@
 #include <scsi/sas_ata.h>
 #include "host.h"
 
+#define ISCI_TERMINATION_TIMEOUT_MSEC 500
+
 struct isci_request;
 
 /**
@@ -224,35 +226,6 @@ enum isci_completion_selection {
        isci_perform_error_io_completion        /* Use sas_task_abort */
 };
 
-static inline void isci_set_task_doneflags(
-       struct sas_task *task)
-{
-       /* Since no futher action will be taken on this task,
-        * make sure to mark it complete from the lldd perspective.
-        */
-       task->task_state_flags |= SAS_TASK_STATE_DONE;
-       task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
-       task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
-}
-/**
- * isci_task_all_done() - This function clears the task bits to indicate the
- *    LLDD is done with the task.
- *
- *
- */
-static inline void isci_task_all_done(
-       struct sas_task *task)
-{
-       unsigned long flags;
-
-       /* Since no futher action will be taken on this task,
-        * make sure to mark it complete from the lldd perspective.
-        */
-       spin_lock_irqsave(&task->task_state_lock, flags);
-       isci_set_task_doneflags(task);
-       spin_unlock_irqrestore(&task->task_state_lock, flags);
-}
-
 /**
  * isci_task_set_completion_status() - This function sets the completion status
  *    for the request.
@@ -334,7 +307,9 @@ isci_task_set_completion_status(
                /* Fall through to the normal case... */
        case isci_perform_normal_io_completion:
                /* Normal notification (task_done) */
-               isci_set_task_doneflags(task);
+               task->task_state_flags |= SAS_TASK_STATE_DONE;
+               task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
+                                           SAS_TASK_STATE_PENDING);
                break;
        default:
                WARN_ONCE(1, "unknown task_notification_selection: %d\n",
index 7c055fd..1b22130 100644 (file)
@@ -469,6 +469,7 @@ static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp,
        struct fc_frame_header *fh = fc_frame_header_get(fp);
        int error;
        u32 f_ctl;
+       u8 fh_type = fh->fh_type;
 
        ep = fc_seq_exch(sp);
        WARN_ON((ep->esb_stat & ESB_ST_SEQ_INIT) != ESB_ST_SEQ_INIT);
@@ -493,7 +494,7 @@ static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp,
         */
        error = lport->tt.frame_send(lport, fp);
 
-       if (fh->fh_type == FC_TYPE_BLS)
+       if (fh_type == FC_TYPE_BLS)
                return error;
 
        /*
@@ -1792,6 +1793,9 @@ restart:
                        goto restart;
                }
        }
+       pool->next_index = 0;
+       pool->left = FC_XID_UNKNOWN;
+       pool->right = FC_XID_UNKNOWN;
        spin_unlock_bh(&pool->lock);
 }
 
@@ -2280,6 +2284,7 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lport,
                goto free_mempool;
        for_each_possible_cpu(cpu) {
                pool = per_cpu_ptr(mp->pool, cpu);
+               pool->next_index = 0;
                pool->left = FC_XID_UNKNOWN;
                pool->right = FC_XID_UNKNOWN;
                spin_lock_init(&pool->lock);
index 628f347..2cb12b9 100644 (file)
@@ -1030,16 +1030,8 @@ static void fc_lport_enter_reset(struct fc_lport *lport)
                           FCH_EVT_LIPRESET, 0);
        fc_vports_linkchange(lport);
        fc_lport_reset_locked(lport);
-       if (lport->link_up) {
-               /*
-                * Wait upto resource allocation time out before
-                * doing re-login since incomplete FIP exchanged
-                * from last session may collide with exchanges
-                * in new session.
-                */
-               msleep(lport->r_a_tov);
+       if (lport->link_up)
                fc_lport_enter_flogi(lport);
-       }
 }
 
 /**
@@ -1481,6 +1473,7 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
                         void *lp_arg)
 {
        struct fc_lport *lport = lp_arg;
+       struct fc_frame_header *fh;
        struct fc_els_flogi *flp;
        u32 did;
        u16 csp_flags;
@@ -1508,49 +1501,56 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
                goto err;
        }
 
+       fh = fc_frame_header_get(fp);
        did = fc_frame_did(fp);
-       if (fc_frame_payload_op(fp) == ELS_LS_ACC && did) {
-               flp = fc_frame_payload_get(fp, sizeof(*flp));
-               if (flp) {
-                       mfs = ntohs(flp->fl_csp.sp_bb_data) &
-                               FC_SP_BB_DATA_MASK;
-                       if (mfs >= FC_SP_MIN_MAX_PAYLOAD &&
-                           mfs < lport->mfs)
-                               lport->mfs = mfs;
-                       csp_flags = ntohs(flp->fl_csp.sp_features);
-                       r_a_tov = ntohl(flp->fl_csp.sp_r_a_tov);
-                       e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov);
-                       if (csp_flags & FC_SP_FT_EDTR)
-                               e_d_tov /= 1000000;
-
-                       lport->npiv_enabled = !!(csp_flags & FC_SP_FT_NPIV_ACC);
-
-                       if ((csp_flags & FC_SP_FT_FPORT) == 0) {
-                               if (e_d_tov > lport->e_d_tov)
-                                       lport->e_d_tov = e_d_tov;
-                               lport->r_a_tov = 2 * e_d_tov;
-                               fc_lport_set_port_id(lport, did, fp);
-                               printk(KERN_INFO "host%d: libfc: "
-                                      "Port (%6.6x) entered "
-                                      "point-to-point mode\n",
-                                      lport->host->host_no, did);
-                               fc_lport_ptp_setup(lport, fc_frame_sid(fp),
-                                                  get_unaligned_be64(
-                                                          &flp->fl_wwpn),
-                                                  get_unaligned_be64(
-                                                          &flp->fl_wwnn));
-                       } else {
-                               lport->e_d_tov = e_d_tov;
-                               lport->r_a_tov = r_a_tov;
-                               fc_host_fabric_name(lport->host) =
-                                       get_unaligned_be64(&flp->fl_wwnn);
-                               fc_lport_set_port_id(lport, did, fp);
-                               fc_lport_enter_dns(lport);
-                       }
-               }
-       } else {
-               FC_LPORT_DBG(lport, "FLOGI RJT or bad response\n");
+       if (fh->fh_r_ctl != FC_RCTL_ELS_REP || did == 0 ||
+           fc_frame_payload_op(fp) != ELS_LS_ACC) {
+               FC_LPORT_DBG(lport, "FLOGI not accepted or bad response\n");
                fc_lport_error(lport, fp);
+               goto err;
+       }
+
+       flp = fc_frame_payload_get(fp, sizeof(*flp));
+       if (!flp) {
+               FC_LPORT_DBG(lport, "FLOGI bad response\n");
+               fc_lport_error(lport, fp);
+               goto err;
+       }
+
+       mfs = ntohs(flp->fl_csp.sp_bb_data) &
+               FC_SP_BB_DATA_MASK;
+       if (mfs >= FC_SP_MIN_MAX_PAYLOAD &&
+           mfs < lport->mfs)
+               lport->mfs = mfs;
+       csp_flags = ntohs(flp->fl_csp.sp_features);
+       r_a_tov = ntohl(flp->fl_csp.sp_r_a_tov);
+       e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov);
+       if (csp_flags & FC_SP_FT_EDTR)
+               e_d_tov /= 1000000;
+
+       lport->npiv_enabled = !!(csp_flags & FC_SP_FT_NPIV_ACC);
+
+       if ((csp_flags & FC_SP_FT_FPORT) == 0) {
+               if (e_d_tov > lport->e_d_tov)
+                       lport->e_d_tov = e_d_tov;
+               lport->r_a_tov = 2 * e_d_tov;
+               fc_lport_set_port_id(lport, did, fp);
+               printk(KERN_INFO "host%d: libfc: "
+                      "Port (%6.6x) entered "
+                      "point-to-point mode\n",
+                      lport->host->host_no, did);
+               fc_lport_ptp_setup(lport, fc_frame_sid(fp),
+                                  get_unaligned_be64(
+                                          &flp->fl_wwpn),
+                                  get_unaligned_be64(
+                                          &flp->fl_wwnn));
+       } else {
+               lport->e_d_tov = e_d_tov;
+               lport->r_a_tov = r_a_tov;
+               fc_host_fabric_name(lport->host) =
+                       get_unaligned_be64(&flp->fl_wwnn);
+               fc_lport_set_port_id(lport, did, fp);
+               fc_lport_enter_dns(lport);
        }
 
 out:
index 3105d5e..8dc1b32 100644 (file)
@@ -1,5 +1,5 @@
 /*
- *  Copyright (c) 2000-2010 LSI Corporation.
+ *  Copyright (c) 2000-2011 LSI Corporation.
  *
  *
  *           Name:  mpi2.h
@@ -8,7 +8,7 @@
  *                  scatter/gather formats.
  *  Creation Date:  June 21, 2006
  *
- *  mpi2.h Version:  02.00.18
+ *  mpi2.h Version:  02.00.20
  *
  *  Version History
  *  ---------------
@@ -66,6 +66,9 @@
  *  08-11-10  02.00.17  Bumped MPI2_HEADER_VERSION_UNIT.
  *  11-10-10  02.00.18  Bumped MPI2_HEADER_VERSION_UNIT.
  *                      Added MPI2_IEEE_SGE_FLAGS_SYSTEMPLBCPI_ADDR define.
+ *  02-23-11  02.00.19  Bumped MPI2_HEADER_VERSION_UNIT.
+ *                      Added MPI2_FUNCTION_SEND_HOST_MESSAGE.
+ *  03-09-11  02.00.20  Bumped MPI2_HEADER_VERSION_UNIT.
  *  --------------------------------------------------------------------------
  */
 
@@ -91,7 +94,7 @@
 #define MPI2_VERSION_02_00                  (0x0200)
 
 /* versioning for this MPI header set */
-#define MPI2_HEADER_VERSION_UNIT            (0x12)
+#define MPI2_HEADER_VERSION_UNIT            (0x14)
 #define MPI2_HEADER_VERSION_DEV             (0x00)
 #define MPI2_HEADER_VERSION_UNIT_MASK       (0xFF00)
 #define MPI2_HEADER_VERSION_UNIT_SHIFT      (8)
@@ -515,6 +518,8 @@ typedef union _MPI2_REPLY_DESCRIPTORS_UNION
 #define MPI2_FUNCTION_HOST_BASED_DISCOVERY_ACTION   (0x2F)
 /* Power Management Control */
 #define MPI2_FUNCTION_PWR_MGMT_CONTROL              (0x30)
+/* Send Host Message */
+#define MPI2_FUNCTION_SEND_HOST_MESSAGE             (0x31)
 /* beginning of product-specific range */
 #define MPI2_FUNCTION_MIN_PRODUCT_SPECIFIC          (0xF0)
 /* end of product-specific range */
index 61475a6..cfd95b4 100644 (file)
@@ -1,12 +1,12 @@
 /*
- *  Copyright (c) 2000-2010 LSI Corporation.
+ *  Copyright (c) 2000-2011 LSI Corporation.
  *
  *
  *           Name:  mpi2_cnfg.h
  *          Title:  MPI Configuration messages and pages
  *  Creation Date:  November 10, 2006
  *
- *    mpi2_cnfg.h Version:  02.00.17
+ *    mpi2_cnfg.h Version:  02.00.19
  *
  *  Version History
  *  ---------------
  *                      to MPI2_CONFIG_PAGE_IO_UNIT_7.
  *                      Added MPI2_CONFIG_EXTPAGETYPE_EXT_MANUFACTURING define
  *                      and MPI2_CONFIG_PAGE_EXT_MAN_PS structure.
+ *  02-23-11  02.00.18  Added ProxyVF_ID field to MPI2_CONFIG_REQUEST.
+ *                      Added IO Unit Page 8, IO Unit Page 9,
+ *                      and IO Unit Page 10.
+ *                      Added SASNotifyPrimitiveMasks field to
+ *                      MPI2_CONFIG_PAGE_IOC_7.
+ *  03-09-11  02.00.19  Fixed IO Unit Page 10 (to match the spec).
  *  --------------------------------------------------------------------------
  */
 
@@ -329,7 +335,9 @@ typedef struct _MPI2_CONFIG_REQUEST
     U8                      VP_ID;                      /* 0x08 */
     U8                      VF_ID;                      /* 0x09 */
     U16                     Reserved1;                  /* 0x0A */
-    U32                     Reserved2;                  /* 0x0C */
+       U8                      Reserved2;                  /* 0x0C */
+       U8                      ProxyVF_ID;                 /* 0x0D */
+       U16                     Reserved4;                  /* 0x0E */
     U32                     Reserved3;                  /* 0x10 */
     MPI2_CONFIG_PAGE_HEADER Header;                     /* 0x14 */
     U32                     PageAddress;                /* 0x18 */
@@ -915,6 +923,120 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_7 {
 #define MPI2_IOUNITPAGE7_BOARD_TEMP_FAHRENHEIT      (0x01)
 #define MPI2_IOUNITPAGE7_BOARD_TEMP_CELSIUS         (0x02)
 
+/* IO Unit Page 8 */
+
+#define MPI2_IOUNIT8_NUM_THRESHOLDS     (4)
+
+typedef struct _MPI2_IOUNIT8_SENSOR {
+       U16                     Flags;                /* 0x00 */
+       U16                     Reserved1;            /* 0x02 */
+       U16
+               Threshold[MPI2_IOUNIT8_NUM_THRESHOLDS]; /* 0x04 */
+       U32                     Reserved2;            /* 0x0C */
+       U32                     Reserved3;            /* 0x10 */
+       U32                     Reserved4;            /* 0x14 */
+} MPI2_IOUNIT8_SENSOR, MPI2_POINTER PTR_MPI2_IOUNIT8_SENSOR,
+Mpi2IOUnit8Sensor_t, MPI2_POINTER pMpi2IOUnit8Sensor_t;
+
+/* defines for IO Unit Page 8 Sensor Flags field */
+#define MPI2_IOUNIT8_SENSOR_FLAGS_T3_ENABLE         (0x0008)
+#define MPI2_IOUNIT8_SENSOR_FLAGS_T2_ENABLE         (0x0004)
+#define MPI2_IOUNIT8_SENSOR_FLAGS_T1_ENABLE         (0x0002)
+#define MPI2_IOUNIT8_SENSOR_FLAGS_T0_ENABLE         (0x0001)
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check the value returned for NumSensors at runtime.
+ */
+#ifndef MPI2_IOUNITPAGE8_SENSOR_ENTRIES
+#define MPI2_IOUNITPAGE8_SENSOR_ENTRIES     (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_8 {
+       MPI2_CONFIG_PAGE_HEADER Header;               /* 0x00 */
+       U32                     Reserved1;            /* 0x04 */
+       U32                     Reserved2;            /* 0x08 */
+       U8                      NumSensors;           /* 0x0C */
+       U8                      PollingInterval;      /* 0x0D */
+       U16                     Reserved3;            /* 0x0E */
+       MPI2_IOUNIT8_SENSOR
+                       Sensor[MPI2_IOUNITPAGE8_SENSOR_ENTRIES];/* 0x10 */
+} MPI2_CONFIG_PAGE_IO_UNIT_8, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_8,
+Mpi2IOUnitPage8_t, MPI2_POINTER pMpi2IOUnitPage8_t;
+
+#define MPI2_IOUNITPAGE8_PAGEVERSION                    (0x00)
+
+
+/* IO Unit Page 9 */
+
+typedef struct _MPI2_IOUNIT9_SENSOR {
+       U16                     CurrentTemperature;     /* 0x00 */
+       U16                     Reserved1;              /* 0x02 */
+       U8                      Flags;                  /* 0x04 */
+       U8                      Reserved2;              /* 0x05 */
+       U16                     Reserved3;              /* 0x06 */
+       U32                     Reserved4;              /* 0x08 */
+       U32                     Reserved5;              /* 0x0C */
+} MPI2_IOUNIT9_SENSOR, MPI2_POINTER PTR_MPI2_IOUNIT9_SENSOR,
+Mpi2IOUnit9Sensor_t, MPI2_POINTER pMpi2IOUnit9Sensor_t;
+
+/* defines for IO Unit Page 9 Sensor Flags field */
+#define MPI2_IOUNIT9_SENSOR_FLAGS_TEMP_VALID        (0x01)
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check the value returned for NumSensors at runtime.
+ */
+#ifndef MPI2_IOUNITPAGE9_SENSOR_ENTRIES
+#define MPI2_IOUNITPAGE9_SENSOR_ENTRIES     (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_9 {
+       MPI2_CONFIG_PAGE_HEADER Header;                /* 0x00 */
+       U32                     Reserved1;             /* 0x04 */
+       U32                     Reserved2;             /* 0x08 */
+       U8                      NumSensors;            /* 0x0C */
+       U8                      Reserved4;             /* 0x0D */
+       U16                     Reserved3;             /* 0x0E */
+       MPI2_IOUNIT9_SENSOR
+                       Sensor[MPI2_IOUNITPAGE9_SENSOR_ENTRIES];/* 0x10 */
+} MPI2_CONFIG_PAGE_IO_UNIT_9, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_9,
+Mpi2IOUnitPage9_t, MPI2_POINTER pMpi2IOUnitPage9_t;
+
+#define MPI2_IOUNITPAGE9_PAGEVERSION                    (0x00)
+
+
+/* IO Unit Page 10 */
+
+typedef struct _MPI2_IOUNIT10_FUNCTION {
+       U8                      CreditPercent;      /* 0x00 */
+       U8                      Reserved1;          /* 0x01 */
+       U16                     Reserved2;          /* 0x02 */
+} MPI2_IOUNIT10_FUNCTION, MPI2_POINTER PTR_MPI2_IOUNIT10_FUNCTION,
+Mpi2IOUnit10Function_t, MPI2_POINTER pMpi2IOUnit10Function_t;
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check the value returned for NumFunctions at runtime.
+ */
+#ifndef MPI2_IOUNITPAGE10_FUNCTION_ENTRIES
+#define MPI2_IOUNITPAGE10_FUNCTION_ENTRIES      (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_10 {
+       MPI2_CONFIG_PAGE_HEADER Header;                    /* 0x00 */
+       U8                      NumFunctions;             /* 0x04 */
+       U8                      Reserved1;              /* 0x05 */
+       U16                     Reserved2;              /* 0x06 */
+       U32                     Reserved3;              /* 0x08 */
+       U32                     Reserved4;              /* 0x0C */
+       MPI2_IOUNIT10_FUNCTION
+               Function[MPI2_IOUNITPAGE10_FUNCTION_ENTRIES];/* 0x10 */
+} MPI2_CONFIG_PAGE_IO_UNIT_10, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_10,
+Mpi2IOUnitPage10_t, MPI2_POINTER pMpi2IOUnitPage10_t;
+
+#define MPI2_IOUNITPAGE10_PAGEVERSION                   (0x01)
+
 
 
 /****************************************************************************
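The three IO Unit pages added above follow the usual MPI2 variable-length page convention: the header is declared with a one-entry array, and NumSensors (or NumFunctions for Page 10) reports the real element count at runtime. Below is a minimal sketch of honoring that convention when walking Page 8, assuming the page has already been read into a buffer through the driver's normal config-page path and that the kernel's endian helpers are available; the function is illustrative, not part of this patch.

/* Illustrative only -- not part of this patch.  "buf" is assumed to hold a
 * complete IO Unit Page 8 already read from the IOC.
 */
static int example_count_t0_enabled_sensors(void *buf)
{
	Mpi2IOUnitPage8_t *pg8 = buf;
	int i, active = 0;

	/* NumSensors, not MPI2_IOUNITPAGE8_SENSOR_ENTRIES, bounds Sensor[] */
	for (i = 0; i < pg8->NumSensors; i++)
		if (le16_to_cpu(pg8->Sensor[i].Flags) &
		    MPI2_IOUNIT8_SENSOR_FLAGS_T0_ENABLE)
			active++;
	return active;
}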
@@ -1022,12 +1144,12 @@ typedef struct _MPI2_CONFIG_PAGE_IOC_7
     U32                     Reserved1;                  /* 0x04 */
     U32                     EventMasks[MPI2_IOCPAGE7_EVENTMASK_WORDS];/* 0x08 */
     U16                     SASBroadcastPrimitiveMasks; /* 0x18 */
-    U16                     Reserved2;                  /* 0x1A */
+    U16                     SASNotifyPrimitiveMasks;    /* 0x1A */
     U32                     Reserved3;                  /* 0x1C */
 } MPI2_CONFIG_PAGE_IOC_7, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IOC_7,
   Mpi2IOCPage7_t, MPI2_POINTER pMpi2IOCPage7_t;
 
-#define MPI2_IOCPAGE7_PAGEVERSION                       (0x01)
+#define MPI2_IOCPAGE7_PAGEVERSION                       (0x02)
 
 
 /* IOC Page 8 */
@@ -2070,16 +2192,16 @@ typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_8 {
 #define MPI2_SASIOUNITPAGE8_PAGEVERSION     (0x00)
 
 /* defines for PowerManagementCapabilities field */
-#define MPI2_SASIOUNIT8_PM_HOST_PORT_WIDTH_MOD          (0x000001000)
-#define MPI2_SASIOUNIT8_PM_HOST_SAS_SLUMBER_MODE        (0x000000800)
-#define MPI2_SASIOUNIT8_PM_HOST_SAS_PARTIAL_MODE        (0x000000400)
-#define MPI2_SASIOUNIT8_PM_HOST_SATA_SLUMBER_MODE       (0x000000200)
-#define MPI2_SASIOUNIT8_PM_HOST_SATA_PARTIAL_MODE       (0x000000100)
-#define MPI2_SASIOUNIT8_PM_IOUNIT_PORT_WIDTH_MOD        (0x000000010)
-#define MPI2_SASIOUNIT8_PM_IOUNIT_SAS_SLUMBER_MODE      (0x000000008)
-#define MPI2_SASIOUNIT8_PM_IOUNIT_SAS_PARTIAL_MODE      (0x000000004)
-#define MPI2_SASIOUNIT8_PM_IOUNIT_SATA_SLUMBER_MODE     (0x000000002)
-#define MPI2_SASIOUNIT8_PM_IOUNIT_SATA_PARTIAL_MODE     (0x000000001)
+#define MPI2_SASIOUNIT8_PM_HOST_PORT_WIDTH_MOD          (0x00001000)
+#define MPI2_SASIOUNIT8_PM_HOST_SAS_SLUMBER_MODE        (0x00000800)
+#define MPI2_SASIOUNIT8_PM_HOST_SAS_PARTIAL_MODE        (0x00000400)
+#define MPI2_SASIOUNIT8_PM_HOST_SATA_SLUMBER_MODE       (0x00000200)
+#define MPI2_SASIOUNIT8_PM_HOST_SATA_PARTIAL_MODE       (0x00000100)
+#define MPI2_SASIOUNIT8_PM_IOUNIT_PORT_WIDTH_MOD        (0x00000010)
+#define MPI2_SASIOUNIT8_PM_IOUNIT_SAS_SLUMBER_MODE      (0x00000008)
+#define MPI2_SASIOUNIT8_PM_IOUNIT_SAS_PARTIAL_MODE      (0x00000004)
+#define MPI2_SASIOUNIT8_PM_IOUNIT_SATA_SLUMBER_MODE     (0x00000002)
+#define MPI2_SASIOUNIT8_PM_IOUNIT_SATA_PARTIAL_MODE     (0x00000001)
 
 
 
@@ -2266,6 +2388,7 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_DEV_0
 /* see mpi2_sas.h for values for SAS Device Page 0 DeviceInfo values */
 
 /* values for SAS Device Page 0 Flags field */
+#define MPI2_SAS_DEVICE0_FLAGS_UNAUTHORIZED_DEVICE          (0x8000)
 #define MPI2_SAS_DEVICE0_FLAGS_SLUMBER_PM_CAPABLE           (0x1000)
 #define MPI2_SAS_DEVICE0_FLAGS_PARTIAL_PM_CAPABLE           (0x0800)
 #define MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY     (0x0400)
index 1f0c190..93d9b69 100644 (file)
@@ -1,12 +1,12 @@
 /*
- *  Copyright (c) 2000-2010 LSI Corporation.
+ *  Copyright (c) 2000-2011 LSI Corporation.
  *
  *
  *           Name:  mpi2_ioc.h
  *          Title:  MPI IOC, Port, Event, FW Download, and FW Upload messages
  *  Creation Date:  October 11, 2006
  *
- *  mpi2_ioc.h Version:  02.00.16
+ *  mpi2_ioc.h Version:  02.00.17
  *
  *  Version History
  *  ---------------
  *  05-12-10  02.00.15  Marked Task Set Full Event as obsolete.
  *                      Added MPI2_EVENT_SAS_TOPO_LR_UNSUPPORTED_PHY define.
  *  11-10-10  02.00.16  Added MPI2_FW_DOWNLOAD_ITYPE_MIN_PRODUCT_SPECIFIC.
+ *  02-23-11  02.00.17  Added SAS NOTIFY Primitive event, and added
+ *                      SASNotifyPrimitiveMasks field to
+ *                      MPI2_EVENT_NOTIFICATION_REQUEST.
+ *                      Added Temperature Threshold Event.
+ *                      Added Host Message Event.
+ *                      Added Send Host Message request and reply.
  *  --------------------------------------------------------------------------
  */
 
@@ -421,7 +427,7 @@ typedef struct _MPI2_EVENT_NOTIFICATION_REQUEST
     U32                     Reserved6;                      /* 0x10 */
     U32                     EventMasks[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];/* 0x14 */
     U16                     SASBroadcastPrimitiveMasks;     /* 0x24 */
-    U16                     Reserved7;                      /* 0x26 */
+    U16                     SASNotifyPrimitiveMasks;        /* 0x26 */
     U32                     Reserved8;                      /* 0x28 */
 } MPI2_EVENT_NOTIFICATION_REQUEST,
   MPI2_POINTER PTR_MPI2_EVENT_NOTIFICATION_REQUEST,
@@ -476,6 +482,9 @@ typedef struct _MPI2_EVENT_NOTIFICATION_REPLY
 #define MPI2_EVENT_GPIO_INTERRUPT                   (0x0023)
 #define MPI2_EVENT_HOST_BASED_DISCOVERY_PHY         (0x0024)
 #define MPI2_EVENT_SAS_QUIESCE                      (0x0025)
+#define MPI2_EVENT_SAS_NOTIFY_PRIMITIVE             (0x0026)
+#define MPI2_EVENT_TEMP_THRESHOLD                   (0x0027)
+#define MPI2_EVENT_HOST_MESSAGE                     (0x0028)
 
 
 /* Log Entry Added Event data */
@@ -507,6 +516,39 @@ typedef struct _MPI2_EVENT_DATA_GPIO_INTERRUPT {
   MPI2_POINTER PTR_MPI2_EVENT_DATA_GPIO_INTERRUPT,
   Mpi2EventDataGpioInterrupt_t, MPI2_POINTER pMpi2EventDataGpioInterrupt_t;
 
+/* Temperature Threshold Event data */
+
+typedef struct _MPI2_EVENT_DATA_TEMPERATURE {
+       U16         Status;                             /* 0x00 */
+       U8          SensorNum;                          /* 0x02 */
+       U8          Reserved1;                          /* 0x03 */
+       U16         CurrentTemperature;                 /* 0x04 */
+       U16         Reserved2;                          /* 0x06 */
+       U32         Reserved3;                          /* 0x08 */
+       U32         Reserved4;                          /* 0x0C */
+} MPI2_EVENT_DATA_TEMPERATURE,
+MPI2_POINTER PTR_MPI2_EVENT_DATA_TEMPERATURE,
+Mpi2EventDataTemperature_t, MPI2_POINTER pMpi2EventDataTemperature_t;
+
+/* Temperature Threshold Event data Status bits */
+#define MPI2_EVENT_TEMPERATURE3_EXCEEDED            (0x0008)
+#define MPI2_EVENT_TEMPERATURE2_EXCEEDED            (0x0004)
+#define MPI2_EVENT_TEMPERATURE1_EXCEEDED            (0x0002)
+#define MPI2_EVENT_TEMPERATURE0_EXCEEDED            (0x0001)
+
+
+/* Host Message Event data */
+
+typedef struct _MPI2_EVENT_DATA_HOST_MESSAGE {
+       U8          SourceVF_ID;                        /* 0x00 */
+       U8          Reserved1;                          /* 0x01 */
+       U16         Reserved2;                          /* 0x02 */
+       U32         Reserved3;                          /* 0x04 */
+       U32         HostData[1];                        /* 0x08 */
+} MPI2_EVENT_DATA_HOST_MESSAGE, MPI2_POINTER PTR_MPI2_EVENT_DATA_HOST_MESSAGE,
+Mpi2EventDataHostMessage_t, MPI2_POINTER pMpi2EventDataHostMessage_t;
+
+
 /* Hard Reset Received Event data */
 
 typedef struct _MPI2_EVENT_DATA_HARD_RESET_RECEIVED
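For the Temperature Threshold event defined above, here is a hedged sketch of decoding the Status bits in an event handler; the T0-T3 names come from the defines, their mapping to the IO Unit Page 8 thresholds is only implied by the naming, and the surrounding event dispatch is assumed rather than taken from this patch.

/* Illustrative only. */
static void example_temp_threshold_event(Mpi2EventDataTemperature_t *event_data)
{
	u16 status = le16_to_cpu(event_data->Status);

	if (status & (MPI2_EVENT_TEMPERATURE0_EXCEEDED |
	    MPI2_EVENT_TEMPERATURE1_EXCEEDED |
	    MPI2_EVENT_TEMPERATURE2_EXCEEDED |
	    MPI2_EVENT_TEMPERATURE3_EXCEEDED))
		pr_warn("temperature sensor %d crossed a threshold, reading %u\n",
		    event_data->SensorNum,
		    le16_to_cpu(event_data->CurrentTemperature));
}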
@@ -749,6 +791,24 @@ typedef struct _MPI2_EVENT_DATA_SAS_BROADCAST_PRIMITIVE
 #define MPI2_EVENT_PRIMITIVE_CHANGE0_RESERVED               (0x07)
 #define MPI2_EVENT_PRIMITIVE_CHANGE1_RESERVED               (0x08)
 
+/* SAS Notify Primitive Event data */
+
+typedef struct _MPI2_EVENT_DATA_SAS_NOTIFY_PRIMITIVE {
+       U8                      PhyNum;                     /* 0x00 */
+       U8                      Port;                       /* 0x01 */
+       U8                      Reserved1;                  /* 0x02 */
+       U8                      Primitive;                  /* 0x03 */
+} MPI2_EVENT_DATA_SAS_NOTIFY_PRIMITIVE,
+MPI2_POINTER PTR_MPI2_EVENT_DATA_SAS_NOTIFY_PRIMITIVE,
+Mpi2EventDataSasNotifyPrimitive_t,
+MPI2_POINTER pMpi2EventDataSasNotifyPrimitive_t;
+
+/* defines for the Primitive field */
+#define MPI2_EVENT_NOTIFY_ENABLE_SPINUP                     (0x01)
+#define MPI2_EVENT_NOTIFY_POWER_LOSS_EXPECTED               (0x02)
+#define MPI2_EVENT_NOTIFY_RESERVED1                         (0x03)
+#define MPI2_EVENT_NOTIFY_RESERVED2                         (0x04)
+
 
 /* SAS Initiator Device Status Change Event data */
 
@@ -1000,6 +1060,53 @@ typedef struct _MPI2_EVENT_ACK_REPLY
   Mpi2EventAckReply_t, MPI2_POINTER pMpi2EventAckReply_t;
 
 
+/****************************************************************************
+*  SendHostMessage message
+****************************************************************************/
+
+/* SendHostMessage Request message */
+typedef struct _MPI2_SEND_HOST_MESSAGE_REQUEST {
+       U16                     HostDataLength;                 /* 0x00 */
+       U8                      ChainOffset;                    /* 0x02 */
+       U8                      Function;                       /* 0x03 */
+       U16                     Reserved1;                      /* 0x04 */
+       U8                      Reserved2;                      /* 0x06 */
+       U8                      MsgFlags;                       /* 0x07 */
+       U8                      VP_ID;                          /* 0x08 */
+       U8                      VF_ID;                          /* 0x09 */
+       U16                     Reserved3;                      /* 0x0A */
+       U8                      Reserved4;                      /* 0x0C */
+       U8                      DestVF_ID;                      /* 0x0D */
+       U16                     Reserved5;                      /* 0x0E */
+       U32                     Reserved6;                      /* 0x10 */
+       U32                     Reserved7;                      /* 0x14 */
+       U32                     Reserved8;                      /* 0x18 */
+       U32                     Reserved9;                      /* 0x1C */
+       U32                     Reserved10;                     /* 0x20 */
+       U32                     HostData[1];                    /* 0x24 */
+} MPI2_SEND_HOST_MESSAGE_REQUEST,
+MPI2_POINTER PTR_MPI2_SEND_HOST_MESSAGE_REQUEST,
+Mpi2SendHostMessageRequest_t, MPI2_POINTER pMpi2SendHostMessageRequest_t;
+
+
+/* SendHostMessage Reply message */
+typedef struct _MPI2_SEND_HOST_MESSAGE_REPLY {
+       U16                     HostDataLength;                 /* 0x00 */
+       U8                      MsgLength;                      /* 0x02 */
+       U8                      Function;                       /* 0x03 */
+       U16                     Reserved1;                      /* 0x04 */
+       U8                      Reserved2;                      /* 0x06 */
+       U8                      MsgFlags;                       /* 0x07 */
+       U8                      VP_ID;                          /* 0x08 */
+       U8                      VF_ID;                          /* 0x09 */
+       U16                     Reserved3;                      /* 0x0A */
+       U16                     Reserved4;                      /* 0x0C */
+       U16                     IOCStatus;                      /* 0x0E */
+       U32                     IOCLogInfo;                     /* 0x10 */
+} MPI2_SEND_HOST_MESSAGE_REPLY, MPI2_POINTER PTR_MPI2_SEND_HOST_MESSAGE_REPLY,
+Mpi2SendHostMessageReply_t, MPI2_POINTER pMpi2SendHostMessageReply_t;
+
+
 /****************************************************************************
 *  FWDownload message
 ****************************************************************************/
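The SendHostMessage request ends in a one-element HostData[] array with HostDataLength carrying the real payload length. Below is a sketch of sizing such a request frame, under the assumption (illustrative only; the MPI 2.0 specification is authoritative) that HostDataLength counts 32-bit words.

/* Illustrative only; assumes HostDataLength is a count of U32 words and a
 * kernel build environment (offsetof from <linux/stddef.h>).
 */
static size_t example_send_host_message_size(U16 host_data_words)
{
	/* offsetof() excludes the trailing HostData[] flexible tail */
	return offsetof(MPI2_SEND_HOST_MESSAGE_REQUEST, HostData) +
	    (size_t)host_data_words * sizeof(U32);
}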
index 81209ca..beda04a 100644 (file)
@@ -81,6 +81,15 @@ static int missing_delay[2] = {-1, -1};
 module_param_array(missing_delay, int, NULL, 0);
 MODULE_PARM_DESC(missing_delay, " device missing delay , io missing delay");
 
+static int mpt2sas_fwfault_debug;
+MODULE_PARM_DESC(mpt2sas_fwfault_debug, " enable detection of firmware fault "
+       "and halt firmware - (default=0)");
+
+static int disable_discovery = -1;
+module_param(disable_discovery, int, 0);
+MODULE_PARM_DESC(disable_discovery, " disable discovery ");
+
+
 /* diag_buffer_enable is bitwise
  * bit 0 set = TRACE
  * bit 1 set = SNAPSHOT
@@ -93,14 +102,6 @@ module_param(diag_buffer_enable, int, 0);
 MODULE_PARM_DESC(diag_buffer_enable, " post diag buffers "
     "(TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)");
 
-static int mpt2sas_fwfault_debug;
-MODULE_PARM_DESC(mpt2sas_fwfault_debug, " enable detection of firmware fault "
-    "and halt firmware - (default=0)");
-
-static int disable_discovery = -1;
-module_param(disable_discovery, int, 0);
-MODULE_PARM_DESC(disable_discovery, " disable discovery ");
-
 /**
  * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
  *
@@ -691,6 +692,7 @@ mpt2sas_base_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
                memcpy(ioc->base_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
        }
        ioc->base_cmds.status &= ~MPT2_CMD_PENDING;
+
        complete(&ioc->base_cmds.done);
        return 1;
 }
@@ -3469,6 +3471,58 @@ _base_send_ioc_init(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
        return 0;
 }
 
+/**
+ * mpt2sas_port_enable_done - command completion routine for port enable
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt
+ *        0 means the mf is freed from this function.
+ */
+u8
+mpt2sas_port_enable_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+       u32 reply)
+{
+       MPI2DefaultReply_t *mpi_reply;
+       u16 ioc_status;
+
+       mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
+       if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK)
+               return 1;
+
+       if (ioc->port_enable_cmds.status == MPT2_CMD_NOT_USED)
+               return 1;
+
+       ioc->port_enable_cmds.status |= MPT2_CMD_COMPLETE;
+       if (mpi_reply) {
+               ioc->port_enable_cmds.status |= MPT2_CMD_REPLY_VALID;
+               memcpy(ioc->port_enable_cmds.reply, mpi_reply,
+                   mpi_reply->MsgLength*4);
+       }
+       ioc->port_enable_cmds.status &= ~MPT2_CMD_PENDING;
+
+       ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+
+       if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
+               ioc->port_enable_failed = 1;
+
+       if (ioc->is_driver_loading) {
+               if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
+                       mpt2sas_port_enable_complete(ioc);
+                       return 1;
+               } else {
+                       ioc->start_scan_failed = ioc_status;
+                       ioc->start_scan = 0;
+                       return 1;
+               }
+       }
+       complete(&ioc->port_enable_cmds.done);
+       return 1;
+}
+
+
 /**
  * _base_send_port_enable - send port_enable(discovery stuff) to firmware
  * @ioc: per adapter object
@@ -3480,66 +3534,150 @@ static int
 _base_send_port_enable(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
 {
        Mpi2PortEnableRequest_t *mpi_request;
-       u32 ioc_state;
+       Mpi2PortEnableReply_t *mpi_reply;
        unsigned long timeleft;
        int r = 0;
        u16 smid;
+       u16 ioc_status;
 
        printk(MPT2SAS_INFO_FMT "sending port enable !!\n", ioc->name);
 
-       if (ioc->base_cmds.status & MPT2_CMD_PENDING) {
+       if (ioc->port_enable_cmds.status & MPT2_CMD_PENDING) {
                printk(MPT2SAS_ERR_FMT "%s: internal command already in use\n",
                    ioc->name, __func__);
                return -EAGAIN;
        }
 
-       smid = mpt2sas_base_get_smid(ioc, ioc->base_cb_idx);
+       smid = mpt2sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
        if (!smid) {
                printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
                    ioc->name, __func__);
                return -EAGAIN;
        }
 
-       ioc->base_cmds.status = MPT2_CMD_PENDING;
+       ioc->port_enable_cmds.status = MPT2_CMD_PENDING;
        mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
-       ioc->base_cmds.smid = smid;
+       ioc->port_enable_cmds.smid = smid;
        memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
        mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
-       mpi_request->VF_ID = 0; /* TODO */
-       mpi_request->VP_ID = 0;
 
+       init_completion(&ioc->port_enable_cmds.done);
        mpt2sas_base_put_smid_default(ioc, smid);
-       init_completion(&ioc->base_cmds.done);
-       timeleft = wait_for_completion_timeout(&ioc->base_cmds.done,
+       timeleft = wait_for_completion_timeout(&ioc->port_enable_cmds.done,
            300*HZ);
-       if (!(ioc->base_cmds.status & MPT2_CMD_COMPLETE)) {
+       if (!(ioc->port_enable_cmds.status & MPT2_CMD_COMPLETE)) {
                printk(MPT2SAS_ERR_FMT "%s: timeout\n",
                    ioc->name, __func__);
                _debug_dump_mf(mpi_request,
                    sizeof(Mpi2PortEnableRequest_t)/4);
-               if (ioc->base_cmds.status & MPT2_CMD_RESET)
+               if (ioc->port_enable_cmds.status & MPT2_CMD_RESET)
                        r = -EFAULT;
                else
                        r = -ETIME;
                goto out;
-       } else
-               dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: complete\n",
-                   ioc->name, __func__));
+       }
+       mpi_reply = ioc->port_enable_cmds.reply;
 
-       ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_OPERATIONAL,
-           60, sleep_flag);
-       if (ioc_state) {
-               printk(MPT2SAS_ERR_FMT "%s: failed going to operational state "
-                   " (ioc_state=0x%x)\n", ioc->name, __func__, ioc_state);
+       ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+       if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+               printk(MPT2SAS_ERR_FMT "%s: failed with (ioc_status=0x%08x)\n",
+                   ioc->name, __func__, ioc_status);
                r = -EFAULT;
+               goto out;
        }
  out:
-       ioc->base_cmds.status = MPT2_CMD_NOT_USED;
-       printk(MPT2SAS_INFO_FMT "port enable: %s\n",
-           ioc->name, ((r == 0) ? "SUCCESS" : "FAILED"));
+       ioc->port_enable_cmds.status = MPT2_CMD_NOT_USED;
+       printk(MPT2SAS_INFO_FMT "port enable: %s\n", ioc->name, ((r == 0) ?
+           "SUCCESS" : "FAILED"));
        return r;
 }
 
+/**
+ * mpt2sas_port_enable - initiate firmware discovery (don't wait for reply)
+ * @ioc: per adapter object
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt2sas_port_enable(struct MPT2SAS_ADAPTER *ioc)
+{
+       Mpi2PortEnableRequest_t *mpi_request;
+       u16 smid;
+
+       printk(MPT2SAS_INFO_FMT "sending port enable !!\n", ioc->name);
+
+       if (ioc->port_enable_cmds.status & MPT2_CMD_PENDING) {
+               printk(MPT2SAS_ERR_FMT "%s: internal command already in use\n",
+                   ioc->name, __func__);
+               return -EAGAIN;
+       }
+
+       smid = mpt2sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
+       if (!smid) {
+               printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
+                   ioc->name, __func__);
+               return -EAGAIN;
+       }
+
+       ioc->port_enable_cmds.status = MPT2_CMD_PENDING;
+       mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
+       ioc->port_enable_cmds.smid = smid;
+       memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
+       mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
+
+       mpt2sas_base_put_smid_default(ioc, smid);
+       return 0;
+}
+
+/**
+ * _base_determine_wait_on_discovery - decide whether to wait for discovery
+ * @ioc: per adapter object
+ *
+ * Decide whether to wait on discovery to complete. Used to either
+ * locate boot device, or report volumes ahead of physical devices.
+ *
+ * Returns 1 for wait, 0 for don't wait
+ */
+static int
+_base_determine_wait_on_discovery(struct MPT2SAS_ADAPTER *ioc)
+{
+       /* We wait for discovery to complete if IR firmware is loaded.
+        * The sas topology events arrive before PD events, so we need time to
+        * turn on the bit in ioc->pd_handles to indicate a PD.
+        * Also, it may be required to report Volumes ahead of physical
+        * devices when MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING is set.
+        */
+       if (ioc->ir_firmware)
+               return 1;
+
+       /* if no Bios, then we don't need to wait */
+       if (!ioc->bios_pg3.BiosVersion)
+               return 0;
+
+       /* Bios is present, so we drop down here.
+        *
+        * If there are any entries in the Bios Page 2, then we wait
+        * for discovery to complete.
+        */
+
+       /* Current Boot Device */
+       if ((ioc->bios_pg2.CurrentBootDeviceForm &
+           MPI2_BIOSPAGE2_FORM_MASK) ==
+           MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
+       /* Request Boot Device */
+          (ioc->bios_pg2.ReqBootDeviceForm &
+           MPI2_BIOSPAGE2_FORM_MASK) ==
+           MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
+       /* Alternate Request Boot Device */
+          (ioc->bios_pg2.ReqAltBootDeviceForm &
+           MPI2_BIOSPAGE2_FORM_MASK) ==
+           MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED)
+               return 0;
+
+       return 1;
+}
+
+
 /**
  * _base_unmask_events - turn on notification for this event
  * @ioc: per adapter object
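The port-enable rework above gives the driver a non-blocking mpt2sas_port_enable() whose completion is reported through mpt2sas_port_enable_done() and published via the new is_driver_loading/start_scan/start_scan_failed fields. What follows is a simplified sketch of how a scan_start/scan_finished pair is expected to consume that interface; the field usage mirrors this series, but the bodies are illustrative rather than the driver's verbatim _scsih_scan_start/_scsih_scan_finished code.

/* Illustrative sketch of the asynchronous scan flow, not verbatim driver code. */
static void example_scan_start(struct Scsi_Host *shost)
{
	struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);

	ioc->start_scan = 1;
	if (mpt2sas_port_enable(ioc))	/* kick discovery, do not wait */
		printk(MPT2SAS_ERR_FMT "port enable: FAILED to start\n",
		    ioc->name);
}

static int example_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);

	if (ioc->start_scan)	/* cleared from _mpt2sas_fw_work on completion */
		return 0;	/* still discovering; SCSI core will poll again */
	if (ioc->start_scan_failed)
		printk(MPT2SAS_ERR_FMT
		    "port enable failed: ioc_status(0x%04x)\n",
		    ioc->name, ioc->start_scan_failed);
	ioc->is_driver_loading = 0;
	return 1;	/* report the scan as complete */
}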
@@ -3962,6 +4100,7 @@ _base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
  skip_init_reply_post_host_index:
 
        _base_unmask_interrupts(ioc);
+
        r = _base_event_notification(ioc, sleep_flag);
        if (r)
                return r;
@@ -3969,7 +4108,18 @@ _base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
        if (sleep_flag == CAN_SLEEP)
                _base_static_config_pages(ioc);
 
-       if (ioc->wait_for_port_enable_to_complete && ioc->is_warpdrive) {
+
+       if (ioc->is_driver_loading) {
+               ioc->wait_for_discovery_to_complete =
+                   _base_determine_wait_on_discovery(ioc);
+               return r; /* scan_start and scan_finished support */
+       }
+
+
+       if (ioc->wait_for_discovery_to_complete && ioc->is_warpdrive) {
                if (ioc->manu_pg10.OEMIdentifier  == 0x80) {
                        hide_flag = (u8) (ioc->manu_pg10.OEMSpecificFlags0 &
                            MFG_PAGE10_HIDE_SSDS_MASK);
@@ -3978,13 +4128,6 @@ _base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
                }
        }
 
-       if (ioc->wait_for_port_enable_to_complete) {
-               if (diag_buffer_enable != 0)
-                       mpt2sas_enable_diag_buffer(ioc, diag_buffer_enable);
-               if (disable_discovery > 0)
-                       return r;
-       }
-
        r = _base_send_port_enable(ioc, sleep_flag);
        if (r)
                return r;
@@ -4121,6 +4264,10 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
        ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
        ioc->base_cmds.status = MPT2_CMD_NOT_USED;
 
+       /* port_enable command bits */
+       ioc->port_enable_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
+       ioc->port_enable_cmds.status = MPT2_CMD_NOT_USED;
+
        /* transport internal command bits */
        ioc->transport_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
        ioc->transport_cmds.status = MPT2_CMD_NOT_USED;
@@ -4162,8 +4309,6 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
                goto out_free_resources;
        }
 
-       init_completion(&ioc->shost_recovery_done);
-
        for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
                ioc->event_masks[i] = -1;
 
@@ -4186,7 +4331,6 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
                _base_update_missing_delay(ioc, missing_delay[0],
                    missing_delay[1]);
 
-       mpt2sas_base_start_watchdog(ioc);
        return 0;
 
  out_free_resources:
@@ -4204,6 +4348,7 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
        kfree(ioc->scsih_cmds.reply);
        kfree(ioc->config_cmds.reply);
        kfree(ioc->base_cmds.reply);
+       kfree(ioc->port_enable_cmds.reply);
        kfree(ioc->ctl_cmds.reply);
        kfree(ioc->ctl_cmds.sense);
        kfree(ioc->pfacts);
@@ -4243,6 +4388,7 @@ mpt2sas_base_detach(struct MPT2SAS_ADAPTER *ioc)
        kfree(ioc->ctl_cmds.reply);
        kfree(ioc->ctl_cmds.sense);
        kfree(ioc->base_cmds.reply);
+       kfree(ioc->port_enable_cmds.reply);
        kfree(ioc->tm_cmds.reply);
        kfree(ioc->transport_cmds.reply);
        kfree(ioc->scsih_cmds.reply);
@@ -4284,6 +4430,20 @@ _base_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
                        mpt2sas_base_free_smid(ioc, ioc->base_cmds.smid);
                        complete(&ioc->base_cmds.done);
                }
+               if (ioc->port_enable_cmds.status & MPT2_CMD_PENDING) {
+                       ioc->port_enable_failed = 1;
+                       ioc->port_enable_cmds.status |= MPT2_CMD_RESET;
+                       mpt2sas_base_free_smid(ioc, ioc->port_enable_cmds.smid);
+                       if (ioc->is_driver_loading) {
+                               ioc->start_scan_failed =
+                                   MPI2_IOCSTATUS_INTERNAL_ERROR;
+                               ioc->start_scan = 0;
+                               ioc->port_enable_cmds.status =
+                                               MPT2_CMD_NOT_USED;
+                       } else
+                               complete(&ioc->port_enable_cmds.done);
+
+               }
                if (ioc->config_cmds.status & MPT2_CMD_PENDING) {
                        ioc->config_cmds.status |= MPT2_CMD_RESET;
                        mpt2sas_base_free_smid(ioc, ioc->config_cmds.smid);
@@ -4349,7 +4509,6 @@ mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag,
 {
        int r;
        unsigned long flags;
-       u8 pe_complete = ioc->wait_for_port_enable_to_complete;
 
        dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name,
            __func__));
@@ -4396,7 +4555,8 @@ mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag,
        /* If this hard reset is called while port enable is active, then
         * there is no reason to call make_ioc_operational
         */
-       if (pe_complete) {
+       if (ioc->is_driver_loading && ioc->port_enable_failed) {
+               ioc->remove_host = 1;
                r = -EFAULT;
                goto out;
        }
@@ -4410,7 +4570,6 @@ mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag,
        spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
        ioc->ioc_reset_in_progress_status = r;
        ioc->shost_recovery = 0;
-       complete(&ioc->shost_recovery_done);
        spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
        mutex_unlock(&ioc->reset_in_progress_mutex);
 
index 59354db..3c3babc 100644 (file)
 #define MPT2SAS_DRIVER_NAME            "mpt2sas"
 #define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>"
 #define MPT2SAS_DESCRIPTION    "LSI MPT Fusion SAS 2.0 Device Driver"
-#define MPT2SAS_DRIVER_VERSION         "09.100.00.01"
-#define MPT2SAS_MAJOR_VERSION          09
+#define MPT2SAS_DRIVER_VERSION         "10.100.00.00"
+#define MPT2SAS_MAJOR_VERSION          10
 #define MPT2SAS_MINOR_VERSION          100
 #define MPT2SAS_BUILD_VERSION          00
-#define MPT2SAS_RELEASE_VERSION                01
+#define MPT2SAS_RELEASE_VERSION                00
 
 /*
  * Set MPT2SAS_SG_DEPTH value based on user input.
@@ -655,7 +655,12 @@ enum mutex_type {
  * @ignore_loginfos: ignore loginfos during task management
  * @remove_host: flag for when driver unloads, to avoid sending dev resets
  * @pci_error_recovery: flag to prevent ioc access until slot reset completes
- * @wait_for_port_enable_to_complete:
+ * @wait_for_discovery_to_complete: flag set at driver load time while
+ *                                  waiting for devices to be reported
+ * @is_driver_loading: flag set at driver load time
+ * @port_enable_failed: flag set when port enable has failed
+ * @start_scan: flag set from scan_start callback, cleared from _mpt2sas_fw_work
+ * @start_scan_failed: set when port enable fails, holds the failing ioc_status
  * @msix_enable: flag indicating msix is enabled
  * @msix_vector_count: number msix vectors
  * @cpu_msix_table: table for mapping cpus to msix index
@@ -790,15 +795,20 @@ struct MPT2SAS_ADAPTER {
        u8              shost_recovery;
 
        struct mutex    reset_in_progress_mutex;
-       struct completion       shost_recovery_done;
        spinlock_t      ioc_reset_in_progress_lock;
        u8              ioc_link_reset_in_progress;
-       int             ioc_reset_in_progress_status;
+       u8              ioc_reset_in_progress_status;
 
        u8              ignore_loginfos;
        u8              remove_host;
        u8              pci_error_recovery;
-       u8              wait_for_port_enable_to_complete;
+       u8              wait_for_discovery_to_complete;
+       struct completion       port_enable_done;
+       u8              is_driver_loading;
+       u8              port_enable_failed;
+
+       u8              start_scan;
+       u16             start_scan_failed;
 
        u8              msix_enable;
        u16             msix_vector_count;
@@ -814,11 +824,13 @@ struct MPT2SAS_ADAPTER {
        u8              scsih_cb_idx;
        u8              ctl_cb_idx;
        u8              base_cb_idx;
+       u8              port_enable_cb_idx;
        u8              config_cb_idx;
        u8              tm_tr_cb_idx;
        u8              tm_tr_volume_cb_idx;
        u8              tm_sas_control_cb_idx;
        struct _internal_cmd base_cmds;
+       struct _internal_cmd port_enable_cmds;
        struct _internal_cmd transport_cmds;
        struct _internal_cmd scsih_cmds;
        struct _internal_cmd tm_cmds;
@@ -1001,6 +1013,8 @@ void mpt2sas_base_release_callback_handler(u8 cb_idx);
 
 u8 mpt2sas_base_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
     u32 reply);
+u8 mpt2sas_port_enable_done(struct MPT2SAS_ADAPTER *ioc, u16 smid,
+       u8 msix_index,  u32 reply);
 void *mpt2sas_base_get_reply_virt_addr(struct MPT2SAS_ADAPTER *ioc, u32 phys_addr);
 
 u32 mpt2sas_base_get_iocstate(struct MPT2SAS_ADAPTER *ioc, int cooked);
@@ -1015,6 +1029,8 @@ void mpt2sas_base_validate_event_type(struct MPT2SAS_ADAPTER *ioc, u32 *event_ty
 
 void mpt2sas_halt_firmware(struct MPT2SAS_ADAPTER *ioc);
 
+int mpt2sas_port_enable(struct MPT2SAS_ADAPTER *ioc);
+
 /* scsih shared API */
 u8 mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
     u32 reply);
@@ -1032,6 +1048,8 @@ struct _sas_node *mpt2sas_scsih_expander_find_by_sas_address(struct MPT2SAS_ADAP
 struct _sas_device *mpt2sas_scsih_sas_device_find_by_sas_address(
     struct MPT2SAS_ADAPTER *ioc, u64 sas_address);
 
+void mpt2sas_port_enable_complete(struct MPT2SAS_ADAPTER *ioc);
+
 void mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase);
 
 /* config shared API */
index 2b11010..36ea0b2 100644 (file)
@@ -1356,6 +1356,9 @@ mpt2sas_config_get_volume_handle(struct MPT2SAS_ADAPTER *ioc, u16 pd_handle,
        Mpi2ConfigReply_t mpi_reply;
        int r, i, config_page_sz;
        u16 ioc_status;
+       int config_num;
+       u16 element_type;
+       u16 phys_disk_dev_handle;
 
        *volume_handle = 0;
        memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
@@ -1371,35 +1374,53 @@ mpt2sas_config_get_volume_handle(struct MPT2SAS_ADAPTER *ioc, u16 pd_handle,
        if (r)
                goto out;
 
-       mpi_request.PageAddress =
-           cpu_to_le32(MPI2_RAID_PGAD_FORM_ACTIVE_CONFIG);
        mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
        config_page_sz = (le16_to_cpu(mpi_reply.ExtPageLength) * 4);
        config_page = kmalloc(config_page_sz, GFP_KERNEL);
-       if (!config_page)
-               goto out;
-       r = _config_request(ioc, &mpi_request, &mpi_reply,
-           MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
-           config_page_sz);
-       if (r)
+       if (!config_page) {
+               r = -1;
                goto out;
-
-       r = -1;
-       ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
-       if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
-               goto out;
-       for (i = 0; i < config_page->NumElements; i++) {
-               if ((le16_to_cpu(config_page->ConfigElement[i].ElementFlags) &
-                   MPI2_RAIDCONFIG0_EFLAGS_MASK_ELEMENT_TYPE) !=
-                   MPI2_RAIDCONFIG0_EFLAGS_VOL_PHYS_DISK_ELEMENT)
-                       continue;
-               if (le16_to_cpu(config_page->ConfigElement[i].
-                   PhysDiskDevHandle) == pd_handle) {
-                       *volume_handle = le16_to_cpu(config_page->
-                           ConfigElement[i].VolDevHandle);
-                       r = 0;
+       }
+       config_num = 0xff;
+       while (1) {
+               mpi_request.PageAddress = cpu_to_le32(config_num +
+                   MPI2_RAID_PGAD_FORM_GET_NEXT_CONFIGNUM);
+               r = _config_request(ioc, &mpi_request, &mpi_reply,
+                   MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+                   config_page_sz);
+               if (r)
+                       goto out;
+               r = -1;
+               ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+                   MPI2_IOCSTATUS_MASK;
+               if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
                        goto out;
+               for (i = 0; i < config_page->NumElements; i++) {
+                       element_type = le16_to_cpu(config_page->
+                           ConfigElement[i].ElementFlags) &
+                           MPI2_RAIDCONFIG0_EFLAGS_MASK_ELEMENT_TYPE;
+                       if (element_type ==
+                           MPI2_RAIDCONFIG0_EFLAGS_VOL_PHYS_DISK_ELEMENT ||
+                           element_type ==
+                           MPI2_RAIDCONFIG0_EFLAGS_OCE_ELEMENT) {
+                               phys_disk_dev_handle =
+                                   le16_to_cpu(config_page->ConfigElement[i].
+                                   PhysDiskDevHandle);
+                               if (phys_disk_dev_handle == pd_handle) {
+                                       *volume_handle =
+                                           le16_to_cpu(config_page->
+                                           ConfigElement[i].VolDevHandle);
+                                       r = 0;
+                                       goto out;
+                               }
+                       } else if (element_type ==
+                           MPI2_RAIDCONFIG0_EFLAGS_HOT_SPARE_ELEMENT) {
+                               *volume_handle = 0;
+                               r = 0;
+                               goto out;
+                       }
                }
+               config_num = config_page->ConfigNum;
        }
  out:
        kfree(config_page);
index 9adb013..aabcb91 100644 (file)
@@ -1207,6 +1207,9 @@ _ctl_do_reset(void __user *arg)
        if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
                return -ENODEV;
 
+       if (ioc->shost_recovery || ioc->pci_error_recovery ||
+               ioc->is_driver_loading)
+               return -EAGAIN;
        dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name,
            __func__));
 
@@ -2178,7 +2181,8 @@ _ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg)
                    !ioc)
                        return -ENODEV;
 
-               if (ioc->shost_recovery || ioc->pci_error_recovery)
+               if (ioc->shost_recovery || ioc->pci_error_recovery ||
+                               ioc->is_driver_loading)
                        return -EAGAIN;
 
                if (_IOC_SIZE(cmd) == sizeof(struct mpt2_ioctl_command)) {
@@ -2297,7 +2301,8 @@ _ctl_compat_mpt_command(struct file *file, unsigned cmd, unsigned long arg)
        if (_ctl_verify_adapter(karg32.hdr.ioc_number, &ioc) == -1 || !ioc)
                return -ENODEV;
 
-       if (ioc->shost_recovery || ioc->pci_error_recovery)
+       if (ioc->shost_recovery || ioc->pci_error_recovery ||
+                       ioc->is_driver_loading)
                return -EAGAIN;
 
        memset(&karg, 0, sizeof(struct mpt2_ioctl_command));
index 1da1aa1..8889b1b 100644 (file)
@@ -71,6 +71,9 @@ static void _firmware_event_work(struct work_struct *work);
 
 static u8 _scsih_check_for_pending_tm(struct MPT2SAS_ADAPTER *ioc, u16 smid);
 
+static void _scsih_scan_start(struct Scsi_Host *shost);
+static int _scsih_scan_finished(struct Scsi_Host *shost, unsigned long time);
+
 /* global parameters */
 LIST_HEAD(mpt2sas_ioc_list);
 
@@ -79,6 +82,7 @@ static u8 scsi_io_cb_idx = -1;
 static u8 tm_cb_idx = -1;
 static u8 ctl_cb_idx = -1;
 static u8 base_cb_idx = -1;
+static u8 port_enable_cb_idx = -1;
 static u8 transport_cb_idx = -1;
 static u8 scsih_cb_idx = -1;
 static u8 config_cb_idx = -1;
@@ -103,6 +107,18 @@ static int max_lun = MPT2SAS_MAX_LUN;
 module_param(max_lun, int, 0);
 MODULE_PARM_DESC(max_lun, " max lun, default=16895 ");
 
+/* diag_buffer_enable is bitwise
+ * bit 0 set = TRACE
+ * bit 1 set = SNAPSHOT
+ * bit 2 set = EXTENDED
+ *
+ * Either bit can be set, or both
+ */
+static int diag_buffer_enable = -1;
+module_param(diag_buffer_enable, int, 0);
+MODULE_PARM_DESC(diag_buffer_enable, " post diag buffers "
+       "(TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)");
+
 /**
  * struct sense_info - common structure for obtaining sense keys
  * @skey: sense key
@@ -117,8 +133,8 @@ struct sense_info {
 
 
 #define MPT2SAS_TURN_ON_FAULT_LED (0xFFFC)
-#define MPT2SAS_RESCAN_AFTER_HOST_RESET (0xFFFF)
-
+#define MPT2SAS_PORT_ENABLE_COMPLETE (0xFFFD)
+#define MPT2SAS_REMOVE_UNRESPONDING_DEVICES (0xFFFF)
 /**
  * struct fw_event_work - firmware event struct
  * @list: link list framework
@@ -372,31 +388,34 @@ _scsih_get_sas_address(struct MPT2SAS_ADAPTER *ioc, u16 handle,
        Mpi2SasDevicePage0_t sas_device_pg0;
        Mpi2ConfigReply_t mpi_reply;
        u32 ioc_status;
+       *sas_address = 0;
 
        if (handle <= ioc->sas_hba.num_phys) {
                *sas_address = ioc->sas_hba.sas_address;
                return 0;
-       } else
-               *sas_address = 0;
+       }
 
        if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
            MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
-               printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
-                   ioc->name, __FILE__, __LINE__, __func__);
+               printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name,
+               __FILE__, __LINE__, __func__);
                return -ENXIO;
        }
 
-       ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
-           MPI2_IOCSTATUS_MASK;
-       if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-               printk(MPT2SAS_ERR_FMT "handle(0x%04x), ioc_status(0x%04x)"
-                   "\nfailure at %s:%d/%s()!\n", ioc->name, handle, ioc_status,
-                    __FILE__, __LINE__, __func__);
-               return -EIO;
+       ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
+       if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
+               *sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
+               return 0;
        }
 
-       *sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
-       return 0;
+       /* we hit this because the given parent handle doesn't exist */
+       if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+               return -ENXIO;
+       /* else error case */
+       printk(MPT2SAS_ERR_FMT "handle(0x%04x), ioc_status(0x%04x), "
+           "failure at %s:%d/%s()!\n", ioc->name, handle, ioc_status,
+            __FILE__, __LINE__, __func__);
+       return -EIO;
 }
 
 /**
@@ -424,7 +443,11 @@ _scsih_determine_boot_device(struct MPT2SAS_ADAPTER *ioc,
        u16 slot;
 
         /* only process this function when driver loads */
-       if (!ioc->wait_for_port_enable_to_complete)
+       if (!ioc->is_driver_loading)
+               return;
+
+        /* no Bios, return immediately */
+       if (!ioc->bios_pg3.BiosVersion)
                return;
 
        if (!is_raid) {
@@ -587,8 +610,15 @@ _scsih_sas_device_add(struct MPT2SAS_ADAPTER *ioc,
        spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 
        if (!mpt2sas_transport_port_add(ioc, sas_device->handle,
-            sas_device->sas_address_parent))
+            sas_device->sas_address_parent)) {
                _scsih_sas_device_remove(ioc, sas_device);
+               } else if (!sas_device->starget) {
+                       if (!ioc->is_driver_loading)
+                               mpt2sas_transport_port_remove(ioc,
+                               sas_device->sas_address,
+                           sas_device->sas_address_parent);
+                       _scsih_sas_device_remove(ioc, sas_device);
+               }
 }
 
 /**
@@ -1400,6 +1430,10 @@ _scsih_slave_destroy(struct scsi_device *sdev)
 {
        struct MPT2SAS_TARGET *sas_target_priv_data;
        struct scsi_target *starget;
+       struct Scsi_Host *shost;
+       struct MPT2SAS_ADAPTER *ioc;
+       struct _sas_device *sas_device;
+       unsigned long flags;
 
        if (!sdev->hostdata)
                return;
@@ -1407,6 +1441,19 @@ _scsih_slave_destroy(struct scsi_device *sdev)
        starget = scsi_target(sdev);
        sas_target_priv_data = starget->hostdata;
        sas_target_priv_data->num_luns--;
+
+       shost = dev_to_shost(&starget->dev);
+       ioc = shost_priv(shost);
+
+       if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
+               spin_lock_irqsave(&ioc->sas_device_lock, flags);
+               sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
+                  sas_target_priv_data->sas_address);
+               if (sas_device)
+                       sas_device->starget = NULL;
+               spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+       }
+
        kfree(sdev->hostdata);
        sdev->hostdata = NULL;
 }
@@ -1598,8 +1645,10 @@ _scsih_set_level(struct scsi_device *sdev, struct _raid_device *raid_device)
  * _scsih_get_volume_capabilities - volume capabilities
  * @ioc: per adapter object
  * @sas_device: the raid_device object
+ *
+ * Returns 0 for success, else 1
  */
-static void
+static int
 _scsih_get_volume_capabilities(struct MPT2SAS_ADAPTER *ioc,
     struct _raid_device *raid_device)
 {
@@ -1612,9 +1661,10 @@ _scsih_get_volume_capabilities(struct MPT2SAS_ADAPTER *ioc,
 
        if ((mpt2sas_config_get_number_pds(ioc, raid_device->handle,
            &num_pds)) || !num_pds) {
-               printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
-                   ioc->name, __FILE__, __LINE__, __func__);
-               return;
+               dfailprintk(ioc, printk(MPT2SAS_WARN_FMT
+                   "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
+                   __func__));
+               return 1;
        }
 
        raid_device->num_pds = num_pds;
@@ -1622,17 +1672,19 @@ _scsih_get_volume_capabilities(struct MPT2SAS_ADAPTER *ioc,
            sizeof(Mpi2RaidVol0PhysDisk_t));
        vol_pg0 = kzalloc(sz, GFP_KERNEL);
        if (!vol_pg0) {
-               printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
-                   ioc->name, __FILE__, __LINE__, __func__);
-               return;
+               dfailprintk(ioc, printk(MPT2SAS_WARN_FMT
+                   "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
+                   __func__));
+               return 1;
        }
 
        if ((mpt2sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0,
             MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) {
-               printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
-                   ioc->name, __FILE__, __LINE__, __func__);
+               dfailprintk(ioc, printk(MPT2SAS_WARN_FMT
+                   "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
+                   __func__));
                kfree(vol_pg0);
-               return;
+               return 1;
        }
 
        raid_device->volume_type = vol_pg0->VolumeType;
@@ -1652,6 +1704,7 @@ _scsih_get_volume_capabilities(struct MPT2SAS_ADAPTER *ioc,
        }
 
        kfree(vol_pg0);
+       return 0;
 }
 /**
  * _scsih_disable_ddio - Disable direct I/O for all the volumes
@@ -1922,13 +1975,20 @@ _scsih_slave_configure(struct scsi_device *sdev)
                     sas_target_priv_data->handle);
                spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
                if (!raid_device) {
-                       printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
-                           ioc->name, __FILE__, __LINE__, __func__);
-                       return 0;
+                       dfailprintk(ioc, printk(MPT2SAS_WARN_FMT
+                           "failure at %s:%d/%s()!\n", ioc->name, __FILE__,
+                           __LINE__, __func__));
+                       return 1;
                }
 
                _scsih_get_volume_capabilities(ioc, raid_device);
 
+               if (_scsih_get_volume_capabilities(ioc, raid_device)) {
+                       dfailprintk(ioc, printk(MPT2SAS_WARN_FMT
+                           "failure at %s:%d/%s()!\n", ioc->name, __FILE__,
+                           __LINE__, __func__));
+                       return 1;
+               }
                /*
                 * WARPDRIVE: Initialize the required data for Direct IO
                 */
@@ -2002,11 +2062,22 @@ _scsih_slave_configure(struct scsi_device *sdev)
        if (sas_device) {
                if (sas_target_priv_data->flags &
                    MPT_TARGET_FLAGS_RAID_COMPONENT) {
-                       mpt2sas_config_get_volume_handle(ioc,
-                           sas_device->handle, &sas_device->volume_handle);
-                       mpt2sas_config_get_volume_wwid(ioc,
+                       if (mpt2sas_config_get_volume_handle(ioc,
+                           sas_device->handle, &sas_device->volume_handle)) {
+                               dfailprintk(ioc, printk(MPT2SAS_WARN_FMT
+                                   "failure at %s:%d/%s()!\n", ioc->name,
+                                   __FILE__, __LINE__, __func__));
+                               return 1;
+                       }
+                       if (sas_device->volume_handle &&
+                           mpt2sas_config_get_volume_wwid(ioc,
                            sas_device->volume_handle,
-                           &sas_device->volume_wwid);
+                           &sas_device->volume_wwid)) {
+                               dfailprintk(ioc, printk(MPT2SAS_WARN_FMT
+                                   "failure at %s:%d/%s()!\n", ioc->name,
+                                   __FILE__, __LINE__, __func__));
+                               return 1;
+                       }
                }
                if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
                        qdepth = MPT2SAS_SAS_QUEUE_DEPTH;
@@ -2035,6 +2106,11 @@ _scsih_slave_configure(struct scsi_device *sdev)
 
                if (!ssp_target)
                        _scsih_display_sata_capabilities(ioc, sas_device, sdev);
+       } else {
+               dfailprintk(ioc, printk(MPT2SAS_WARN_FMT
+                   "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
+                   __func__));
+               return 1;
        }
 
        _scsih_change_queue_depth(sdev, qdepth, SCSI_QDEPTH_DEFAULT);
@@ -2714,22 +2790,38 @@ _scsih_fw_event_free(struct MPT2SAS_ADAPTER *ioc, struct fw_event_work
 
 
 /**
- * _scsih_queue_rescan - queue a topology rescan from user context
+ * _scsih_error_recovery_delete_devices - remove devices not responding
  * @ioc: per adapter object
  *
  * Return nothing.
  */
 static void
-_scsih_queue_rescan(struct MPT2SAS_ADAPTER *ioc)
+_scsih_error_recovery_delete_devices(struct MPT2SAS_ADAPTER *ioc)
 {
        struct fw_event_work *fw_event;
 
-       if (ioc->wait_for_port_enable_to_complete)
+       if (ioc->is_driver_loading)
                return;
+
+       fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
+       if (!fw_event)
+               return;
+       fw_event->event = MPT2SAS_REMOVE_UNRESPONDING_DEVICES;
+       fw_event->ioc = ioc;
+       _scsih_fw_event_add(ioc, fw_event);
+}
+
+/**
+ * mpt2sas_port_enable_complete - port enable completed (fake event)
+ * @ioc: per adapter object
+ *
+ * Return nothing.
+ */
+void
+mpt2sas_port_enable_complete(struct MPT2SAS_ADAPTER *ioc)
+{
+       struct fw_event_work *fw_event;
+
        fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
        if (!fw_event)
                return;
-       fw_event->event = MPT2SAS_RESCAN_AFTER_HOST_RESET;
+       fw_event->event = MPT2SAS_PORT_ENABLE_COMPLETE;
        fw_event->ioc = ioc;
        _scsih_fw_event_add(ioc, fw_event);
 }
@@ -2977,14 +3069,27 @@ _scsih_tm_tr_send(struct MPT2SAS_ADAPTER *ioc, u16 handle)
        Mpi2SCSITaskManagementRequest_t *mpi_request;
        u16 smid;
        struct _sas_device *sas_device;
-       struct MPT2SAS_TARGET *sas_target_priv_data;
+       struct MPT2SAS_TARGET *sas_target_priv_data = NULL;
+       u64 sas_address = 0;
        unsigned long flags;
        struct _tr_list *delayed_tr;
+       u32 ioc_state;
 
-       if (ioc->shost_recovery || ioc->remove_host ||
-           ioc->pci_error_recovery) {
-               dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: host reset in "
-                  "progress!\n", __func__, ioc->name));
+       if (ioc->remove_host) {
+               dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: host has been "
+                   "removed: handle(0x%04x)\n", __func__, ioc->name, handle));
+               return;
+       } else if (ioc->pci_error_recovery) {
+               dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: host in pci "
+                   "error recovery: handle(0x%04x)\n", __func__, ioc->name,
+                   handle));
+               return;
+       }
+       ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
+       if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+               dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: host is not "
+                  "operational: handle(0x%04x)\n", __func__, ioc->name,
+                  handle));
                return;
        }
 
@@ -2998,13 +3103,18 @@ _scsih_tm_tr_send(struct MPT2SAS_ADAPTER *ioc, u16 handle)
             sas_device->starget->hostdata) {
                sas_target_priv_data = sas_device->starget->hostdata;
                sas_target_priv_data->deleted = 1;
-               dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
-                   "setting delete flag: handle(0x%04x), "
-                   "sas_addr(0x%016llx)\n", ioc->name, handle,
-                   (unsigned long long) sas_device->sas_address));
+               sas_address = sas_device->sas_address;
        }
        spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 
+       if (sas_target_priv_data) {
+               dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "setting delete flag: "
+               "handle(0x%04x), sas_addr(0x%016llx)\n", ioc->name, handle,
+                       (unsigned long long)sas_address));
+               _scsih_ublock_io_device(ioc, handle);
+               sas_target_priv_data->handle = MPT2SAS_INVALID_DEVICE_HANDLE;
+       }
+
        smid = mpt2sas_base_get_smid_hpr(ioc, ioc->tm_tr_cb_idx);
        if (!smid) {
                delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
@@ -3185,11 +3295,21 @@ _scsih_tm_tr_complete(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
            mpt2sas_base_get_reply_virt_addr(ioc, reply);
        Mpi2SasIoUnitControlRequest_t *mpi_request;
        u16 smid_sas_ctrl;
+       u32 ioc_state;
 
-       if (ioc->shost_recovery || ioc->remove_host ||
-           ioc->pci_error_recovery) {
-               dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: host reset in "
-                  "progress!\n", __func__, ioc->name));
+       if (ioc->remove_host) {
+               dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: host has been "
+                  "removed\n", __func__, ioc->name));
+               return 1;
+       } else if (ioc->pci_error_recovery) {
+               dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: host in pci "
+                   "error recovery\n", __func__, ioc->name));
+               return 1;
+       }
+       ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
+       if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+               dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: host is not "
+                   "operational\n", __func__, ioc->name));
                return 1;
        }
 
@@ -5099,7 +5219,7 @@ _scsih_add_device(struct MPT2SAS_ADAPTER *ioc, u16 handle, u8 phy_num, u8 is_pd)
        /* get device name */
        sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName);
 
-       if (ioc->wait_for_port_enable_to_complete)
+       if (ioc->wait_for_discovery_to_complete)
                _scsih_sas_device_init_add(ioc, sas_device);
        else
                _scsih_sas_device_add(ioc, sas_device);
@@ -5135,6 +5255,9 @@ _scsih_remove_device(struct MPT2SAS_ADAPTER *ioc,
        if (sas_device_backup.starget && sas_device_backup.starget->hostdata) {
                sas_target_priv_data = sas_device_backup.starget->hostdata;
                sas_target_priv_data->deleted = 1;
+               _scsih_ublock_io_device(ioc, sas_device_backup.handle);
+               sas_target_priv_data->handle =
+                    MPT2SAS_INVALID_DEVICE_HANDLE;
        }
 
        _scsih_ublock_io_device(ioc, sas_device_backup.handle);
@@ -5288,7 +5411,7 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc,
                _scsih_sas_topology_change_event_debug(ioc, event_data);
 #endif
 
-       if (ioc->shost_recovery || ioc->remove_host || ioc->pci_error_recovery)
+       if (ioc->remove_host || ioc->pci_error_recovery)
                return;
 
        if (!ioc->sas_hba.num_phys)
@@ -5349,6 +5472,9 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc,
                switch (reason_code) {
                case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
 
+                       if (ioc->shost_recovery)
+                               break;
+
                        if (link_rate == prev_link_rate)
                                break;
 
@@ -5362,6 +5488,9 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc,
                        break;
                case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
 
+                       if (ioc->shost_recovery)
+                               break;
+
                        mpt2sas_transport_update_links(ioc, sas_address,
                            handle, phy_number, link_rate);
 
@@ -5622,7 +5751,7 @@ broadcast_aen_retry:
        termination_count = 0;
        query_count = 0;
        for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
-               if (ioc->ioc_reset_in_progress_status)
+               if (ioc->shost_recovery)
                        goto out;
                scmd = _scsih_scsi_lookup_get(ioc, smid);
                if (!scmd)
@@ -5644,7 +5773,7 @@ broadcast_aen_retry:
                lun = sas_device_priv_data->lun;
                query_count++;
 
-               if (ioc->ioc_reset_in_progress_status)
+               if (ioc->shost_recovery)
                        goto out;
 
                spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
@@ -5686,7 +5815,7 @@ broadcast_aen_retry:
                        goto broadcast_aen_retry;
                }
 
-               if (ioc->ioc_reset_in_progress_status)
+               if (ioc->shost_recovery)
                        goto out_no_lock;
 
                r = mpt2sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id,
@@ -5725,7 +5854,7 @@ broadcast_aen_retry:
            ioc->name, __func__, query_count, termination_count));
 
        ioc->broadcast_aen_busy = 0;
-       if (!ioc->ioc_reset_in_progress_status)
+       if (!ioc->shost_recovery)
                _scsih_ublock_io_all_device(ioc);
        mutex_unlock(&ioc->tm_cmds.mutex);
 }
@@ -5789,8 +5918,11 @@ _scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach)
 static void
 _scsih_reprobe_target(struct scsi_target *starget, int no_uld_attach)
 {
-       struct MPT2SAS_TARGET *sas_target_priv_data = starget->hostdata;
+       struct MPT2SAS_TARGET *sas_target_priv_data;
 
+       if (starget == NULL)
+               return;
+       sas_target_priv_data = starget->hostdata;
        if (no_uld_attach)
                sas_target_priv_data->flags |= MPT_TARGET_FLAGS_RAID_COMPONENT;
        else
@@ -5845,7 +5977,7 @@ _scsih_sas_volume_add(struct MPT2SAS_ADAPTER *ioc,
        raid_device->handle = handle;
        raid_device->wwid = wwid;
        _scsih_raid_device_add(ioc, raid_device);
-       if (!ioc->wait_for_port_enable_to_complete) {
+       if (!ioc->wait_for_discovery_to_complete) {
                rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
                    raid_device->id, 0);
                if (rc)
@@ -6127,6 +6259,10 @@ _scsih_sas_ir_config_change_event(struct MPT2SAS_ADAPTER *ioc,
                _scsih_sas_ir_config_change_event_debug(ioc, event_data);
 
 #endif
+
+       if (ioc->shost_recovery)
+               return;
+
        foreign_config = (le32_to_cpu(event_data->Flags) &
            MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 1 : 0;
 
@@ -6185,6 +6321,9 @@ _scsih_sas_ir_volume_event(struct MPT2SAS_ADAPTER *ioc,
        int rc;
        Mpi2EventDataIrVolume_t *event_data = fw_event->event_data;
 
+       if (ioc->shost_recovery)
+               return;
+
        if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
                return;
 
@@ -6267,6 +6406,9 @@ _scsih_sas_ir_physical_disk_event(struct MPT2SAS_ADAPTER *ioc,
        Mpi2EventDataIrPhysicalDisk_t *event_data = fw_event->event_data;
        u64 sas_address;
 
+       if (ioc->shost_recovery)
+               return;
+
        if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED)
                return;
 
@@ -6510,10 +6652,10 @@ _scsih_search_responding_sas_devices(struct MPT2SAS_ADAPTER *ioc)
        u32 device_info;
        u16 slot;
 
-       printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, __func__);
+       printk(MPT2SAS_INFO_FMT "search for end-devices: start\n", ioc->name);
 
        if (list_empty(&ioc->sas_device_list))
-               return;
+               goto out;
 
        handle = 0xFFFF;
        while (!(mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply,
@@ -6532,6 +6674,9 @@ _scsih_search_responding_sas_devices(struct MPT2SAS_ADAPTER *ioc)
                _scsih_mark_responding_sas_device(ioc, sas_address, slot,
                    handle);
        }
+out:
+       printk(MPT2SAS_INFO_FMT "search for end-devices: complete\n",
+           ioc->name);
 }
 
 /**
@@ -6607,10 +6752,14 @@ _scsih_search_responding_raid_devices(struct MPT2SAS_ADAPTER *ioc)
        u16 handle;
        u8 phys_disk_num;
 
-       printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, __func__);
+       if (!ioc->ir_firmware)
+               return;
+
+       printk(MPT2SAS_INFO_FMT "search for raid volumes: start\n",
+           ioc->name);
 
        if (list_empty(&ioc->raid_device_list))
-               return;
+               goto out;
 
        handle = 0xFFFF;
        while (!(mpt2sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
@@ -6649,6 +6798,9 @@ _scsih_search_responding_raid_devices(struct MPT2SAS_ADAPTER *ioc)
                        set_bit(handle, ioc->pd_handles);
                }
        }
+out:
+       printk(MPT2SAS_INFO_FMT "search for responding raid volumes: "
+           "complete\n", ioc->name);
 }
 
 /**
@@ -6708,10 +6860,10 @@ _scsih_search_responding_expanders(struct MPT2SAS_ADAPTER *ioc)
        u64 sas_address;
        u16 handle;
 
-       printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, __func__);
+       printk(MPT2SAS_INFO_FMT "search for expanders: start\n", ioc->name);
 
        if (list_empty(&ioc->sas_expander_list))
-               return;
+               goto out;
 
        handle = 0xFFFF;
        while (!(mpt2sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
@@ -6730,6 +6882,8 @@ _scsih_search_responding_expanders(struct MPT2SAS_ADAPTER *ioc)
                _scsih_mark_responding_expander(ioc, sas_address, handle);
        }
 
+ out:
+       printk(MPT2SAS_INFO_FMT "search for expanders: complete\n", ioc->name);
 }
 
 /**
@@ -6745,6 +6899,8 @@ _scsih_remove_unresponding_sas_devices(struct MPT2SAS_ADAPTER *ioc)
        struct _sas_node *sas_expander;
        struct _raid_device *raid_device, *raid_device_next;
 
+       printk(MPT2SAS_INFO_FMT "removing unresponding devices: start\n",
+           ioc->name);
 
        list_for_each_entry_safe(sas_device, sas_device_next,
            &ioc->sas_device_list, list) {
@@ -6764,6 +6920,9 @@ _scsih_remove_unresponding_sas_devices(struct MPT2SAS_ADAPTER *ioc)
                _scsih_remove_device(ioc, sas_device);
        }
 
+       if (!ioc->ir_firmware)
+               goto retry_expander_search;
+
        list_for_each_entry_safe(raid_device, raid_device_next,
            &ioc->raid_device_list, list) {
                if (raid_device->responding) {
@@ -6790,52 +6949,170 @@ _scsih_remove_unresponding_sas_devices(struct MPT2SAS_ADAPTER *ioc)
                mpt2sas_expander_remove(ioc, sas_expander->sas_address);
                goto retry_expander_search;
        }
+       printk(MPT2SAS_INFO_FMT "removing unresponding devices: complete\n",
+           ioc->name);
+       /* unblock devices */
+       _scsih_ublock_io_all_device(ioc);
+}
+
+static void
+_scsih_refresh_expander_links(struct MPT2SAS_ADAPTER *ioc,
+       struct _sas_node *sas_expander, u16 handle)
+{
+       Mpi2ExpanderPage1_t expander_pg1;
+       Mpi2ConfigReply_t mpi_reply;
+       int i;
+
+       for (i = 0 ; i < sas_expander->num_phys ; i++) {
+               if ((mpt2sas_config_get_expander_pg1(ioc, &mpi_reply,
+                   &expander_pg1, i, handle))) {
+                       printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+                           ioc->name, __FILE__, __LINE__, __func__);
+                       return;
+               }
+
+               mpt2sas_transport_update_links(ioc, sas_expander->sas_address,
+                   le16_to_cpu(expander_pg1.AttachedDevHandle), i,
+                   expander_pg1.NegotiatedLinkRate >> 4);
+       }
 }
 
 /**
- * _scsih_hide_unhide_sas_devices - add/remove device to/from OS
+ * _scsih_scan_for_devices_after_reset - scan for devices after host reset
  * @ioc: per adapter object
  *
  * Return nothing.
  */
 static void
-_scsih_hide_unhide_sas_devices(struct MPT2SAS_ADAPTER *ioc)
+_scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
 {
-       struct _sas_device *sas_device, *sas_device_next;
+       Mpi2ExpanderPage0_t expander_pg0;
+       Mpi2SasDevicePage0_t sas_device_pg0;
+       Mpi2RaidVolPage1_t volume_pg1;
+       Mpi2RaidVolPage0_t volume_pg0;
+       Mpi2RaidPhysDiskPage0_t pd_pg0;
+       Mpi2EventIrConfigElement_t element;
+       Mpi2ConfigReply_t mpi_reply;
+       u8 phys_disk_num;
+       u16 ioc_status;
+       u16 handle, parent_handle;
+       u64 sas_address;
+       struct _sas_device *sas_device;
+       struct _sas_node *expander_device;
+       static struct _raid_device *raid_device;
 
-       if (!ioc->is_warpdrive || ioc->mfg_pg10_hide_flag !=
-           MFG_PAGE10_HIDE_IF_VOL_PRESENT)
-               return;
+       printk(MPT2SAS_INFO_FMT "scan devices: start\n", ioc->name);
 
-       if (ioc->hide_drives) {
-               if (_scsih_get_num_volumes(ioc))
-                       return;
-               ioc->hide_drives = 0;
-               list_for_each_entry_safe(sas_device, sas_device_next,
-                   &ioc->sas_device_list, list) {
-                       if (!mpt2sas_transport_port_add(ioc, sas_device->handle,
-                               sas_device->sas_address_parent)) {
-                               _scsih_sas_device_remove(ioc, sas_device);
-                       } else if (!sas_device->starget) {
-                               mpt2sas_transport_port_remove(ioc,
-                                   sas_device->sas_address,
-                                   sas_device->sas_address_parent);
-                               _scsih_sas_device_remove(ioc, sas_device);
-                       }
+       _scsih_sas_host_refresh(ioc);
+
+       /* expanders */
+       handle = 0xFFFF;
+       while (!(mpt2sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
+           MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
+               ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+                   MPI2_IOCSTATUS_MASK;
+               if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+                       break;
+               handle = le16_to_cpu(expander_pg0.DevHandle);
+               expander_device = mpt2sas_scsih_expander_find_by_sas_address(
+                   ioc, le64_to_cpu(expander_pg0.SASAddress));
+               if (expander_device)
+                       _scsih_refresh_expander_links(ioc, expander_device,
+                           handle);
+               else
+                       _scsih_expander_add(ioc, handle);
+       }
+
+       if (!ioc->ir_firmware)
+               goto skip_to_sas;
+
+       /* phys disk */
+       phys_disk_num = 0xFF;
+       while (!(mpt2sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
+           &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
+           phys_disk_num))) {
+               ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+                   MPI2_IOCSTATUS_MASK;
+               if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+                       break;
+               phys_disk_num = pd_pg0.PhysDiskNum;
+               handle = le16_to_cpu(pd_pg0.DevHandle);
+               sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+               if (sas_device)
+                       continue;
+               if (mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply,
+                   &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
+                   handle) != 0)
+                       continue;
+               parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
+               if (!_scsih_get_sas_address(ioc, parent_handle,
+                   &sas_address)) {
+                       mpt2sas_transport_update_links(ioc, sas_address,
+                           handle, sas_device_pg0.PhyNum,
+                           MPI2_SAS_NEG_LINK_RATE_1_5);
+                       set_bit(handle, ioc->pd_handles);
+                       _scsih_add_device(ioc, handle, 0, 1);
                }
-       } else {
-               if (!_scsih_get_num_volumes(ioc))
-                       return;
-               ioc->hide_drives = 1;
-               list_for_each_entry_safe(sas_device, sas_device_next,
-                   &ioc->sas_device_list, list) {
-                       mpt2sas_transport_port_remove(ioc,
-                           sas_device->sas_address,
-                           sas_device->sas_address_parent);
+       }
+
+       /* volumes */
+       handle = 0xFFFF;
+       while (!(mpt2sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
+           &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
+               ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+                   MPI2_IOCSTATUS_MASK;
+               if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+                       break;
+               handle = le16_to_cpu(volume_pg1.DevHandle);
+               raid_device = _scsih_raid_device_find_by_wwid(ioc,
+                   le64_to_cpu(volume_pg1.WWID));
+               if (raid_device)
+                       continue;
+               if (mpt2sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
+                   &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
+                    sizeof(Mpi2RaidVolPage0_t)))
+                       continue;
+               if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
+                   volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
+                   volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) {
+                       memset(&element, 0, sizeof(Mpi2EventIrConfigElement_t));
+                       element.ReasonCode = MPI2_EVENT_IR_CHANGE_RC_ADDED;
+                       element.VolDevHandle = volume_pg1.DevHandle;
+                       _scsih_sas_volume_add(ioc, &element);
                }
        }
+
+ skip_to_sas:
+
+       /* sas devices */
+       handle = 0xFFFF;
+       while (!(mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply,
+           &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
+           handle))) {
+               ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+                   MPI2_IOCSTATUS_MASK;
+               if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+                       break;
+               handle = le16_to_cpu(sas_device_pg0.DevHandle);
+               if (!(_scsih_is_end_device(
+                   le32_to_cpu(sas_device_pg0.DeviceInfo))))
+                       continue;
+               sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
+                   le64_to_cpu(sas_device_pg0.SASAddress));
+               if (sas_device)
+                       continue;
+               parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
+               if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) {
+                       mpt2sas_transport_update_links(ioc, sas_address, handle,
+                           sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
+                       _scsih_add_device(ioc, handle, 0, 0);
+               }
+       }
+
+       printk(MPT2SAS_INFO_FMT "scan devices: complete\n", ioc->name);
 }
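
Each discovery loop above follows the same MPI2 config-page walk: start from the 0xFFFF "get next" cookie, fetch the next page by handle, and stop once the reply carries MPI2_IOCSTATUS_CONFIG_INVALID_PAGE. A minimal sketch of the idiom, with get_next_pg0() as an illustrative stand-in for the various mpt2sas_config_get_*_pg0 helpers (not a real function):

        u16 handle = 0xFFFF;            /* "start from the beginning" cookie */
        u16 ioc_status;
        Mpi2ConfigReply_t mpi_reply;
        Mpi2SasDevicePage0_t pg0;

        while (!get_next_pg0(ioc, &mpi_reply, &pg0,
            MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE, handle)) {
                ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
                    MPI2_IOCSTATUS_MASK;
                if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
                        break;          /* no more pages of this type */
                handle = le16_to_cpu(pg0.DevHandle);    /* advance the cursor */
                /* ... act on pg0 ... */
        }
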
 
+
 /**
  * mpt2sas_scsih_reset_handler - reset callback handler (for scsih)
  * @ioc: per adapter object
@@ -6871,7 +7148,6 @@ mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
                }
                _scsih_fw_event_cleanup_queue(ioc);
                _scsih_flush_running_cmds(ioc);
-               _scsih_queue_rescan(ioc);
                break;
        case MPT2_IOC_DONE_RESET:
                dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
@@ -6881,6 +7157,13 @@ mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
                _scsih_search_responding_sas_devices(ioc);
                _scsih_search_responding_raid_devices(ioc);
                _scsih_search_responding_expanders(ioc);
+               if (!ioc->is_driver_loading) {
+                       _scsih_prep_device_scan(ioc);
+                       _scsih_search_responding_sas_devices(ioc);
+                       _scsih_search_responding_raid_devices(ioc);
+                       _scsih_search_responding_expanders(ioc);
+                       _scsih_error_recovery_delete_devices(ioc);
+               }
                break;
        }
 }
@@ -6898,7 +7181,6 @@ _firmware_event_work(struct work_struct *work)
 {
        struct fw_event_work *fw_event = container_of(work,
            struct fw_event_work, delayed_work.work);
-       unsigned long flags;
        struct MPT2SAS_ADAPTER *ioc = fw_event->ioc;
 
        /* the queue is being flushed so ignore this event */
@@ -6908,23 +7190,21 @@ _firmware_event_work(struct work_struct *work)
                return;
        }
 
-       if (fw_event->event == MPT2SAS_RESCAN_AFTER_HOST_RESET) {
-               _scsih_fw_event_free(ioc, fw_event);
-               spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
-               if (ioc->shost_recovery) {
-                       init_completion(&ioc->shost_recovery_done);
-                       spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock,
-                           flags);
-                       wait_for_completion(&ioc->shost_recovery_done);
-               } else
-                       spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock,
-                           flags);
+       switch (fw_event->event) {
+       case MPT2SAS_REMOVE_UNRESPONDING_DEVICES:
+               while (scsi_host_in_recovery(ioc->shost))
+                       ssleep(1);
                _scsih_remove_unresponding_sas_devices(ioc);
-               _scsih_hide_unhide_sas_devices(ioc);
-               return;
-       }
+               _scsih_scan_for_devices_after_reset(ioc);
+               break;
+       case MPT2SAS_PORT_ENABLE_COMPLETE:
+               ioc->start_scan = 0;
 
-       switch (fw_event->event) {
+
+
+               dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "port enable: complete "
+                   "from worker thread\n", ioc->name));
+               break;
        case MPT2SAS_TURN_ON_FAULT_LED:
                _scsih_turn_on_fault_led(ioc, fw_event->device_handle);
                break;
@@ -7121,6 +7401,8 @@ static struct scsi_host_template scsih_driver_template = {
        .slave_configure                = _scsih_slave_configure,
        .target_destroy                 = _scsih_target_destroy,
        .slave_destroy                  = _scsih_slave_destroy,
+       .scan_finished                  = _scsih_scan_finished,
+       .scan_start                     = _scsih_scan_start,
        .change_queue_depth             = _scsih_change_queue_depth,
        .change_queue_type              = _scsih_change_queue_type,
        .eh_abort_handler               = _scsih_abort,
@@ -7381,7 +7663,12 @@ _scsih_probe_boot_devices(struct MPT2SAS_ADAPTER *ioc)
        unsigned long flags;
        int rc;
 
+       /* no BIOS, return immediately */
+       if (!ioc->bios_pg3.BiosVersion)
+               return;
+
        device = NULL;
+       is_raid = 0;
        if (ioc->req_boot_device.device) {
                device =  ioc->req_boot_device.device;
                is_raid = ioc->req_boot_device.is_raid;
@@ -7417,8 +7704,9 @@ _scsih_probe_boot_devices(struct MPT2SAS_ADAPTER *ioc)
                    sas_device->sas_address_parent)) {
                        _scsih_sas_device_remove(ioc, sas_device);
                } else if (!sas_device->starget) {
-                       mpt2sas_transport_port_remove(ioc, sas_address,
-                           sas_address_parent);
+                       if (!ioc->is_driver_loading)
+                               mpt2sas_transport_port_remove(ioc, sas_address,
+                                       sas_address_parent);
                        _scsih_sas_device_remove(ioc, sas_device);
                }
        }
@@ -7462,22 +7750,28 @@ _scsih_probe_sas(struct MPT2SAS_ADAPTER *ioc)
        /* SAS Device List */
        list_for_each_entry_safe(sas_device, next, &ioc->sas_device_init_list,
            list) {
-               spin_lock_irqsave(&ioc->sas_device_lock, flags);
-               list_move_tail(&sas_device->list, &ioc->sas_device_list);
-               spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 
                if (ioc->hide_drives)
                        continue;
 
                if (!mpt2sas_transport_port_add(ioc, sas_device->handle,
                    sas_device->sas_address_parent)) {
-                       _scsih_sas_device_remove(ioc, sas_device);
+                       list_del(&sas_device->list);
+                       kfree(sas_device);
+                       continue;
                } else if (!sas_device->starget) {
-                       mpt2sas_transport_port_remove(ioc,
-                           sas_device->sas_address,
-                           sas_device->sas_address_parent);
-                       _scsih_sas_device_remove(ioc, sas_device);
+                       if (!ioc->is_driver_loading)
+                               mpt2sas_transport_port_remove(ioc,
+                                       sas_device->sas_address,
+                                       sas_device->sas_address_parent);
+                       list_del(&sas_device->list);
+                       kfree(sas_device);
+                       continue;
+
                }
+               spin_lock_irqsave(&ioc->sas_device_lock, flags);
+               list_move_tail(&sas_device->list, &ioc->sas_device_list);
+               spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
        }
 }
 
@@ -7490,9 +7784,7 @@ _scsih_probe_sas(struct MPT2SAS_ADAPTER *ioc)
 static void
 _scsih_probe_devices(struct MPT2SAS_ADAPTER *ioc)
 {
-       u16 volume_mapping_flags =
-           le16_to_cpu(ioc->ioc_pg8.IRVolumeMappingFlags) &
-           MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE;
+       u16 volume_mapping_flags;
 
        if (!(ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR))
                return;  /* return when IOC doesn't support initiator mode */
@@ -7500,18 +7792,93 @@ _scsih_probe_devices(struct MPT2SAS_ADAPTER *ioc)
        _scsih_probe_boot_devices(ioc);
 
        if (ioc->ir_firmware) {
-               if ((volume_mapping_flags &
-                    MPI2_IOCPAGE8_IRFLAGS_HIGH_VOLUME_MAPPING)) {
-                       _scsih_probe_sas(ioc);
+               volume_mapping_flags =
+                   le16_to_cpu(ioc->ioc_pg8.IRVolumeMappingFlags) &
+                   MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE;
+               if (volume_mapping_flags ==
+                   MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING) {
                        _scsih_probe_raid(ioc);
+                       _scsih_probe_sas(ioc);
                } else {
-                       _scsih_probe_raid(ioc);
                        _scsih_probe_sas(ioc);
+                       _scsih_probe_raid(ioc);
                }
        } else
                _scsih_probe_sas(ioc);
 }
 
+
+/**
+ * _scsih_scan_start - scsi lld callback for .scan_start
+ * @shost: SCSI host pointer
+ *
+ * The shost has the ability to discover targets on its own instead
+ * of scanning the entire bus.  In our implementation, we will kick off
+ * firmware discovery.
+ */
+static void
+_scsih_scan_start(struct Scsi_Host *shost)
+{
+       struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+       int rc;
+
+       if (diag_buffer_enable != -1 && diag_buffer_enable != 0)
+               mpt2sas_enable_diag_buffer(ioc, diag_buffer_enable);
+
+       ioc->start_scan = 1;
+       rc = mpt2sas_port_enable(ioc);
+
+       if (rc != 0)
+               printk(MPT2SAS_INFO_FMT "port enable: FAILED\n", ioc->name);
+}
+
+/**
+ * _scsih_scan_finished - scsi lld callback for .scan_finished
+ * @shost: SCSI host pointer
+ * @time: elapsed time of the scan in jiffies
+ *
+ * This function will be called periodically until it returns 1 with the
+ * scsi_host and the elapsed time of the scan in jiffies. In our implementation,
+ * we wait for firmware discovery to complete, then return 1.
+ */
+static int
+_scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
+{
+       struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+
+       if (time >= (300 * HZ)) {
+               ioc->base_cmds.status = MPT2_CMD_NOT_USED;
+               printk(MPT2SAS_INFO_FMT "port enable: FAILED with timeout "
+                   "(timeout=300s)\n", ioc->name);
+               ioc->is_driver_loading = 0;
+               return 1;
+       }
+
+       if (ioc->start_scan)
+               return 0;
+
+       if (ioc->start_scan_failed) {
+               printk(MPT2SAS_INFO_FMT "port enable: FAILED with "
+                   "(ioc_status=0x%08x)\n", ioc->name, ioc->start_scan_failed);
+               ioc->is_driver_loading = 0;
+               ioc->wait_for_discovery_to_complete = 0;
+               ioc->remove_host = 1;
+               return 1;
+       }
+
+       printk(MPT2SAS_INFO_FMT "port enable: SUCCESS\n", ioc->name);
+       ioc->base_cmds.status = MPT2_CMD_NOT_USED;
+
+       if (ioc->wait_for_discovery_to_complete) {
+               ioc->wait_for_discovery_to_complete = 0;
+               _scsih_probe_devices(ioc);
+       }
+       mpt2sas_base_start_watchdog(ioc);
+       ioc->is_driver_loading = 0;
+       return 1;
+}
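
The two callbacks above hook into the midlayer's asynchronous scan: scsi_scan_host() invokes .scan_start once to kick off firmware discovery, then polls .scan_finished with the elapsed jiffies until it returns 1. Roughly what do_scsi_scan_host() in drivers/scsi/scsi_scan.c does (a paraphrase of the midlayer loop, not driver code):

        static void do_lld_driven_scan(struct Scsi_Host *shost)
        {
                unsigned long start = jiffies;

                if (shost->hostt->scan_start)
                        shost->hostt->scan_start(shost);  /* issues port enable */

                /* _scsih_scan_finished() returns 0 while ioc->start_scan is set */
                while (!shost->hostt->scan_finished(shost, jiffies - start))
                        msleep(10);
        }
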
+
+
 /**
  * _scsih_probe - attach and add scsi host
  * @pdev: PCI device struct
@@ -7548,6 +7915,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        ioc->tm_cb_idx = tm_cb_idx;
        ioc->ctl_cb_idx = ctl_cb_idx;
        ioc->base_cb_idx = base_cb_idx;
+       ioc->port_enable_cb_idx = port_enable_cb_idx;
        ioc->transport_cb_idx = transport_cb_idx;
        ioc->scsih_cb_idx = scsih_cb_idx;
        ioc->config_cb_idx = config_cb_idx;
@@ -7620,14 +7988,14 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                goto out_thread_fail;
        }
 
-       ioc->wait_for_port_enable_to_complete = 1;
+       ioc->is_driver_loading = 1;
        if ((mpt2sas_base_attach(ioc))) {
                printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
                    ioc->name, __FILE__, __LINE__, __func__);
                goto out_attach_fail;
        }
 
-       ioc->wait_for_port_enable_to_complete = 0;
+       scsi_scan_host(shost);
        if (ioc->is_warpdrive) {
                if (ioc->mfg_pg10_hide_flag ==  MFG_PAGE10_EXPOSE_ALL_DISKS)
                        ioc->hide_drives = 0;
@@ -7650,6 +8018,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
  out_thread_fail:
        list_del(&ioc->list);
        scsi_remove_host(shost);
+       scsi_host_put(shost);
  out_add_shost_fail:
        return -ENODEV;
 }
@@ -7896,6 +8265,8 @@ _scsih_init(void)
 
        /* base internal commands callback handler */
        base_cb_idx = mpt2sas_base_register_callback_handler(mpt2sas_base_done);
+       port_enable_cb_idx = mpt2sas_base_register_callback_handler(
+               mpt2sas_port_enable_done);
 
        /* transport internal commands callback handler */
        transport_cb_idx = mpt2sas_base_register_callback_handler(
@@ -7950,6 +8321,7 @@ _scsih_exit(void)
        mpt2sas_base_release_callback_handler(scsi_io_cb_idx);
        mpt2sas_base_release_callback_handler(tm_cb_idx);
        mpt2sas_base_release_callback_handler(base_cb_idx);
+       mpt2sas_base_release_callback_handler(port_enable_cb_idx);
        mpt2sas_base_release_callback_handler(transport_cb_idx);
        mpt2sas_base_release_callback_handler(scsih_cb_idx);
        mpt2sas_base_release_callback_handler(config_cb_idx);
index 621b5e0..6f58919 100644 (file)
@@ -732,6 +732,16 @@ static struct pci_device_id __devinitdata mvs_pci_table[] = {
                .class_mask     = 0,
                .driver_data    = chip_9485,
        },
+       { PCI_VDEVICE(OCZ, 0x1021), chip_9485}, /* OCZ RevoDrive3 */
+       { PCI_VDEVICE(OCZ, 0x1022), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
+       { PCI_VDEVICE(OCZ, 0x1040), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
+       { PCI_VDEVICE(OCZ, 0x1041), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
+       { PCI_VDEVICE(OCZ, 0x1042), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
+       { PCI_VDEVICE(OCZ, 0x1043), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
+       { PCI_VDEVICE(OCZ, 0x1044), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
+       { PCI_VDEVICE(OCZ, 0x1080), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
+       { PCI_VDEVICE(OCZ, 0x1083), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
+       { PCI_VDEVICE(OCZ, 0x1084), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
 
        { }     /* terminate list */
 };
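
These entries use the PCI_VDEVICE() shorthand, which expands to a vendor/device pair with wildcard subsystem IDs; the trailing chip_9485 lands in driver_data. The macro (from include/linux/pci.h of this era) is roughly:

        #define PCI_VDEVICE(vendor, device)             \
                PCI_VENDOR_ID_##vendor, (device),       \
                PCI_ANY_ID, PCI_ANY_ID, 0, 0

        /* so { PCI_VDEVICE(OCZ, 0x1021), chip_9485 } relies on
         * PCI_VENDOR_ID_OCZ, added to include/linux/pci_ids.h below. */
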
index b86db84..5163edb 100644 (file)
@@ -4102,7 +4102,7 @@ static long pmcraid_chr_ioctl(
        struct pmcraid_ioctl_header *hdr = NULL;
        int retval = -ENOTTY;
 
-       hdr = kmalloc(GFP_KERNEL, sizeof(struct pmcraid_ioctl_header));
+       hdr = kmalloc(sizeof(struct pmcraid_ioctl_header), GFP_KERNEL);
 
        if (!hdr) {
                pmcraid_err("failed to allocate memory for ioctl header\n");
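
The fix above simply restores the correct kmalloc() argument order; the allocator takes the size first and the GFP flags second. The swapped call allocated the numeric value of GFP_KERNEL bytes and passed the structure size as bogus flags:

        void *kmalloc(size_t size, gfp_t flags);

        hdr = kmalloc(sizeof(struct pmcraid_ioctl_header), GFP_KERNEL);  /* size, then flags */
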
index 3474e86..2516adf 100644 (file)
@@ -2279,7 +2279,7 @@ qla25xx_msix_rsp_q(int irq, void *dev_id)
        ha = rsp->hw;
 
        /* Clear the interrupt, if enabled, for this response queue */
-       if (rsp->options & ~BIT_6) {
+       if (!ha->flags.disable_msix_handshake) {
                reg = &ha->iobase->isp24;
                spin_lock_irqsave(&ha->hardware_lock, flags);
                WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
index fc3f168..b4d43ae 100644 (file)
@@ -1698,6 +1698,15 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
 
 void scsi_free_queue(struct request_queue *q)
 {
+       unsigned long flags;
+
+       WARN_ON(q->queuedata);
+
+       /* cause scsi_request_fn() to kill all non-finished requests */
+       spin_lock_irqsave(q->queue_lock, flags);
+       q->request_fn(q);
+       spin_unlock_irqrestore(q->queue_lock, flags);
+
        blk_cleanup_queue(q);
 }
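
Running the queue's request_fn one last time works here because q->queuedata has already been cleared (hence the WARN_ON): with no scsi_device attached, scsi_request_fn() treats the queue as dead and completes any remaining requests with an error instead of leaking them. A rough paraphrase of the check on the other side (the top of scsi_request_fn() in this era, not new code):

        struct scsi_device *sdev = q->queuedata;
        struct request *req;

        if (!sdev) {
                /* queue has no device behind it anymore: fail everything */
                while ((req = blk_peek_request(q)) != NULL)
                        scsi_kill_request(req, q);
                return;
        }
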
 
index 44e8ca3..72273a0 100644 (file)
@@ -322,6 +322,7 @@ out_device_destroy:
        scsi_device_set_state(sdev, SDEV_DEL);
        transport_destroy_device(&sdev->sdev_gendev);
        put_device(&sdev->sdev_dev);
+       scsi_free_queue(sdev->request_queue);
        put_device(&sdev->sdev_gendev);
 out:
        if (display_failure_msg)
index 1bcd65a..96029e6 100644 (file)
@@ -520,7 +520,7 @@ fail_host_msg:
 /**
  * iscsi_bsg_host_add - Create and add the bsg hooks to receive requests
  * @shost: shost for iscsi_host
- * @cls_host: iscsi_cls_host adding the structures to
+ * @ihost: iscsi_cls_host adding the structures to
  */
 static int
 iscsi_bsg_host_add(struct Scsi_Host *shost, struct iscsi_cls_host *ihost)
index a7942e5..fa3a591 100644 (file)
@@ -2590,18 +2590,16 @@ static int sd_probe(struct device *dev)
                spin_unlock(&sd_index_lock);
        } while (error == -EAGAIN);
 
-       if (error)
+       if (error) {
+               sdev_printk(KERN_WARNING, sdp, "sd_probe: memory exhausted.\n");
                goto out_put;
-
-       if (index >= SD_MAX_DISKS) {
-               error = -ENODEV;
-               sdev_printk(KERN_WARNING, sdp, "SCSI disk (sd) name space exhausted.\n");
-               goto out_free_index;
        }
 
        error = sd_format_disk_name("sd", index, gd->disk_name, DISK_NAME_LEN);
-       if (error)
+       if (error) {
+               sdev_printk(KERN_WARNING, sdp, "SCSI disk (sd) name length exceeded.\n");
                goto out_free_index;
+       }
 
        sdkp->device = sdp;
        sdkp->driver = &sd_template;
index 6ad798b..4163f29 100644 (file)
@@ -8,12 +8,6 @@
  */
 #define SD_MAJORS      16
 
-/*
- * This is limited by the naming scheme enforced in sd_probe,
- * add another character to it if you really need more disks.
- */
-#define SD_MAX_DISKS   (((26 * 26) + 26 + 1) * 26)
-
 /*
  * Time out in seconds for disks and Magneto-opticals (which are slower).
  */
index 1871b8a..9b28f39 100644 (file)
@@ -462,14 +462,16 @@ static void st_scsi_execute_end(struct request *req, int uptodate)
 {
        struct st_request *SRpnt = req->end_io_data;
        struct scsi_tape *STp = SRpnt->stp;
+       struct bio *tmp;
 
        STp->buffer->cmdstat.midlevel_result = SRpnt->result = req->errors;
        STp->buffer->cmdstat.residual = req->resid_len;
 
+       tmp = SRpnt->bio;
        if (SRpnt->waiting)
                complete(SRpnt->waiting);
 
-       blk_rq_unmap_user(SRpnt->bio);
+       blk_rq_unmap_user(tmp);
        __blk_put_request(req->q, req);
 }
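
The temporary above exists because complete() may wake a waiter that frees SRpnt immediately, so SRpnt->bio has to be read into a local before the completion is signalled, and only the local is handed to blk_rq_unmap_user() afterwards. The annotated sequence:

        tmp = SRpnt->bio;                       /* snapshot while SRpnt is still valid */
        if (SRpnt->waiting)
                complete(SRpnt->waiting);       /* waiter may free SRpnt from here on */
        blk_rq_unmap_user(tmp);                 /* uses the local copy, never SRpnt */
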
 
index 1679ff6..3fdf251 100644 (file)
 
 #define PCI_VENDOR_ID_XEN              0x5853
 #define PCI_DEVICE_ID_XEN_PLATFORM     0x0001
+
+#define PCI_VENDOR_ID_OCZ              0x1b85