[SCSI] lpfc 8.3.39: Fix driver issues with large lpfc_sg_seg_cnt values
[pandora-kernel.git] / drivers / scsi / lpfc / lpfc_init.c
index 314b4f6..b2227fc 100644 (file)
@@ -541,13 +541,16 @@ lpfc_config_port_post(struct lpfc_hba *phba)
 
        /* Set up ring-0 (ELS) timer */
        timeout = phba->fc_ratov * 2;
-       mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
+       mod_timer(&vport->els_tmofunc,
+                 jiffies + msecs_to_jiffies(1000 * timeout));
        /* Set up heart beat (HB) timer */
-       mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
+       mod_timer(&phba->hb_tmofunc,
+                 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
        phba->hb_outstanding = 0;
        phba->last_completion_time = jiffies;
        /* Set up error attention (ERATT) polling timer */
-       mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
+       mod_timer(&phba->eratt_poll,
+                 jiffies + msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL));
 
        if (phba->hba_flag & LINK_DISABLED) {
                lpfc_printf_log(phba,
@@ -839,7 +842,6 @@ lpfc_hba_down_post_s3(struct lpfc_hba *phba)
                 * way, nothing should be on txcmplq as it will NEVER complete.
                 */
                list_splice_init(&pring->txcmplq, &completions);
-               pring->txcmplq_cnt = 0;
                spin_unlock_irq(&phba->hbalock);
 
                /* Cancel all the IOCBs from the completions list */
@@ -1022,7 +1024,8 @@ lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
                !(phba->link_state == LPFC_HBA_ERROR) &&
                !(phba->pport->load_flag & FC_UNLOADING))
                mod_timer(&phba->hb_tmofunc,
-                       jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
+                         jiffies +
+                         msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
        return;
 }
 
@@ -1065,15 +1068,18 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
 
        spin_lock_irq(&phba->pport->work_port_lock);
 
-       if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ,
-               jiffies)) {
+       if (time_after(phba->last_completion_time +
+                       msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
+                       jiffies)) {
                spin_unlock_irq(&phba->pport->work_port_lock);
                if (!phba->hb_outstanding)
                        mod_timer(&phba->hb_tmofunc,
-                               jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
+                               jiffies +
+                               msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
                else
                        mod_timer(&phba->hb_tmofunc,
-                               jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
+                               jiffies +
+                               msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
                return;
        }
        spin_unlock_irq(&phba->pport->work_port_lock);
@@ -1105,7 +1111,8 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
                                if (!pmboxq) {
                                        mod_timer(&phba->hb_tmofunc,
                                                 jiffies +
-                                                HZ * LPFC_HB_MBOX_INTERVAL);
+                                                msecs_to_jiffies(1000 *
+                                                LPFC_HB_MBOX_INTERVAL));
                                        return;
                                }
 
@@ -1121,7 +1128,8 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
                                                        phba->mbox_mem_pool);
                                        mod_timer(&phba->hb_tmofunc,
                                                jiffies +
-                                               HZ * LPFC_HB_MBOX_INTERVAL);
+                                               msecs_to_jiffies(1000 *
+                                               LPFC_HB_MBOX_INTERVAL));
                                        return;
                                }
                                phba->skipped_hb = 0;
@@ -1137,7 +1145,8 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
                                phba->skipped_hb = jiffies;
 
                        mod_timer(&phba->hb_tmofunc,
-                                 jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
+                                jiffies +
+                                msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
                        return;
                } else {
                        /*
@@ -1151,7 +1160,8 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
                                        jiffies_to_msecs(jiffies
                                                 - phba->last_completion_time));
                        mod_timer(&phba->hb_tmofunc,
-                                 jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
+                               jiffies +
+                               msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
                }
        }
 }
@@ -2634,6 +2644,7 @@ lpfc_online(struct lpfc_hba *phba)
        struct lpfc_vport *vport;
        struct lpfc_vport **vports;
        int i;
+       bool vpis_cleared = false;
 
        if (!phba)
                return 0;
@@ -2657,6 +2668,10 @@ lpfc_online(struct lpfc_hba *phba)
                        lpfc_unblock_mgmt_io(phba);
                        return 1;
                }
+               spin_lock_irq(&phba->hbalock);
+               if (!phba->sli4_hba.max_cfg_param.vpi_used)
+                       vpis_cleared = true;
+               spin_unlock_irq(&phba->hbalock);
        } else {
                if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */
                        lpfc_unblock_mgmt_io(phba);
@@ -2673,8 +2688,13 @@ lpfc_online(struct lpfc_hba *phba)
                        vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
                        if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
                                vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
-                       if (phba->sli_rev == LPFC_SLI_REV4)
+                       if (phba->sli_rev == LPFC_SLI_REV4) {
                                vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
+                               if ((vpis_cleared) &&
+                                   (vports[i]->port_type !=
+                                       LPFC_PHYSICAL_PORT))
+                                       vports[i]->vpi = 0;
+                       }
                        spin_unlock_irq(shost->host_lock);
                }
                lpfc_destroy_vport_work_array(phba, vports);
@@ -2915,9 +2935,9 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
                        sglq_entry->state = SGL_FREED;
                        list_add_tail(&sglq_entry->list, &els_sgl_list);
                }
-               spin_lock(&phba->hbalock);
+               spin_lock_irq(&phba->hbalock);
                list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list);
-               spin_unlock(&phba->hbalock);
+               spin_unlock_irq(&phba->hbalock);
        } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
                /* els xri-sgl shrinked */
                xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
@@ -3015,9 +3035,9 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
                psb->cur_iocbq.sli4_lxritag = lxri;
                psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
        }
-       spin_lock(&phba->scsi_buf_list_lock);
+       spin_lock_irq(&phba->scsi_buf_list_lock);
        list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list);
-       spin_unlock(&phba->scsi_buf_list_lock);
+       spin_unlock_irq(&phba->scsi_buf_list_lock);
 
        return 0;
 
@@ -3198,14 +3218,15 @@ int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
                stat = 1;
                goto finished;
        }
-       if (time >= 30 * HZ) {
+       if (time >= msecs_to_jiffies(30 * 1000)) {
                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
                                "0461 Scanning longer than 30 "
                                "seconds.  Continuing initialization\n");
                stat = 1;
                goto finished;
        }
-       if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) {
+       if (time >= msecs_to_jiffies(15 * 1000) &&
+           phba->link_state <= LPFC_LINK_DOWN) {
                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
                                "0465 Link down longer than 15 "
                                "seconds.  Continuing initialization\n");
@@ -3217,7 +3238,7 @@ int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
                goto finished;
        if (vport->num_disc_nodes || vport->fc_prli_sent)
                goto finished;
-       if (vport->fc_map_cnt == 0 && time < 2 * HZ)
+       if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000))
                goto finished;
        if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
                goto finished;
@@ -4003,6 +4024,52 @@ lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
        lpfc_destroy_vport_work_array(phba, vports);
 }
 
+/**
+ * lpfc_sli4_perform_inuse_fcf_recovery - Perform inuse fcf recovery
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is to perform FCF recovery when the in-use FCF is either
+ * dead or has been modified.
+ **/
+static void
+lpfc_sli4_perform_inuse_fcf_recovery(struct lpfc_hba *phba,
+                                    struct lpfc_acqe_fip *acqe_fip)
+{
+       int rc;
+
+       spin_lock_irq(&phba->hbalock);
+       /* Mark the fast failover process in progress */
+       phba->fcf.fcf_flag |= FCF_DEAD_DISC;
+       spin_unlock_irq(&phba->hbalock);
+
+       lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
+                       "2771 Start FCF fast failover process due to in-use "
+                       "FCF DEAD/MODIFIED event: evt_tag:x%x, index:x%x\n",
+                       acqe_fip->event_tag, acqe_fip->index);
+       rc = lpfc_sli4_redisc_fcf_table(phba);
+       if (rc) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
+                               "2772 Issue FCF rediscover mailbox command "
+                               "failed, fail through to FCF dead event\n");
+               spin_lock_irq(&phba->hbalock);
+               phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
+               spin_unlock_irq(&phba->hbalock);
+               /*
+                * Last resort will fail over by treating this as a link
+                * down to FCF registration.
+                */
+               lpfc_sli4_fcf_dead_failthrough(phba);
+       } else {
+               /* Reset FCF roundrobin bmask for new discovery */
+               lpfc_sli4_clear_fcf_rr_bmask(phba);
+               /*
+                * Handling fast FCF failover to a DEAD FCF event is
+                * considered equivalent to receiving CVL to all vports.
+                */
+               lpfc_sli4_perform_all_vport_cvl(phba);
+       }
+}
+
 /**
  * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
  * @phba: pointer to lpfc hba data structure.
@@ -4068,9 +4135,22 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
                        break;
                }
 
-               /* If the FCF has been in discovered state, do nothing. */
-               if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
+               /* If FCF has been in discovered state, perform rediscovery
+                * only if the FCF with the same index of the in-use FCF got
+                * modified during normal operation. Otherwise, do nothing.
+                */
+               if (phba->pport->port_state > LPFC_FLOGI) {
                        spin_unlock_irq(&phba->hbalock);
+                       if (phba->fcf.current_rec.fcf_indx ==
+                           acqe_fip->index) {
+                               lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
+                                               "3300 In-use FCF (%d) "
+                                               "modified, perform FCF "
+                                               "rediscovery\n",
+                                               acqe_fip->index);
+                               lpfc_sli4_perform_inuse_fcf_recovery(phba,
+                                                                    acqe_fip);
+                       }
                        break;
                }
                spin_unlock_irq(&phba->hbalock);
@@ -4123,39 +4203,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
                 * is no longer valid as we are not in the middle of FCF
                 * failover process already.
                 */
-               spin_lock_irq(&phba->hbalock);
-               /* Mark the fast failover process in progress */
-               phba->fcf.fcf_flag |= FCF_DEAD_DISC;
-               spin_unlock_irq(&phba->hbalock);
-
-               lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
-                               "2771 Start FCF fast failover process due to "
-                               "FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
-                               "\n", acqe_fip->event_tag, acqe_fip->index);
-               rc = lpfc_sli4_redisc_fcf_table(phba);
-               if (rc) {
-                       lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
-                                       LOG_DISCOVERY,
-                                       "2772 Issue FCF rediscover mabilbox "
-                                       "command failed, fail through to FCF "
-                                       "dead event\n");
-                       spin_lock_irq(&phba->hbalock);
-                       phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
-                       spin_unlock_irq(&phba->hbalock);
-                       /*
-                        * Last resort will fail over by treating this
-                        * as a link down to FCF registration.
-                        */
-                       lpfc_sli4_fcf_dead_failthrough(phba);
-               } else {
-                       /* Reset FCF roundrobin bmask for new discovery */
-                       lpfc_sli4_clear_fcf_rr_bmask(phba);
-                       /*
-                        * Handling fast FCF failover to a DEAD FCF event is
-                        * considered equalivant to receiving CVL to all vports.
-                        */
-                       lpfc_sli4_perform_all_vport_cvl(phba);
-               }
+               lpfc_sli4_perform_inuse_fcf_recovery(phba, acqe_fip);
                break;
        case LPFC_FIP_EVENT_TYPE_CVL:
                phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
@@ -4189,7 +4237,8 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
                         * If there are other active VLinks present,
                         * re-instantiate the Vlink using FDISC.
                         */
-                       mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
+                       mod_timer(&ndlp->nlp_delayfunc,
+                                 jiffies + msecs_to_jiffies(1000));
                        shost = lpfc_shost_from_vport(vport);
                        spin_lock_irq(shost->host_lock);
                        ndlp->nlp_flag |= NLP_DELAY_TMO;
@@ -4690,7 +4739,7 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
                        ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
 
        if (phba->cfg_enable_bg) {
-               phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
+               phba->cfg_sg_seg_cnt = LPFC_MAX_BPL_SEG_CNT;
                phba->cfg_sg_dma_buf_size +=
                        phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
        }
@@ -4768,7 +4817,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
        int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size;
        uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
        struct lpfc_mqe *mqe;
-       int longs, sli_family;
+       int longs;
        int sges_per_segment;
 
        /* Before proceed, wait for POST done and device ready */
@@ -4852,6 +4901,17 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
                        sizeof(struct lpfc_sli_ring), GFP_KERNEL);
        if (!phba->sli.ring)
                return -ENOMEM;
+
+       /*
+        * It doesn't matter what family our adapter is in, we are
+        * limited to 2 Pages, 512 SGEs, for our SGL.
+        * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
+        */
+       max_buf_size = (2 * SLI4_PAGE_SIZE);
+       if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - 2)
+               phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - 2;
+       max_buf_size += (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
+
        /*
         * Since the sg_tablesize is module parameter, the sg_dma_buf_size
         * used to create the sg_dma_buf_pool must be dynamically calculated.
@@ -4863,22 +4923,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
                    (((phba->cfg_sg_seg_cnt * sges_per_segment) + 2) *
                    sizeof(struct sli4_sge)));
 
-       sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf);
-       max_buf_size = LPFC_SLI4_MAX_BUF_SIZE;
-       switch (sli_family) {
-       case LPFC_SLI_INTF_FAMILY_BE2:
-       case LPFC_SLI_INTF_FAMILY_BE3:
-               /* There is a single hint for BE - 2 pages per BPL. */
-               if (bf_get(lpfc_sli_intf_sli_hint1, &phba->sli4_hba.sli_intf) ==
-                   LPFC_SLI_INTF_SLI_HINT1_1)
-                       max_buf_size = LPFC_SLI4_FL1_MAX_BUF_SIZE;
-               break;
-       case LPFC_SLI_INTF_FAMILY_LNCR_A0:
-       case LPFC_SLI_INTF_FAMILY_LNCR_B0:
-       default:
-               break;
-       }
-
        for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE;
             dma_buf_size < max_buf_size && buf_size > dma_buf_size;
             dma_buf_size = dma_buf_size << 1)