diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 98999bb..8b799f0 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -560,7 +560,7 @@ __lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
        rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
        if (rrq) {
                rrq->send_rrq = send_rrq;
-               rrq->xritag = phba->sli4_hba.xri_ids[xritag];
+               rrq->xritag = xritag;
                rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1);
                rrq->ndlp = ndlp;
                rrq->nlp_DID = ndlp->nlp_DID;
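
The hunk above stores the driver-logical xritag in the RRQ instead of
mapping it to the port's physical id at RRQ-creation time. A toy user-space
model of that logical/physical split (the names, table contents, and sizes
are illustrative assumptions, not driver code):

#include <stdint.h>
#include <stdio.h>

#define MAX_XRI 8

/* logical -> physical id map; values are made up for the example */
static const uint16_t xri_ids[MAX_XRI] = {
        0x100, 0x101, 0x102, 0x103, 0x104, 0x105, 0x106, 0x107
};
static uint8_t xri_active[MAX_XRI];     /* bookkeeping, keyed by LOGICAL id */

static void rrq_set_active(uint16_t xritag)
{
        /* Store the logical id: the later test/clear uses the same key.
         * Storing the physical id here would index the wrong slot.
         */
        xri_active[xritag] = 1;
}

static uint16_t wqe_xri(uint16_t xritag)
{
        /* Translate to the physical id only when talking to the port. */
        return xri_ids[xritag];
}

int main(void)
{
        rrq_set_active(3);
        printf("logical 3: active=%d physical=0x%x\n",
               xri_active[3], wqe_xri(3));
        return 0;
}
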
@@ -2452,7 +2452,8 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 
                /* search continue save q for same XRI */
                list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
-                       if (iocbq->iocb.ulpContext == saveq->iocb.ulpContext) {
+                       if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
+                               saveq->iocb.unsli3.rcvsli3.ox_id) {
                                list_add_tail(&saveq->list, &iocbq->list);
                                found = 1;
                                break;
@@ -3355,6 +3356,7 @@ lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
                                                           irspiocbq);
                        break;
                case CQE_CODE_RECEIVE:
+               case CQE_CODE_RECEIVE_V1:
                        dmabuf = container_of(cq_event, struct hbq_dmabuf,
                                              cq_event);
                        lpfc_sli4_handle_received_buffer(phba, dmabuf);
@@ -4712,10 +4714,15 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
  * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
  * @phba: Pointer to HBA context object.
  * @type: The resource extent type.
+ * @extnt_count: buffer to hold port available extent count.
+ * @extnt_size: buffer to hold element count per extent.
  *
- * This function allocates all SLI4 resource identifiers.
+ * This function calls the port and retrieves the number of available
+ * extents and their size for a particular extent type.
+ *
+ * Returns: 0 if successful.  Nonzero otherwise.
  **/
-static int
+int
 lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
                               uint16_t *extnt_count, uint16_t *extnt_size)
 {
@@ -4892,7 +4899,7 @@ lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t *extnt_cnt,
                                     req_len, *emb);
        if (alloc_len < req_len) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                       "9000 Allocated DMA memory size (x%x) is "
+                       "2982 Allocated DMA memory size (x%x) is "
                        "less than the requested DMA memory "
                        "size (x%x)\n", alloc_len, req_len);
                return -ENOMEM;
@@ -5505,6 +5512,154 @@ lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
        return 0;
 }
 
+/**
+ * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
+ * @phba: Pointer to HBA context object.
+ * @type: The resource extent type.
+ * @extnt_cnt: buffer to hold the port's extent count response.
+ * @extnt_size: buffer to hold the port's extent size response.
+ *
+ * This function calls the port to read the host-allocated extents
+ * for a particular type.
+ *
+ * Returns: 0 if successful.  Nonzero otherwise.
+ **/
+int
+lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
+                              uint16_t *extnt_cnt, uint16_t *extnt_size)
+{
+       bool emb;
+       int rc = 0;
+       uint16_t curr_blks = 0;
+       uint32_t req_len, emb_len;
+       uint32_t alloc_len, mbox_tmo;
+       struct list_head *blk_list_head;
+       struct lpfc_rsrc_blks *rsrc_blk;
+       LPFC_MBOXQ_t *mbox;
+       void *virtaddr = NULL;
+       struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
+       struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
+       union  lpfc_sli4_cfg_shdr *shdr;
+
+       switch (type) {
+       case LPFC_RSC_TYPE_FCOE_VPI:
+               blk_list_head = &phba->lpfc_vpi_blk_list;
+               break;
+       case LPFC_RSC_TYPE_FCOE_XRI:
+               blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
+               break;
+       case LPFC_RSC_TYPE_FCOE_VFI:
+               blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
+               break;
+       case LPFC_RSC_TYPE_FCOE_RPI:
+               blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
+               break;
+       default:
+               return -EIO;
+       }
+
+       /* Count the number of extents currently allocated for this type. */
+       list_for_each_entry(rsrc_blk, blk_list_head, list) {
+               if (curr_blks == 0) {
+                       /*
+                        * The GET_ALLOCATED mailbox does not return the size,
+                        * just the count.  All blocks of an extent type share
+                        * the same size, so take it from the current allocated
+                        * block and set the return value now.
+                        */
+                       *extnt_size = rsrc_blk->rsrc_size;
+               }
+               curr_blks++;
+       }
+
+       /* Calculate the total requested length of the dma memory. */
+       req_len = curr_blks * sizeof(uint16_t);
+
+       /*
+        * Calculate the size of an embedded mailbox.  The uint32_t
+        * accounts for the extents-specific word.
+        */
+       emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
+               sizeof(uint32_t);
+
+       /*
+        * Presume the allocation and response will fit into an embedded
+        * mailbox.  If not true, reconfigure to a non-embedded mailbox.
+        */
+       emb = LPFC_SLI4_MBX_EMBED;
+       if (req_len > emb_len) {
+               req_len = curr_blks * sizeof(uint16_t) +
+                       sizeof(union lpfc_sli4_cfg_shdr) +
+                       sizeof(uint32_t);
+               emb = LPFC_SLI4_MBX_NEMBED;
+       }
+
+       mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+       if (!mbox)
+               return -ENOMEM;
+       memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
+
+       alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+                                    LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
+                                    req_len, emb);
+       if (alloc_len < req_len) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                       "2983 Allocated DMA memory size (x%x) is "
+                       "less than the requested DMA memory "
+                       "size (x%x)\n", alloc_len, req_len);
+               rc = -ENOMEM;
+               goto err_exit;
+       }
+       rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
+       if (unlikely(rc)) {
+               rc = -EIO;
+               goto err_exit;
+       }
+
+       if (!phba->sli4_hba.intr_enable)
+               rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+       else {
+               mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+               rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+       }
+
+       if (unlikely(rc)) {
+               rc = -EIO;
+               goto err_exit;
+       }
+
+       /*
+        * Figure out where the response is located.  Then get local pointers
+        * to the response data.  The port is not guaranteed to respond to
+        * every extent count requested, so update the local variable with
+        * the allocated count returned by the port.
+        */
+       if (emb == LPFC_SLI4_MBX_EMBED) {
+               rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
+               shdr = &rsrc_ext->header.cfg_shdr;
+               *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
+       } else {
+               virtaddr = mbox->sge_array->addr[0];
+               n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
+               shdr = &n_rsrc->cfg_shdr;
+               *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
+       }
+
+       if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
+                       "2984 Failed to read allocated resources "
+                       "for type %d - Status 0x%x Add'l Status 0x%x.\n",
+                       type,
+                       bf_get(lpfc_mbox_hdr_status, &shdr->response),
+                       bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
+               rc = -EIO;
+               goto err_exit;
+       }
+ err_exit:
+       lpfc_sli4_mbox_cmd_free(phba, mbox);
+       return rc;
+}
+
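
A standalone sketch of the embedded versus non-embedded sizing decision the
function above makes, in plain user-space C; MAILBOX_SIZE and HEADER_SIZE are
assumed stand-ins for sizeof(MAILBOX_t) and sizeof(struct mbox_header), and
the numbers are illustrative only:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAILBOX_SIZE 256u   /* assumed stand-in for sizeof(MAILBOX_t) */
#define HEADER_SIZE    8u   /* assumed stand-in for sizeof(struct mbox_header) */

static bool choose_embedded(uint32_t curr_blks, uint32_t *req_len)
{
        /* Room left in an embedded mailbox after the header and the
         * extents-specific word.
         */
        uint32_t emb_len = MAILBOX_SIZE - HEADER_SIZE - sizeof(uint32_t);

        /* One 16-bit resource id comes back per allocated extent block. */
        *req_len = curr_blks * sizeof(uint16_t);
        if (*req_len <= emb_len)
                return true;    /* response fits inside the mailbox */

        /* Otherwise size for a non-embedded (external SGE) response. */
        *req_len = curr_blks * sizeof(uint16_t) + HEADER_SIZE +
                sizeof(uint32_t);
        return false;
}

int main(void)
{
        uint32_t req_len;
        uint32_t blks;

        for (blks = 4; blks <= 1024; blks *= 16) {
                bool emb = choose_embedded(blks, &req_len);
                printf("%4u blocks -> %s, req_len=%u\n", blks,
                       emb ? "embedded" : "non-embedded", req_len);
        }
        return 0;
}
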
 /**
  * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
  * @phba: Pointer to HBA context object.
@@ -5837,6 +5992,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
                                        "Advanced Error Reporting (AER)\n");
                        phba->cfg_aer_support = 0;
                }
+               rc = 0;
        }
 
        if (!(phba->hba_flag & HBA_FCOE_MODE)) {
@@ -6634,6 +6790,9 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
        unsigned long iflags;
        int rc;
 
+       /* dump the mailbox command being issued, if dumping is set up */
+       lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);
+
        rc = lpfc_mbox_dev_check(phba);
        if (unlikely(rc)) {
                lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
@@ -7318,12 +7477,12 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
                bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
                bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
                bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
-       break;
+               break;
        case CMD_XMIT_SEQUENCE64_CX:
                bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
                       iocbq->iocb.un.ulpWord[3]);
                bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
-                      iocbq->iocb.ulpContext);
+                      iocbq->iocb.unsli3.rcvsli3.ox_id);
                /* The entire sequence is transmitted for this IOCB */
                xmit_len = total_len;
                cmnd = CMD_XMIT_SEQUENCE64_CR;
@@ -7341,7 +7500,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
                bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
                wqe->xmit_sequence.xmit_len = xmit_len;
                command_type = OTHER_COMMAND;
-       break;
+               break;
        case CMD_XMIT_BCAST64_CN:
                /* word3 iocb=iotag32 wqe=seq_payload_len */
                wqe->xmit_bcast64.seq_payload_len = xmit_len;
@@ -7355,7 +7514,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
                bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
                       LPFC_WQE_LENLOC_WORD3);
                bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
-       break;
+               break;
        case CMD_FCP_IWRITE64_CR:
                command_type = FCP_COMMAND_DATA_OUT;
                /* word3 iocb=iotag wqe=payload_offset_len */
@@ -7375,7 +7534,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
                       LPFC_WQE_LENLOC_WORD4);
                bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0);
                bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
-       break;
+               break;
        case CMD_FCP_IREAD64_CR:
                /* word3 iocb=iotag wqe=payload_offset_len */
                /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
@@ -7394,7 +7553,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
                       LPFC_WQE_LENLOC_WORD4);
                bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0);
                bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
-       break;
+               break;
        case CMD_FCP_ICMND64_CR:
                /* word3 iocb=IO_TAG wqe=reserved */
                wqe->fcp_icmd.rsrvd3 = 0;
@@ -7407,7 +7566,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
                bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
                       LPFC_WQE_LENLOC_NONE);
                bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0);
-       break;
+               break;
        case CMD_GEN_REQUEST64_CR:
                /* For this command calculate the xmit length of the
                 * request bde.
@@ -7442,7 +7601,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
                bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
                bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
                command_type = OTHER_COMMAND;
-       break;
+               break;
        case CMD_XMIT_ELS_RSP64_CX:
                ndlp = (struct lpfc_nodelist *)iocbq->context1;
                /* words0-2 BDE memcpy */
@@ -7457,7 +7616,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
                       ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
                bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
                bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
-                      iocbq->iocb.ulpContext);
+                      iocbq->iocb.unsli3.rcvsli3.ox_id);
                if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
                        bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
                               phba->vpi_ids[iocbq->vport->vpi]);
@@ -7470,7 +7629,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
                bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
                       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
                command_type = OTHER_COMMAND;
-       break;
+               break;
        case CMD_CLOSE_XRI_CN:
        case CMD_ABORT_XRI_CN:
        case CMD_ABORT_XRI_CX:
@@ -7509,7 +7668,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
                cmnd = CMD_ABORT_XRI_CX;
                command_type = OTHER_COMMAND;
                xritag = 0;
-       break;
+               break;
        case CMD_XMIT_BLS_RSP64_CX:
                /* As BLS ABTS RSP WQE is very different from other WQEs,
                 * we re-construct this WQE here based on information in
@@ -7553,7 +7712,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
                               bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
                }
 
-       break;
+               break;
        case CMD_XRI_ABORTED_CX:
        case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
        case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
@@ -7565,7 +7724,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
                                "2014 Invalid command 0x%x\n",
                                iocbq->iocb.ulpCommand);
                return IOCB_ERROR;
-       break;
+               break;
        }
 
        bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
@@ -10481,10 +10640,14 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
        struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
        struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
        struct hbq_dmabuf *dma_buf;
-       uint32_t status;
+       uint32_t status, rq_id;
        unsigned long iflags;
 
-       if (bf_get(lpfc_rcqe_rq_id, rcqe) != hrq->queue_id)
+       if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
+               rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
+       else
+               rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
+       if (rq_id != hrq->queue_id)
                goto out;
 
        status = bf_get(lpfc_rcqe_status, rcqe);
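
A V1 receive CQE carries its rq_id in a different word than the original
CQE, so the handler above checks the CQE code before extracting the field.
A minimal user-space model of that version dispatch (the word layouts, bit
positions, and enum values are invented for illustration and do not match
the real SLI4 structures):

#include <stdint.h>
#include <stdio.h>

enum cqe_code {                 /* values assumed for the example */
        MODEL_CQE_RECEIVE    = 0x4,
        MODEL_CQE_RECEIVE_V1 = 0x9,
};

struct model_rcqe {
        uint32_t word0;         /* V0: rq_id in bits 16..31 (assumed) */
        uint32_t word1;         /* V1: rq_id in bits 0..15 (assumed)  */
        enum cqe_code code;
};

static uint16_t rcqe_rq_id(const struct model_rcqe *cqe)
{
        /* Pick the field by CQE version, as the handler above does. */
        if (cqe->code == MODEL_CQE_RECEIVE_V1)
                return cqe->word1 & 0xffff;
        return (cqe->word0 >> 16) & 0xffff;
}

int main(void)
{
        struct model_rcqe v0 = { .word0 = 0x002a0000,
                                 .code = MODEL_CQE_RECEIVE };
        struct model_rcqe v1 = { .word1 = 0x0000002a,
                                 .code = MODEL_CQE_RECEIVE_V1 };

        printf("v0 rq_id=%u v1 rq_id=%u\n",
               rcqe_rq_id(&v0), rcqe_rq_id(&v1));
        return 0;
}
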
@@ -10563,6 +10726,7 @@ lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
                                (struct sli4_wcqe_xri_aborted *)&cqevt);
                break;
        case CQE_CODE_RECEIVE:
+       case CQE_CODE_RECEIVE_V1:
                /* Process the RQ event */
                phba->last_completion_time = jiffies;
                workposted = lpfc_sli4_sp_handle_rcqe(phba,
@@ -12345,19 +12509,18 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
 }
 
 /**
- * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
+ * lpfc_sli4_alloc_xri - Get an available xri in the device's range
  * @phba: pointer to lpfc hba data structure.
  *
- * This routine is invoked to post rpi header templates to the
- * port for those SLI4 ports that do not support extents.  This routine
- * posts a PAGE_SIZE memory region to the port to hold up to
- * PAGE_SIZE modulo 64 rpi context headers.  This is an initialization routine
- * and should be called only when interrupts are disabled.
+ * This routine is invoked to allocate an available XRI from the
+ * driver's range, consistent with the SLI-4 interface spec.
  *
- * Return codes
- *     0 - successful
- *     -ERROR - otherwise.
- */
+ * Returns
+ *     An available xri in the device's range if successful,
+ *     NO_XRI if no xris are available.
+ **/
 uint16_t
 lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
 {
@@ -13406,7 +13569,7 @@ lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
  * This function validates that the xri maps to the known range of XRIs
  * allocated and used by the driver.
  **/
-static uint16_t
+uint16_t
 lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
                      uint16_t xri)
 {
@@ -13643,10 +13806,12 @@ lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
 static struct lpfc_iocbq *
 lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
 {
+       struct hbq_dmabuf *hbq_buf;
        struct lpfc_dmabuf *d_buf, *n_buf;
        struct lpfc_iocbq *first_iocbq, *iocbq;
        struct fc_frame_header *fc_hdr;
        uint32_t sid;
+       uint32_t len, tot_len;
        struct ulp_bde64 *pbde;
 
        fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
@@ -13655,6 +13820,7 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
        lpfc_update_rcv_time_stamp(vport);
        /* get the Remote Port's SID */
        sid = sli4_sid_from_fc_hdr(fc_hdr);
+       tot_len = 0;
        /* Get an iocbq struct to fill in. */
        first_iocbq = lpfc_sli_get_iocbq(vport->phba);
        if (first_iocbq) {
@@ -13662,9 +13828,12 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
                first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
                first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
                first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
-               first_iocbq->iocb.ulpContext = be16_to_cpu(fc_hdr->fh_ox_id);
-               /* iocbq is prepped for internal consumption.  Logical vpi. */
-               first_iocbq->iocb.unsli3.rcvsli3.vpi = vport->vpi;
+               first_iocbq->iocb.ulpContext = NO_XRI;
+               first_iocbq->iocb.unsli3.rcvsli3.ox_id =
+                       be16_to_cpu(fc_hdr->fh_ox_id);
+               /* iocbq is prepped for internal consumption.  Physical vpi. */
+               first_iocbq->iocb.unsli3.rcvsli3.vpi =
+                       vport->phba->vpi_ids[vport->vpi];
                /* put the first buffer into the first IOCBq */
                first_iocbq->context2 = &seq_dmabuf->dbuf;
                first_iocbq->context3 = NULL;
@@ -13672,9 +13841,9 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
                first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
                                                        LPFC_DATA_BUF_SIZE;
                first_iocbq->iocb.un.rcvels.remoteID = sid;
-               first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
-                               bf_get(lpfc_rcqe_length,
+               tot_len = bf_get(lpfc_rcqe_length,
                                       &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
+               first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
        }
        iocbq = first_iocbq;
        /*
@@ -13692,9 +13861,13 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
                        pbde = (struct ulp_bde64 *)
                                        &iocbq->iocb.unsli3.sli3Words[4];
                        pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
-                       first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
-                               bf_get(lpfc_rcqe_length,
-                                      &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
+
+                       /* We need to get the size out of the right CQE */
+                       hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
+                       len = bf_get(lpfc_rcqe_length,
+                                      &hbq_buf->cq_event.cqe.rcqe_cmpl);
+                       iocbq->iocb.unsli3.rcvsli3.acc_len += len;
+                       tot_len += len;
                } else {
                        iocbq = lpfc_sli_get_iocbq(vport->phba);
                        if (!iocbq) {
@@ -13712,9 +13885,14 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
                        iocbq->iocb.ulpBdeCount = 1;
                        iocbq->iocb.un.cont64[0].tus.f.bdeSize =
                                                        LPFC_DATA_BUF_SIZE;
-                       first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
-                               bf_get(lpfc_rcqe_length,
-                                      &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
+
+                       /* We need to get the size out of the right CQE */
+                       hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
+                       len = bf_get(lpfc_rcqe_length,
+                                      &hbq_buf->cq_event.cqe.rcqe_cmpl);
+                       tot_len += len;
+                       iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
+
                        iocbq->iocb.un.rcvels.remoteID = sid;
                        list_add_tail(&iocbq->list, &first_iocbq->list);
                }
@@ -13787,7 +13965,13 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
                lpfc_in_buf_free(phba, &dmabuf->dbuf);
                return;
        }
-       fcfi = bf_get(lpfc_rcqe_fcf_id, &dmabuf->cq_event.cqe.rcqe_cmpl);
+       if ((bf_get(lpfc_cqe_code,
+                   &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
+               fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
+                             &dmabuf->cq_event.cqe.rcqe_cmpl);
+       else
+               fcfi = bf_get(lpfc_rcqe_fcf_id,
+                             &dmabuf->cq_event.cqe.rcqe_cmpl);
        vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi);
        if (!vport || !(vport->vpi_state & LPFC_VPI_REGISTERED)) {
                /* throw out the frame */
@@ -14450,6 +14634,92 @@ fail_fcf_read:
        return error;
 }
 
+/**
+ * lpfc_check_next_fcf_pri_level - Set up rr_bmask for the next priority level
+ * @phba: pointer to the lpfc_hba struct for this port.
+ *
+ * This routine is called from the lpfc_sli4_fcf_rr_next_index_get
+ * routine when the rr_bmask is empty. The FCF indices are put into the
+ * rr_bmask based on their priority level, from the highest priority to
+ * the lowest. The most likely FCF candidate will be in the highest
+ * priority group. When this routine is called it searches the fcf_pri
+ * list for the next lowest priority group and repopulates the rr_bmask
+ * with only those fcf_indexes.
+ *
+ * Returns:
+ * 1 = success, 0 = failure.
+ **/
+int
+lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
+{
+       uint16_t next_fcf_pri;
+       uint16_t last_index;
+       struct lpfc_fcf_pri *fcf_pri;
+       int rc;
+       int ret = 0;
+
+       last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
+                       LPFC_SLI4_FCF_TBL_INDX_MAX);
+       lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+                       "3060 Last IDX %d\n", last_index);
+       if (list_empty(&phba->fcf.fcf_pri_list)) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
+                       "3061 Last IDX %d\n", last_index);
+               return 0; /* Empty rr list */
+       }
+       next_fcf_pri = 0;
+       /*
+        * Clear the rr_bmask and set all of the bits that are at this
+        * priority.
+        */
+       memset(phba->fcf.fcf_rr_bmask, 0,
+                       sizeof(*phba->fcf.fcf_rr_bmask));
+       spin_lock_irq(&phba->hbalock);
+       list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
+               if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
+                       continue;
+               /*
+                * The first priority that has not had a FLOGI failure
+                * will be the highest.
+                */
+               if (!next_fcf_pri)
+                       next_fcf_pri = fcf_pri->fcf_rec.priority;
+               spin_unlock_irq(&phba->hbalock);
+               if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
+                       rc = lpfc_sli4_fcf_rr_index_set(phba,
+                                               fcf_pri->fcf_rec.fcf_index);
+                       if (rc)
+                               return 0;
+               }
+               spin_lock_irq(&phba->hbalock);
+       }
+       /*
+        * If next_fcf_pri was not set above and the list is not empty, then
+        * we have failed FLOGIs on all of them. So reset the FLOGI-failed
+        * flags and start at the beginning.
+        */
+       if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
+               list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
+                       fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
+                       /*
+                        * The first priority that has not had a FLOGI failure
+                        * will be the highest.
+                        */
+                       if (!next_fcf_pri)
+                               next_fcf_pri = fcf_pri->fcf_rec.priority;
+                       spin_unlock_irq(&phba->hbalock);
+                       if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
+                               rc = lpfc_sli4_fcf_rr_index_set(phba,
+                                               fcf_pri->fcf_rec.fcf_index);
+                               if (rc)
+                                       return 0;
+                       }
+                       spin_lock_irq(&phba->hbalock);
+               }
+       } else
+               ret = 1;
+       spin_unlock_irq(&phba->hbalock);
+
+       return ret;
+}
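
A toy, array-based model of the regrouping this function performs, assuming
a lower numeric value means higher priority, priorities are nonzero, and
plain arrays stand in for the driver's list and bitmap types:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TBL_MAX 64

struct model_fcf {
        uint16_t index;
        uint16_t priority;      /* nonzero; lower value = higher priority */
        bool flogi_failed;
};

static int repopulate_rr_mask(const struct model_fcf *recs, int nrecs,
                              uint8_t bmask[TBL_MAX])
{
        uint16_t next_pri = 0;
        int i;

        memset(bmask, 0, TBL_MAX);
        for (i = 0; i < nrecs; i++) {
                if (recs[i].flogi_failed)
                        continue;
                /* The first record without a FLOGI failure sets the
                 * priority group to repopulate.
                 */
                if (!next_pri)
                        next_pri = recs[i].priority;
                if (recs[i].priority == next_pri)
                        bmask[recs[i].index] = 1;
        }
        return next_pri ? 1 : 0;        /* 1 = success, 0 = failure */
}

int main(void)
{
        const struct model_fcf recs[] = {
                { 3, 1, true }, { 7, 2, false }, { 9, 2, false },
        };
        uint8_t bmask[TBL_MAX];
        int i;

        if (repopulate_rr_mask(recs, 3, bmask))
                for (i = 0; i < TBL_MAX; i++)
                        if (bmask[i])
                                printf("eligible FCF idx %d\n", i);
        return 0;
}
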
 /**
  * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
  * @phba: pointer to lpfc hba data structure.
@@ -14466,6 +14736,7 @@ lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
        uint16_t next_fcf_index;
 
        /* Search start from next bit of currently registered FCF index */
+next_priority:
        next_fcf_index = (phba->fcf.current_rec.fcf_indx + 1) %
                                        LPFC_SLI4_FCF_TBL_INDX_MAX;
        next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
@@ -14473,17 +14744,46 @@ lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
                                       next_fcf_index);
 
        /* Wrap around condition on phba->fcf.fcf_rr_bmask */
-       if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
+       if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
+               /*
+                * If we have wrapped then we need to clear the bits that
+                * have been tested so that we can detect when we should
+                * change the priority level.
+                */
                next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
                                               LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
+       }
 
        /* Check roundrobin failover list empty condition */
-       if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
+       if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
+               next_fcf_index == phba->fcf.current_rec.fcf_indx) {
+               /*
+                * If the next fcf index is not found, check whether there
+                * are lower priority level fcfs in the fcf_priority list.
+                * Set up the rr_bmask with all of the available fcf bits
+                * at that level and continue the selection process.
+                */
+               if (lpfc_check_next_fcf_pri_level(phba))
+                       goto next_priority;
                lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
                                "2844 No roundrobin failover FCF available\n");
-               return LPFC_FCOE_FCF_NEXT_NONE;
+               if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
+                       return LPFC_FCOE_FCF_NEXT_NONE;
+               else {
+                       lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
+                               "3063 Only FCF available idx %d, flag %x\n",
+                               next_fcf_index,
+                               phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag);
+                       return next_fcf_index;
+               }
        }
 
+       if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
+               phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
+               LPFC_FCF_FLOGI_FAILED)
+               goto next_priority;
+
        lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
                        "2845 Get next roundrobin failover FCF (x%x)\n",
                        next_fcf_index);
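
The wraparound search in this routine can be modelled in a few lines of
user-space C; find_next_bit() is replaced with a plain loop and the
priority-level fallback is reduced to a -1 return (illustrative only):

#include <stdint.h>
#include <stdio.h>

#define TBL_MAX 64

static int find_next_set(const uint8_t *bm, int size, int start)
{
        int i;

        for (i = start; i < size; i++)
                if (bm[i])
                        return i;
        return size;    /* mimic find_next_bit(): size means not found */
}

static int rr_next_index(const uint8_t *bm, int current)
{
        int next = find_next_set(bm, TBL_MAX, (current + 1) % TBL_MAX);

        if (next >= TBL_MAX)    /* wrap around to the start of the mask */
                next = find_next_set(bm, TBL_MAX, 0);
        if (next >= TBL_MAX || next == current)
                return -1;      /* caller would fall back to the next
                                 * priority level here */
        return next;
}

int main(void)
{
        uint8_t bm[TBL_MAX] = { 0 };

        bm[5] = 1;
        bm[40] = 1;
        printf("after 40 -> %d\n", rr_next_index(bm, 40));   /* wraps to 5 */
        printf("after 5  -> %d\n", rr_next_index(bm, 5));    /* 40 */
        return 0;
}
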
@@ -14535,6 +14835,7 @@ lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
 void
 lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
 {
+       struct lpfc_fcf_pri *fcf_pri;
+
        if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
                lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
                                "2762 FCF (x%x) reached driver's book "
@@ -14543,6 +14844,14 @@ lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
                return;
        }
        /* Clear the eligible FCF record index bmask */
+       spin_lock_irq(&phba->hbalock);
+       list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
+               if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
+                       list_del_init(&fcf_pri->list);
+                       break;
+               }
+       }
+       spin_unlock_irq(&phba->hbalock);
        clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
 
        lpfc_printf_log(phba, KERN_INFO, LOG_FIP,