1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2006 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
9 * This program is free software; you can redistribute it and/or *
10 * modify it under the terms of version 2 of the GNU General *
11 * Public License as published by the Free Software Foundation. *
12 * This program is distributed in the hope that it will be useful. *
13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17 * TO BE LEGALLY INVALID. See the GNU General Public License for *
18 * more details, a copy of which can be found in the file COPYING *
19 * included with this package. *
20 *******************************************************************/
22 #include <linux/blkdev.h>
23 #include <linux/pci.h>
24 #include <linux/interrupt.h>
26 #include <scsi/scsi.h>
27 #include <scsi/scsi_device.h>
28 #include <scsi/scsi_host.h>
29 #include <scsi/scsi_transport_fc.h>
33 #include "lpfc_disc.h"
34 #include "lpfc_scsi.h"
36 #include "lpfc_logmsg.h"
37 #include "lpfc_crtn.h"
/* Forward declaration of the ELS retry handler used by the completion
 * routines below.
 * NOTE(review): the prototype is cut mid-argument-list in this view —
 * confirm the remaining parameters against the full source. */
39 static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *,
/* Maximum number of times an ELS command is retried before giving up. */
41 static int lpfc_max_els_tries = 3;
/*
 * lpfc_els_chk_latt - check for a pending link-attention event during
 * discovery.
 *
 * Reads the Host Attention register under the host lock; if HA_LATT is set
 * while discovery is still in progress, flags FC_ABORT_DISCOVERY and issues
 * a CLEAR_LA mailbox command so the link event can be (re)taken.
 * Callers use a nonzero return to abandon their in-progress ELS completion.
 * NOTE(review): this view is missing several lines (opening brace, local
 * declarations for ha_copy/rc/mbox, the early/final return statements) —
 * verify against the complete file.
 */
44 lpfc_els_chk_latt(struct lpfc_hba * phba)
46 struct lpfc_sli *psli;
/* Nothing to do once discovery is finished or the link is already down. */
53 if ((phba->hba_state >= LPFC_HBA_READY) ||
54 (phba->hba_state == LPFC_LINK_DOWN))
57 /* Read the HBA Host Attention Register */
58 spin_lock_irq(phba->host->host_lock);
59 ha_copy = readl(phba->HAregaddr);
60 spin_unlock_irq(phba->host->host_lock);
/* No link-attention bit pending: discovery may continue undisturbed. */
62 if (!(ha_copy & HA_LATT))
65 /* Pending Link Event during Discovery */
66 lpfc_printf_log(phba, KERN_WARNING, LOG_DISCOVERY,
67 "%d:0237 Pending Link Event during "
68 "Discovery: State x%x\n",
69 phba->brd_no, phba->hba_state);
71 /* CLEAR_LA should re-enable link attention events and
72 * we should then immediately take a LATT event. The
73 * LATT processing should call lpfc_linkdown() which
74 * will cleanup any left over in-progress discovery
77 spin_lock_irq(phba->host->host_lock);
78 phba->fc_flag |= FC_ABORT_DISCOVERY;
79 spin_unlock_irq(phba->host->host_lock);
/* Only kick off CLEAR_LA if one is not already outstanding. */
81 if (phba->hba_state != LPFC_CLEAR_LA) {
82 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
83 phba->hba_state = LPFC_CLEAR_LA;
84 lpfc_clear_la(phba, mbox);
85 mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
86 rc = lpfc_sli_issue_mbox (phba, mbox,
87 (MBX_NOWAIT | MBX_STOP_IOCB));
/* Mailbox could not be issued: free it and mark the HBA errored. */
88 if (rc == MBX_NOT_FINISHED) {
89 mempool_free(mbox, phba->mbox_mem_pool);
90 phba->hba_state = LPFC_HBA_ERROR;
/*
 * lpfc_prep_els_iocb - allocate and initialize an iocb for an ELS exchange.
 * @expectRsp: nonzero for an ELS request (a response payload buffer is also
 *             allocated); zero for an ELS response (XMIT_ELS_RSP64_CX).
 * @cmdSize:   size of the command payload.
 * @retry:     retry count stored in the iocb for the retry logic.
 * @ndlp:      node this exchange targets (saved in context1).
 * @did:       destination N_Port ID.
 * @elscmd:    ELS command code (used for logging).
 *
 * Allocates the iocb, DMA buffers for the command payload (pcmd), the
 * response payload (prsp, requests only) and the buffer-pointer list
 * (pbuflist), then wires up the BDEs.  All three buffers are saved in the
 * iocb contexts so the completion path can release them.
 * On any allocation failure every previously acquired resource is freed.
 * NOTE(review): lines are missing in this view (returns on the failure
 * paths, the prsp/pbuflist second mbuf arguments, the expectRsp branches) —
 * verify against the complete file.
 */
99 static struct lpfc_iocbq *
100 lpfc_prep_els_iocb(struct lpfc_hba * phba, uint8_t expectRsp,
101 uint16_t cmdSize, uint8_t retry, struct lpfc_nodelist * ndlp,
102 uint32_t did, uint32_t elscmd)
104 struct lpfc_sli_ring *pring;
105 struct lpfc_iocbq *elsiocb;
106 struct lpfc_dmabuf *pcmd, *prsp, *pbuflist;
107 struct ulp_bde64 *bpl;
110 pring = &phba->sli.ring[LPFC_ELS_RING];
/* ELS traffic requires the link to be at least up. */
112 if (phba->hba_state < LPFC_LINK_UP)
115 /* Allocate buffer for command iocb */
116 spin_lock_irq(phba->host->host_lock);
117 elsiocb = lpfc_sli_get_iocbq(phba);
118 spin_unlock_irq(phba->host->host_lock);
122 icmd = &elsiocb->iocb;
124 /* fill in BDEs for command */
125 /* Allocate buffer for command payload */
126 if (((pcmd = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL)) == 0) ||
127 ((pcmd->virt = lpfc_mbuf_alloc(phba,
128 MEM_PRI, &(pcmd->phys))) == 0)) {
/* Allocation failed: give the iocb back before bailing out. */
131 spin_lock_irq(phba->host->host_lock);
132 lpfc_sli_release_iocbq(phba, elsiocb);
133 spin_unlock_irq(phba->host->host_lock);
137 INIT_LIST_HEAD(&pcmd->list);
139 /* Allocate buffer for response payload */
141 prsp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
143 prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
/* NOTE(review): prsp is dereferenced on the line above before this NULL
 * check; in the full source the allocation is guarded — confirm. */
145 if (prsp == 0 || prsp->virt == 0) {
147 lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
149 spin_lock_irq(phba->host->host_lock);
150 lpfc_sli_release_iocbq(phba, elsiocb);
151 spin_unlock_irq(phba->host->host_lock);
154 INIT_LIST_HEAD(&prsp->list);
159 /* Allocate buffer for Buffer ptr list */
160 pbuflist = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
162 pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
164 if (pbuflist == 0 || pbuflist->virt == 0) {
165 spin_lock_irq(phba->host->host_lock);
166 lpfc_sli_release_iocbq(phba, elsiocb);
167 spin_unlock_irq(phba->host->host_lock);
168 lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
169 lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
176 INIT_LIST_HEAD(&pbuflist->list);
/* Point the iocb's BDL at the buffer-pointer list just allocated. */
178 icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
179 icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
180 icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BDL;
/* Request: two BDEs (cmd + rsp); response: a single BDE. */
182 icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
183 icmd->un.elsreq64.remoteID = did; /* DID */
184 icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
186 icmd->un.elsreq64.bdl.bdeSize = sizeof (struct ulp_bde64);
187 icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
190 icmd->ulpBdeCount = 1;
192 icmd->ulpClass = CLASS3;
/* First BPL entry describes the command payload. */
194 bpl = (struct ulp_bde64 *) pbuflist->virt;
195 bpl->addrLow = le32_to_cpu(putPaddrLow(pcmd->phys));
196 bpl->addrHigh = le32_to_cpu(putPaddrHigh(pcmd->phys));
197 bpl->tus.f.bdeSize = cmdSize;
198 bpl->tus.f.bdeFlags = 0;
199 bpl->tus.w = le32_to_cpu(bpl->tus.w);
/* Second BPL entry (requests only) receives the ELS response. */
203 bpl->addrLow = le32_to_cpu(putPaddrLow(prsp->phys));
204 bpl->addrHigh = le32_to_cpu(putPaddrHigh(prsp->phys));
205 bpl->tus.f.bdeSize = FCELSSIZE;
206 bpl->tus.f.bdeFlags = BUFF_USE_RCV;
207 bpl->tus.w = le32_to_cpu(bpl->tus.w);
210 /* Save for completion so we can release these resources */
211 elsiocb->context1 = (uint8_t *) ndlp;
212 elsiocb->context2 = (uint8_t *) pcmd;
213 elsiocb->context3 = (uint8_t *) pbuflist;
214 elsiocb->retry = retry;
/* Driver timeout: twice R_A_TOV plus fixed driver slack. */
215 elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT;
/* Chain prsp behind pcmd so both are reachable from context2 at
 * completion time. */
218 list_add(&prsp->list, &pcmd->list);
222 /* Xmit ELS command <elsCmd> to remote NPORT <did> */
223 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
224 "%d:0116 Xmit ELS command x%x to remote "
225 "NPORT x%x Data: x%x x%x\n",
226 phba->brd_no, elscmd,
227 did, icmd->ulpIoTag, phba->hba_state);
229 /* Xmit ELS response <elsCmd> to remote NPORT <did> */
230 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
231 "%d:0117 Xmit ELS response x%x to remote "
232 "NPORT x%x Data: x%x x%x\n",
233 phba->brd_no, elscmd,
234 ndlp->nlp_DID, icmd->ulpIoTag, cmdSize);
/*
 * lpfc_cmpl_els_flogi_fabric - process a FLOGI accept from a Fabric F_Port.
 *
 * Records fabric state (FC_FABRIC, and FC_PUBLIC_LOOP on loop topologies),
 * converts the fabric's E_D_TOV/R_A_TOV into driver units, copies the
 * fabric's WWPN/WWNN and class-of-service support into @ndlp, adopts the
 * assigned N_Port ID, then issues CONFIG_LINK followed by a fabric
 * REG_LOGIN mailbox command.
 * NOTE(review): the function's return type/brace, NULL checks after the
 * mempool allocations, and the error/return paths are missing from this
 * view — verify against the complete file.
 */
242 lpfc_cmpl_els_flogi_fabric(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
243 struct serv_parm *sp, IOCB_t *irsp)
248 spin_lock_irq(phba->host->host_lock);
249 phba->fc_flag |= FC_FABRIC;
250 spin_unlock_irq(phba->host->host_lock);
252 phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov);
253 if (sp->cmn.edtovResolution) /* E_D_TOV ticks are in nanoseconds */
254 phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000;
/* R_A_TOV arrives in milliseconds; store it in seconds, rounded up. */
256 phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000;
258 if (phba->fc_topology == TOPOLOGY_LOOP) {
259 spin_lock_irq(phba->host->host_lock);
260 phba->fc_flag |= FC_PUBLIC_LOOP;
261 spin_unlock_irq(phba->host->host_lock);
264 * If we are a N-port connected to a Fabric, fixup sparam's so
265 * logins to devices on remote loops work.
267 phba->fc_sparam.cmn.altBbCredit = 1;
/* The fabric assigns our N_Port ID in word 4 of the response iocb. */
270 phba->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
271 memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name));
272 memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof (struct lpfc_name));
273 ndlp->nlp_class_sup = 0;
274 if (sp->cls1.classValid)
275 ndlp->nlp_class_sup |= FC_COS_CLASS1;
276 if (sp->cls2.classValid)
277 ndlp->nlp_class_sup |= FC_COS_CLASS2;
278 if (sp->cls3.classValid)
279 ndlp->nlp_class_sup |= FC_COS_CLASS3;
280 if (sp->cls4.classValid)
281 ndlp->nlp_class_sup |= FC_COS_CLASS4;
282 ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
283 sp->cmn.bbRcvSizeLsb;
/* Keep a copy of the fabric service parameters for later reference. */
284 memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
286 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
290 phba->hba_state = LPFC_FABRIC_CFG_LINK;
291 lpfc_config_link(phba, mbox);
292 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
294 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT | MBX_STOP_IOCB);
295 if (rc == MBX_NOT_FINISHED)
298 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
302 if (lpfc_reg_login(phba, Fabric_DID, (uint8_t *) sp, mbox, 0))
306 * set_slim mailbox command needs to execute first,
307 * queue this command to be processed later.
309 mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
310 mbox->context2 = ndlp;
312 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT | MBX_STOP_IOCB);
313 if (rc == MBX_NOT_FINISHED)
/* Error path: release the mailbox that could not be issued. */
319 mempool_free(mbox, phba->mbox_mem_pool);
325 * We FLOGIed into an NPort, initiate pt2pt protocol
/*
 * lpfc_cmpl_els_flogi_nport - process a FLOGI accept from another N_Port
 * (no fabric present): set up point-to-point operation.
 *
 * Clears fabric flags, restores default timeouts, and compares WWPNs to
 * decide which side initiates PLOGI.  The higher WWPN side takes
 * PT2PT_LocalID, issues CONFIG_LINK, and prepares an ndlp for the remote
 * at PT2PT_RemoteID; the other side simply waits for the PLOGI.  Finally
 * FC_PT2PT is set and discovery is started.
 * NOTE(review): return type/brace, NULL checks after the allocations, and
 * the return statements are missing from this view — verify against the
 * complete file.
 */
328 lpfc_cmpl_els_flogi_nport(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
329 struct serv_parm *sp)
334 spin_lock_irq(phba->host->host_lock);
335 phba->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
336 spin_unlock_irq(phba->host->host_lock);
338 phba->fc_edtov = FF_DEF_EDTOV;
339 phba->fc_ratov = FF_DEF_RATOV;
/* Compare our WWPN against the remote's to pick the PLOGI initiator. */
340 rc = memcmp(&phba->fc_portname, &sp->portName,
341 sizeof(struct lpfc_name));
343 /* This side will initiate the PLOGI */
344 spin_lock_irq(phba->host->host_lock);
345 phba->fc_flag |= FC_PT2PT_PLOGI;
346 spin_unlock_irq(phba->host->host_lock);
349 * N_Port ID cannot be 0, set our to LocalID the other
350 * side will be RemoteID.
355 phba->fc_myDID = PT2PT_LocalID;
357 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
361 lpfc_config_link(phba, mbox);
363 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
364 rc = lpfc_sli_issue_mbox(phba, mbox,
365 MBX_NOWAIT | MBX_STOP_IOCB);
366 if (rc == MBX_NOT_FINISHED) {
367 mempool_free(mbox, phba->mbox_mem_pool);
370 mempool_free(ndlp, phba->nlp_mem_pool);
/* Reuse an existing ndlp for the remote side if one is on any list. */
372 ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, PT2PT_RemoteID);
375 * Cannot find existing Fabric ndlp, so allocate a
378 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
382 lpfc_nlp_init(phba, ndlp, PT2PT_RemoteID);
385 memcpy(&ndlp->nlp_portname, &sp->portName,
386 sizeof(struct lpfc_name));
387 memcpy(&ndlp->nlp_nodename, &sp->nodeName,
388 sizeof(struct lpfc_name));
/* Queue the remote node for discovery via the NPR list. */
389 ndlp->nlp_state = NLP_STE_NPR_NODE;
390 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
391 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
393 /* This side will wait for the PLOGI */
394 mempool_free( ndlp, phba->nlp_mem_pool);
397 spin_lock_irq(phba->host->host_lock);
398 phba->fc_flag |= FC_PT2PT;
399 spin_unlock_irq(phba->host->host_lock);
401 /* Start discovery - this should just do CLEAR_LA */
402 lpfc_disc_start(phba);
/*
 * lpfc_cmpl_els_flogi - completion handler for an issued FLOGI.
 *
 * On error: retries via lpfc_els_retry when possible, otherwise clears
 * fabric state and (when no abort/link-down caused the failure) falls back
 * to loop-map discovery.  On success: inspects the returned service
 * parameters and dispatches to the fabric or point-to-point handler.
 * Always frees the command iocb at the end.
 * NOTE(review): return type/brace, several goto/return lines, and the
 * F_Port-vs-N_Port test before the fabric/nport dispatch are missing from
 * this view — verify against the complete file.
 */
409 lpfc_cmpl_els_flogi(struct lpfc_hba * phba,
410 struct lpfc_iocbq * cmdiocb, struct lpfc_iocbq * rspiocb)
412 IOCB_t *irsp = &rspiocb->iocb;
413 struct lpfc_nodelist *ndlp = cmdiocb->context1;
414 struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
415 struct serv_parm *sp;
418 /* Check to see if link went down during discovery */
419 if (lpfc_els_chk_latt(phba)) {
420 lpfc_nlp_remove(phba, ndlp);
424 if (irsp->ulpStatus) {
425 /* Check for retry */
426 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
427 /* ELS command is being retried */
430 /* FLOGI failed, so there is no fabric */
431 spin_lock_irq(phba->host->host_lock);
432 phba->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
433 spin_unlock_irq(phba->host->host_lock);
435 /* If private loop, then allow max outstandting els to be
436 * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no
437 * alpa map would take too long otherwise.
439 if (phba->alpa_map[0] == 0) {
440 phba->cfg_discovery_threads =
441 LPFC_MAX_DISC_THREADS;
445 lpfc_printf_log(phba,
448 "%d:0100 FLOGI failure Data: x%x x%x x%x\n",
450 irsp->ulpStatus, irsp->un.ulpWord[4],
456 * The FLogI succeeded. Sync the data for the CPU before
459 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
/* Service parameters follow the 4-byte ELS command word. */
461 sp = prsp->virt + sizeof(uint32_t);
463 /* FLOGI completes successfully */
/* NOTE(review): "sucessfully" typo in the runtime log text below —
 * cannot be corrected in a comments-only pass. */
464 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
465 "%d:0101 FLOGI completes sucessfully "
466 "Data: x%x x%x x%x x%x\n",
468 irsp->un.ulpWord[4], sp->cmn.e_d_tov,
469 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution);
471 if (phba->hba_state == LPFC_FLOGI) {
473 * If Common Service Parameters indicate Nport
474 * we are point to point, if Fport we are Fabric.
477 rc = lpfc_cmpl_els_flogi_fabric(phba, ndlp, sp, irsp);
479 rc = lpfc_cmpl_els_flogi_nport(phba, ndlp, sp);
/* Failure fall-through: drop the fabric ndlp. */
486 lpfc_nlp_remove(phba, ndlp);
/* Only fall back to loop-map discovery when the failure was not a
 * driver-initiated abort or a link/SLI teardown. */
488 if (irsp->ulpStatus != IOSTAT_LOCAL_REJECT ||
489 (irsp->un.ulpWord[4] != IOERR_SLI_ABORTED &&
490 irsp->un.ulpWord[4] != IOERR_SLI_DOWN)) {
491 /* FLOGI failed, so just use loop map to make discovery list */
492 lpfc_disc_list_loopmap(phba);
494 /* Start discovery */
495 lpfc_disc_start(phba);
499 lpfc_els_free_iocb(phba, cmdiocb);
/*
 * lpfc_issue_els_flogi - build and transmit a FLOGI to @ndlp.
 *
 * Prepares an ELS iocb whose payload is the ELS_CMD_FLOGI word followed by
 * our service parameters (adjusted for fabric login: class settings and a
 * minimum FC-PH level), arms the discovery timer with the FLOGI-specific
 * R_A_TOV, and issues the iocb with lpfc_cmpl_els_flogi as completion.
 * NOTE(review): return type/brace, local declarations (icmd/pcmd/cmdsize/
 * tmo/rc), and the return statements are missing from this view.
 */
503 lpfc_issue_els_flogi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
506 struct serv_parm *sp;
508 struct lpfc_iocbq *elsiocb;
509 struct lpfc_sli_ring *pring;
515 pring = &phba->sli.ring[LPFC_ELS_RING];
517 cmdsize = (sizeof (uint32_t) + sizeof (struct serv_parm));
518 elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry, ndlp,
519 ndlp->nlp_DID, ELS_CMD_FLOGI);
523 icmd = &elsiocb->iocb;
524 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
526 /* For FLOGI request, remainder of payload is service parameters */
527 *((uint32_t *) (pcmd)) = ELS_CMD_FLOGI;
528 pcmd += sizeof (uint32_t);
529 memcpy(pcmd, &phba->fc_sparam, sizeof (struct serv_parm));
530 sp = (struct serv_parm *) pcmd;
532 /* Setup CSPs accordingly for Fabric */
534 sp->cmn.w2.r_a_tov = 0;
535 sp->cls1.classValid = 0;
536 sp->cls2.seqDelivery = 1;
537 sp->cls3.seqDelivery = 1;
/* Advertise at least FC-PH3 to the fabric. */
538 if (sp->cmn.fcphLow < FC_PH3)
539 sp->cmn.fcphLow = FC_PH3;
540 if (sp->cmn.fcphHigh < FC_PH3)
541 sp->cmn.fcphHigh = FC_PH3;
/* Start the discovery timer using the FLOGI timeout, then restore the
 * normal R_A_TOV value. */
543 tmo = phba->fc_ratov;
544 phba->fc_ratov = LPFC_DISC_FLOGI_TMO;
545 lpfc_set_disctmo(phba);
546 phba->fc_ratov = tmo;
548 phba->fc_stat.elsXmitFLOGI++;
549 elsiocb->iocb_cmpl = lpfc_cmpl_els_flogi;
550 spin_lock_irq(phba->host->host_lock);
551 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
552 spin_unlock_irq(phba->host->host_lock);
553 if (rc == IOCB_ERROR) {
554 lpfc_els_free_iocb(phba, elsiocb);
/*
 * lpfc_els_abort_flogi - abort any FLOGI outstanding to the fabric.
 *
 * Walks the ELS ring's txcmplq under the host lock looking for
 * ELS_REQUEST64 iocbs whose node is Fabric_DID, removes each from the
 * queue, issues an abort for iotag-tracked commands, and completes the
 * iocb locally with IOSTAT_LOCAL_REJECT (dropping the lock around the
 * completion callback).
 * NOTE(review): return type/brace, the icmd assignment inside the loop,
 * and the abort-iotag argument lines are missing from this view.
 */
561 lpfc_els_abort_flogi(struct lpfc_hba * phba)
563 struct lpfc_sli_ring *pring;
564 struct lpfc_iocbq *iocb, *next_iocb;
565 struct lpfc_nodelist *ndlp;
568 /* Abort outstanding I/O on NPort <nlp_DID> */
569 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
570 "%d:0201 Abort outstanding I/O on NPort x%x\n",
571 phba->brd_no, Fabric_DID);
573 pring = &phba->sli.ring[LPFC_ELS_RING];
576 * Check the txcmplq for an iocb that matches the nport the driver is
579 spin_lock_irq(phba->host->host_lock);
580 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
582 if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR) {
583 ndlp = (struct lpfc_nodelist *)(iocb->context1);
584 if (ndlp && (ndlp->nlp_DID == Fabric_DID)) {
585 list_del(&iocb->list);
586 pring->txcmplq_cnt--;
588 if ((icmd->un.elsreq64.bdl.ulpIoTag32)) {
589 lpfc_sli_issue_abort_iotag32
592 if (iocb->iocb_cmpl) {
/* Complete locally as an abort; the callback runs without
 * the host lock held. */
593 icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
594 icmd->un.ulpWord[4] =
596 spin_unlock_irq(phba->host->host_lock);
597 (iocb->iocb_cmpl) (phba, iocb, iocb);
598 spin_lock_irq(phba->host->host_lock);
600 lpfc_sli_release_iocbq(phba, iocb);
604 spin_unlock_irq(phba->host->host_lock);
/*
 * lpfc_initial_flogi - kick off fabric login after link-up.
 *
 * Finds (or allocates and initializes) the Fabric_DID ndlp, dequeues it,
 * and issues a FLOGI with zero prior retries; on issue failure the freshly
 * allocated ndlp is returned to the pool.
 * NOTE(review): return type/brace, NULL checks, and returns are missing
 * from this view.
 */
610 lpfc_initial_flogi(struct lpfc_hba * phba)
612 struct lpfc_nodelist *ndlp;
614 /* First look for the Fabric ndlp */
615 ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, Fabric_DID);
617 /* Cannot find existing Fabric ndlp, so allocate a new one */
618 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
621 lpfc_nlp_init(phba, ndlp, Fabric_DID);
/* Take the node off whatever list it is on before the FLOGI. */
623 lpfc_nlp_list(phba, ndlp, NLP_JUST_DQ);
625 if (lpfc_issue_els_flogi(phba, ndlp, 0)) {
626 mempool_free( ndlp, phba->nlp_mem_pool);
/*
 * lpfc_more_plogi - account for one completed PLOGI and continue discovery.
 *
 * Decrements num_disc_nodes and, while FC_NLP_MORE is set, walks the NPR
 * list issuing the remaining ELS PLOGIs via lpfc_els_disc_plogi.
 * NOTE(review): return type/brace and the sentplogi declaration are
 * missing from this view.
 */
632 lpfc_more_plogi(struct lpfc_hba * phba)
636 if (phba->num_disc_nodes)
637 phba->num_disc_nodes--;
639 /* Continue discovery with <num_disc_nodes> PLOGIs to go */
640 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
641 "%d:0232 Continue discovery with %d PLOGIs to go "
642 "Data: x%x x%x x%x\n",
643 phba->brd_no, phba->num_disc_nodes, phba->fc_plogi_cnt,
644 phba->fc_flag, phba->hba_state);
646 /* Check to see if there are more PLOGIs to be sent */
647 if (phba->fc_flag & FC_NLP_MORE) {
648 /* go thru NPR list and issue any remaining ELS PLOGIs */
649 sentplogi = lpfc_els_disc_plogi(phba);
/*
 * lpfc_plogi_confirm_nport - reconcile the PLOGI target's WWPN with our
 * node database.
 *
 * Extracts the remote's service parameters from the PLOGI response and
 * looks the WWPN up.  If the WWPN already matches @ndlp (or @ndlp has no
 * recorded WWPN yet) nothing changes; otherwise the login is transferred
 * to the node that really owns that WWPN (allocating one if needed): the
 * new node inherits the DID and moves to PLOGI_ISSUE state while the old
 * node is unregistered, has its DID cleared (two ndlps cannot share a
 * DID), and is parked on the NPR list.
 * NOTE(review): several lines (returns, lp/rc declarations, branch
 * structure) are missing from this view.
 */
654 static struct lpfc_nodelist *
655 lpfc_plogi_confirm_nport(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
656 struct lpfc_nodelist *ndlp)
658 struct lpfc_nodelist *new_ndlp;
659 struct lpfc_dmabuf *pcmd, *prsp;
661 struct serv_parm *sp;
662 uint8_t name[sizeof (struct lpfc_name)];
665 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
/* prsp was chained directly behind pcmd by lpfc_prep_els_iocb. */
666 prsp = (struct lpfc_dmabuf *) pcmd->list.next;
667 lp = (uint32_t *) prsp->virt;
668 sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
670 /* Now we to find out if the NPort we are logging into, matches the WWPN
671 * we have for that ndlp. If not, we have some work to do.
673 new_ndlp = lpfc_findnode_wwpn(phba, NLP_SEARCH_ALL, &sp->portName);
/* An all-zero name means ndlp has no WWPN recorded yet. */
675 memset(name, 0, sizeof (struct lpfc_name));
676 rc = memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name));
677 if (!rc || (new_ndlp == ndlp)) {
/* GFP_ATOMIC: presumably this path may run in a context that cannot
 * sleep — TODO confirm against the caller. */
682 new_ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC);
686 lpfc_nlp_init(phba, new_ndlp, ndlp->nlp_DID);
689 lpfc_unreg_rpi(phba, new_ndlp);
690 new_ndlp->nlp_prev_state = ndlp->nlp_state;
691 new_ndlp->nlp_DID = ndlp->nlp_DID;
692 new_ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
693 lpfc_nlp_list(phba, new_ndlp, NLP_PLOGI_LIST);
695 /* Move this back to NPR list */
696 lpfc_unreg_rpi(phba, ndlp);
697 ndlp->nlp_DID = 0; /* Two ndlps cannot have the same did */
698 ndlp->nlp_state = NLP_STE_NPR_NODE;
699 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
/*
 * lpfc_cmpl_els_plogi - completion handler for an issued PLOGI.
 *
 * Locates the target node by the response's remote ID, clears its
 * 2B_DISC flag, then either retries the PLOGI, feeds the result to the
 * discovery state machine (skipping the DSM for driver-aborted commands),
 * or — on success — first confirms the WWPN via
 * lpfc_plogi_confirm_nport.  When the node was part of discovery, the
 * remaining-PLOGI accounting and RSCN follow-up run afterwards.  Always
 * frees the command iocb.
 * NOTE(review): return type/brace, the irsp declaration, goto labels, the
 * did assignment, and several event constants are missing from this view.
 */
705 lpfc_cmpl_els_plogi(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
706 struct lpfc_iocbq * rspiocb)
709 struct lpfc_nodelist *ndlp;
710 int disc, rc, did, type;
713 /* we pass cmdiocb to state machine which needs rspiocb as well */
714 cmdiocb->context_un.rsp_iocb = rspiocb;
716 irsp = &rspiocb->iocb;
717 ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL,
718 irsp->un.elsreq64.remoteID);
722 /* Since ndlp can be freed in the disc state machine, note if this node
723 * is being used during discovery.
725 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
726 spin_lock_irq(phba->host->host_lock);
727 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
728 spin_unlock_irq(phba->host->host_lock);
731 /* PLOGI completes to NPort <nlp_DID> */
732 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
733 "%d:0102 PLOGI completes to NPort x%x "
734 "Data: x%x x%x x%x x%x x%x\n",
735 phba->brd_no, ndlp->nlp_DID, irsp->ulpStatus,
736 irsp->un.ulpWord[4], irsp->ulpTimeout, disc,
737 phba->num_disc_nodes);
739 /* Check to see if link went down during discovery */
740 if (lpfc_els_chk_latt(phba)) {
/* Re-mark for discovery; the pending link event will redrive it. */
741 spin_lock_irq(phba->host->host_lock);
742 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
743 spin_unlock_irq(phba->host->host_lock);
747 /* ndlp could be freed in DSM, save these values now */
748 type = ndlp->nlp_type;
751 if (irsp->ulpStatus) {
752 /* Check for retry */
753 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
754 /* ELS command is being retried */
756 spin_lock_irq(phba->host->host_lock);
757 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
758 spin_unlock_irq(phba->host->host_lock);
764 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
765 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
766 ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) ||
767 (irsp->un.ulpWord[4] == IOERR_LINK_DOWN) ||
768 (irsp->un.ulpWord[4] == IOERR_SLI_DOWN))) {
769 rc = NLP_STE_FREED_NODE;
771 rc = lpfc_disc_state_machine(phba, ndlp, cmdiocb,
775 /* Good status, call state machine */
776 ndlp = lpfc_plogi_confirm_nport(phba, cmdiocb, ndlp);
777 rc = lpfc_disc_state_machine(phba, ndlp, cmdiocb,
781 if (disc && phba->num_disc_nodes) {
782 /* Check to see if there are more PLOGIs to be sent */
783 lpfc_more_plogi(phba);
786 if (phba->num_disc_nodes == 0) {
787 spin_lock_irq(phba->host->host_lock);
788 phba->fc_flag &= ~FC_NDISC_ACTIVE;
789 spin_unlock_irq(phba->host->host_lock);
/* Discovery done: cancel the timer and finish any RSCN handling. */
791 lpfc_can_disctmo(phba);
792 if (phba->fc_flag & FC_RSCN_MODE) {
793 /* Check to see if more RSCNs came in while we were
794 * processing this one.
796 if ((phba->fc_rscn_id_cnt == 0) &&
797 (!(phba->fc_flag & FC_RSCN_DISCOVERY))) {
798 spin_lock_irq(phba->host->host_lock);
799 phba->fc_flag &= ~FC_RSCN_MODE;
800 spin_unlock_irq(phba->host->host_lock);
802 lpfc_els_handle_rscn(phba);
808 lpfc_els_free_iocb(phba, cmdiocb);
/*
 * lpfc_issue_els_plogi - build and transmit a PLOGI to @did.
 *
 * Payload is the ELS_CMD_PLOGI word followed by our service parameters,
 * with the FC-PH range clamped to at least FC_PH_4_3 / FC_PH3.  Note the
 * iocb is prepared with a NULL ndlp (context1 == 0); the node is resolved
 * by DID in the completion handler.
 * NOTE(review): return type/brace, local declarations (icmd/pcmd/cmdsize),
 * and the return statements are missing from this view.
 */
813 lpfc_issue_els_plogi(struct lpfc_hba * phba, uint32_t did, uint8_t retry)
815 struct serv_parm *sp;
817 struct lpfc_iocbq *elsiocb;
818 struct lpfc_sli_ring *pring;
819 struct lpfc_sli *psli;
824 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
826 cmdsize = (sizeof (uint32_t) + sizeof (struct serv_parm));
827 elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry, 0, did,
832 icmd = &elsiocb->iocb;
833 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
835 /* For PLOGI request, remainder of payload is service parameters */
836 *((uint32_t *) (pcmd)) = ELS_CMD_PLOGI;
837 pcmd += sizeof (uint32_t);
838 memcpy(pcmd, &phba->fc_sparam, sizeof (struct serv_parm));
839 sp = (struct serv_parm *) pcmd;
841 if (sp->cmn.fcphLow < FC_PH_4_3)
842 sp->cmn.fcphLow = FC_PH_4_3;
844 if (sp->cmn.fcphHigh < FC_PH3)
845 sp->cmn.fcphHigh = FC_PH3;
847 phba->fc_stat.elsXmitPLOGI++;
848 elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi;
849 spin_lock_irq(phba->host->host_lock);
850 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
851 spin_unlock_irq(phba->host->host_lock);
852 lpfc_els_free_iocb(phba, elsiocb);
855 spin_unlock_irq(phba->host->host_lock);
/*
 * lpfc_cmpl_els_prli - completion handler for an issued PRLI.
 *
 * Clears the node's PRLI_SND flag and the outstanding-PRLI counter, then
 * retries on error when possible, skips the discovery state machine for
 * driver-aborted commands, and otherwise delivers NLP_EVT_CMPL_PRLI to the
 * DSM.  Always frees the command iocb.
 * NOTE(review): return type/brace, the irsp declaration, goto labels, and
 * the error-path DSM event constant are missing from this view.
 */
860 lpfc_cmpl_els_prli(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
861 struct lpfc_iocbq * rspiocb)
864 struct lpfc_sli *psli;
865 struct lpfc_nodelist *ndlp;
868 /* we pass cmdiocb to state machine which needs rspiocb as well */
869 cmdiocb->context_un.rsp_iocb = rspiocb;
871 irsp = &(rspiocb->iocb);
872 ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
873 spin_lock_irq(phba->host->host_lock);
874 ndlp->nlp_flag &= ~NLP_PRLI_SND;
875 spin_unlock_irq(phba->host->host_lock);
877 /* PRLI completes to NPort <nlp_DID> */
878 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
879 "%d:0103 PRLI completes to NPort x%x "
880 "Data: x%x x%x x%x x%x\n",
881 phba->brd_no, ndlp->nlp_DID, irsp->ulpStatus,
882 irsp->un.ulpWord[4], irsp->ulpTimeout,
883 phba->num_disc_nodes);
/* Balance the increment done in lpfc_issue_els_prli. */
885 phba->fc_prli_sent--;
886 /* Check to see if link went down during discovery */
887 if (lpfc_els_chk_latt(phba))
890 if (irsp->ulpStatus) {
891 /* Check for retry */
892 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
893 /* ELS command is being retried */
897 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
898 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
899 ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) ||
900 (irsp->un.ulpWord[4] == IOERR_LINK_DOWN) ||
901 (irsp->un.ulpWord[4] == IOERR_SLI_DOWN))) {
904 lpfc_disc_state_machine(phba, ndlp, cmdiocb,
908 /* Good status, call state machine */
909 lpfc_disc_state_machine(phba, ndlp, cmdiocb, NLP_EVT_CMPL_PRLI);
913 lpfc_els_free_iocb(phba, cmdiocb);
/*
 * lpfc_issue_els_prli - build and transmit a PRLI (process login) to @ndlp.
 *
 * Payload is the ELS_CMD_PRLI word plus a zeroed PRLI parameter page that
 * advertises FCP initiator function; on firmware feature level >= 0x02,
 * FC-TAPE-related bits (ConfmComplAllowed, TaskRetryIdReq) are also set.
 * NLP_PRLI_SND is set while the iocb is outstanding and cleared again if
 * the issue fails; fc_prli_sent tracks the outstanding count.
 * NOTE(review): return type/brace, the npr/pcmd/cmdsize declarations, the
 * npr assignment, and the return statements are missing from this view.
 */
918 lpfc_issue_els_prli(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
923 struct lpfc_iocbq *elsiocb;
924 struct lpfc_sli_ring *pring;
925 struct lpfc_sli *psli;
930 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
932 cmdsize = (sizeof (uint32_t) + sizeof (PRLI));
933 elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry, ndlp,
934 ndlp->nlp_DID, ELS_CMD_PRLI);
938 icmd = &elsiocb->iocb;
939 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
941 /* For PRLI request, remainder of payload is service parameters */
942 memset(pcmd, 0, (sizeof (PRLI) + sizeof (uint32_t)));
943 *((uint32_t *) (pcmd)) = ELS_CMD_PRLI;
944 pcmd += sizeof (uint32_t);
946 /* For PRLI, remainder of payload is PRLI parameter page */
949 * If our firmware version is 3.20 or later,
950 * set the following bits for FC-TAPE support.
952 if (phba->vpd.rev.feaLevelHigh >= 0x02) {
953 npr->ConfmComplAllowed = 1;
955 npr->TaskRetryIdReq = 1;
957 npr->estabImagePair = 1;
958 npr->readXferRdyDis = 1;
960 /* For FCP support */
961 npr->prliType = PRLI_FCP_TYPE;
962 npr->initiatorFunc = 1;
964 phba->fc_stat.elsXmitPRLI++;
965 elsiocb->iocb_cmpl = lpfc_cmpl_els_prli;
966 spin_lock_irq(phba->host->host_lock);
967 ndlp->nlp_flag |= NLP_PRLI_SND;
968 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
969 ndlp->nlp_flag &= ~NLP_PRLI_SND;
970 spin_unlock_irq(phba->host->host_lock);
971 lpfc_els_free_iocb(phba, elsiocb);
974 spin_unlock_irq(phba->host->host_lock);
975 phba->fc_prli_sent++;
/*
 * lpfc_more_adisc - account for one completed ADISC and continue discovery.
 *
 * Decrements num_disc_nodes and, while FC_NLP_MORE is set, re-arms the
 * discovery timer and issues the remaining ADISCs from the NPR list via
 * lpfc_els_disc_adisc.
 * NOTE(review): return type/brace and the sentadisc declaration are
 * missing from this view.
 */
980 lpfc_more_adisc(struct lpfc_hba * phba)
984 if (phba->num_disc_nodes)
985 phba->num_disc_nodes--;
987 /* Continue discovery with <num_disc_nodes> ADISCs to go */
988 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
989 "%d:0210 Continue discovery with %d ADISCs to go "
990 "Data: x%x x%x x%x\n",
991 phba->brd_no, phba->num_disc_nodes, phba->fc_adisc_cnt,
992 phba->fc_flag, phba->hba_state);
994 /* Check to see if there are more ADISCs to be sent */
995 if (phba->fc_flag & FC_NLP_MORE) {
996 lpfc_set_disctmo(phba);
998 /* go thru NPR list and issue any remaining ELS ADISCs */
999 sentadisc = lpfc_els_disc_adisc(phba);
/*
 * lpfc_rscn_disc - continue discovery triggered by an RSCN.
 *
 * Issues PLOGIs to any nodes still on the NPR list; once nothing is left,
 * clears FC_RSCN_MODE (when no further RSCNs are queued) or processes the
 * next queued RSCN via lpfc_els_handle_rscn.
 * NOTE(review): return type/brace and the early-return after
 * lpfc_els_disc_plogi are missing from this view.
 */
1005 lpfc_rscn_disc(struct lpfc_hba * phba)
1007 /* RSCN discovery */
1008 /* go thru NPR list and issue ELS PLOGIs */
1009 if (phba->fc_npr_cnt) {
1010 if (lpfc_els_disc_plogi(phba))
1013 if (phba->fc_flag & FC_RSCN_MODE) {
1014 /* Check to see if more RSCNs came in while we were
1015 * processing this one.
1017 if ((phba->fc_rscn_id_cnt == 0) &&
1018 (!(phba->fc_flag & FC_RSCN_DISCOVERY))) {
1019 spin_lock_irq(phba->host->host_lock);
1020 phba->fc_flag &= ~FC_RSCN_MODE;
1021 spin_unlock_irq(phba->host->host_lock);
1023 lpfc_els_handle_rscn(phba);
/*
 * lpfc_cmpl_els_adisc - completion handler for an issued ADISC.
 *
 * Clears ADISC_SND/2B_DISC on the node, retries on error when possible
 * (re-marking the node and re-arming the discovery timer), skips the
 * discovery state machine for driver-aborted commands, and otherwise
 * delivers NLP_EVT_CMPL_ADISC.  When the last ADISC of the authentication
 * phase completes, either issues CLEAR_LA to finish link-up discovery
 * (re-enabling the stopped iocb rings on mailbox failure) or continues
 * RSCN discovery.  Always frees the command iocb.
 * NOTE(review): return type/brace, declarations for irsp/disc/mbox/rc,
 * goto labels, and several wrapped argument lines are missing from this
 * view.
 */
1029 lpfc_cmpl_els_adisc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
1030 struct lpfc_iocbq * rspiocb)
1033 struct lpfc_sli *psli;
1034 struct lpfc_nodelist *ndlp;
1040 /* we pass cmdiocb to state machine which needs rspiocb as well */
1041 cmdiocb->context_un.rsp_iocb = rspiocb;
1043 irsp = &(rspiocb->iocb);
1044 ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
1046 /* Since ndlp can be freed in the disc state machine, note if this node
1047 * is being used during discovery.
1049 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
1050 spin_lock_irq(phba->host->host_lock);
1051 ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC);
1052 spin_unlock_irq(phba->host->host_lock);
1054 /* ADISC completes to NPort <nlp_DID> */
1055 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1056 "%d:0104 ADISC completes to NPort x%x "
1057 "Data: x%x x%x x%x x%x x%x\n",
1058 phba->brd_no, ndlp->nlp_DID, irsp->ulpStatus,
1059 irsp->un.ulpWord[4], irsp->ulpTimeout, disc,
1060 phba->num_disc_nodes);
1062 /* Check to see if link went down during discovery */
1063 if (lpfc_els_chk_latt(phba)) {
/* Re-mark for discovery; the pending link event will redrive it. */
1064 spin_lock_irq(phba->host->host_lock);
1065 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
1066 spin_unlock_irq(phba->host->host_lock);
1070 if (irsp->ulpStatus) {
1071 /* Check for retry */
1072 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
1073 /* ELS command is being retried */
1075 spin_lock_irq(phba->host->host_lock);
1076 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
1077 spin_unlock_irq(phba->host->host_lock);
1078 lpfc_set_disctmo(phba);
1083 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
1084 if ((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
1085 ((irsp->un.ulpWord[4] != IOERR_SLI_ABORTED) &&
1086 (irsp->un.ulpWord[4] != IOERR_LINK_DOWN) &&
1087 (irsp->un.ulpWord[4] != IOERR_SLI_DOWN))) {
1088 lpfc_disc_state_machine(phba, ndlp, cmdiocb,
1089 NLP_EVT_CMPL_ADISC);
1092 /* Good status, call state machine */
1093 lpfc_disc_state_machine(phba, ndlp, cmdiocb,
1094 NLP_EVT_CMPL_ADISC);
1097 if (disc && phba->num_disc_nodes) {
1098 /* Check to see if there are more ADISCs to be sent */
1099 lpfc_more_adisc(phba);
1101 /* Check to see if we are done with ADISC authentication */
1102 if (phba->num_disc_nodes == 0) {
1103 lpfc_can_disctmo(phba);
1104 /* If we get here, there is nothing left to wait for */
1105 if ((phba->hba_state < LPFC_HBA_READY) &&
1106 (phba->hba_state != LPFC_CLEAR_LA)) {
1107 /* Link up discovery */
1108 if ((mbox = mempool_alloc(phba->mbox_mem_pool,
1110 phba->hba_state = LPFC_CLEAR_LA;
1111 lpfc_clear_la(phba, mbox);
1113 lpfc_mbx_cmpl_clear_la;
1114 rc = lpfc_sli_issue_mbox
1116 (MBX_NOWAIT | MBX_STOP_IOCB));
1117 if (rc == MBX_NOT_FINISHED) {
1119 phba->mbox_mem_pool);
1120 lpfc_disc_flush_list(phba);
/* CLEAR_LA failed: re-enable the rings that MBX_STOP_IOCB
 * had stopped so normal iocb processing resumes. */
1121 psli->ring[(psli->ip_ring)].
1123 ~LPFC_STOP_IOCB_EVENT;
1124 psli->ring[(psli->fcp_ring)].
1126 ~LPFC_STOP_IOCB_EVENT;
1127 psli->ring[(psli->next_ring)].
1129 ~LPFC_STOP_IOCB_EVENT;
1135 lpfc_rscn_disc(phba);
1140 lpfc_els_free_iocb(phba, cmdiocb);
/*
 * lpfc_issue_els_adisc - build and transmit an ADISC (address discovery)
 * to @ndlp.
 *
 * Payload is the ELS_CMD_ADISC word followed by the ADISC page: our hard
 * AL_PA, WWPN, WWNN, and N_Port ID (byte-swapped to wire order).
 * NLP_ADISC_SND is set while the iocb is outstanding and cleared again if
 * the issue fails.
 * NOTE(review): return type/brace, declarations for ap/icmd/pcmd/cmdsize,
 * and the return statements are missing from this view.
 */
1145 lpfc_issue_els_adisc(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
1150 struct lpfc_iocbq *elsiocb;
1151 struct lpfc_sli_ring *pring;
1152 struct lpfc_sli *psli;
1157 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
1159 cmdsize = (sizeof (uint32_t) + sizeof (ADISC));
1160 elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry, ndlp,
1161 ndlp->nlp_DID, ELS_CMD_ADISC);
1165 icmd = &elsiocb->iocb;
1166 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1168 /* For ADISC request, remainder of payload is service parameters */
1169 *((uint32_t *) (pcmd)) = ELS_CMD_ADISC;
1170 pcmd += sizeof (uint32_t);
1172 /* Fill in ADISC payload */
1173 ap = (ADISC *) pcmd;
1174 ap->hardAL_PA = phba->fc_pref_ALPA;
1175 memcpy(&ap->portName, &phba->fc_portname, sizeof (struct lpfc_name));
1176 memcpy(&ap->nodeName, &phba->fc_nodename, sizeof (struct lpfc_name));
1177 ap->DID = be32_to_cpu(phba->fc_myDID);
1179 phba->fc_stat.elsXmitADISC++;
1180 elsiocb->iocb_cmpl = lpfc_cmpl_els_adisc;
1181 spin_lock_irq(phba->host->host_lock);
1182 ndlp->nlp_flag |= NLP_ADISC_SND;
1183 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
1184 ndlp->nlp_flag &= ~NLP_ADISC_SND;
1185 spin_unlock_irq(phba->host->host_lock);
1186 lpfc_els_free_iocb(phba, elsiocb);
1189 spin_unlock_irq(phba->host->host_lock);
/*
 * lpfc_cmpl_els_logo - completion handler for an issued LOGO.
 *
 * Clears the node's LOGO_SND flag, retries on error when possible, skips
 * the discovery state machine for driver-aborted commands, and otherwise
 * delivers NLP_EVT_CMPL_LOGO (which, per the in-line comment, also
 * unregisters the RPI if needed).  Always frees the command iocb.
 * NOTE(review): return type/brace, the irsp declaration, goto labels, and
 * the error-path DSM event constant are missing from this view.
 */
1194 lpfc_cmpl_els_logo(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
1195 struct lpfc_iocbq * rspiocb)
1198 struct lpfc_sli *psli;
1199 struct lpfc_nodelist *ndlp;
1202 /* we pass cmdiocb to state machine which needs rspiocb as well */
1203 cmdiocb->context_un.rsp_iocb = rspiocb;
1205 irsp = &(rspiocb->iocb);
1206 ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
1207 spin_lock_irq(phba->host->host_lock);
1208 ndlp->nlp_flag &= ~NLP_LOGO_SND;
1209 spin_unlock_irq(phba->host->host_lock);
1211 /* LOGO completes to NPort <nlp_DID> */
1212 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1213 "%d:0105 LOGO completes to NPort x%x "
1214 "Data: x%x x%x x%x x%x\n",
1215 phba->brd_no, ndlp->nlp_DID, irsp->ulpStatus,
1216 irsp->un.ulpWord[4], irsp->ulpTimeout,
1217 phba->num_disc_nodes);
1219 /* Check to see if link went down during discovery */
1220 if (lpfc_els_chk_latt(phba))
1223 if (irsp->ulpStatus) {
1224 /* Check for retry */
1225 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
1226 /* ELS command is being retried */
1230 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
1231 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
1232 ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) ||
1233 (irsp->un.ulpWord[4] == IOERR_LINK_DOWN) ||
1234 (irsp->un.ulpWord[4] == IOERR_SLI_DOWN))) {
1237 lpfc_disc_state_machine(phba, ndlp, cmdiocb,
1241 /* Good status, call state machine.
1242 * This will unregister the rpi if needed.
1244 lpfc_disc_state_machine(phba, ndlp, cmdiocb, NLP_EVT_CMPL_LOGO);
1248 lpfc_els_free_iocb(phba, cmdiocb);
/*
 * lpfc_issue_els_logo - build and transmit a LOGO (logout) to @ndlp.
 *
 * Payload is the ELS_CMD_LOGO word, our N_Port ID (wire order), and our
 * WWPN.  NLP_LOGO_SND is set while the iocb is outstanding and cleared
 * again if the issue fails.
 * NOTE(review): return type/brace, declarations for icmd/pcmd/cmdsize,
 * and the return statements are missing from this view.
 */
1253 lpfc_issue_els_logo(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
1257 struct lpfc_iocbq *elsiocb;
1258 struct lpfc_sli_ring *pring;
1259 struct lpfc_sli *psli;
1264 pring = &psli->ring[LPFC_ELS_RING];
/* Command word + N_Port ID word + WWPN (the 2x covers both pairs). */
1266 cmdsize = 2 * (sizeof (uint32_t) + sizeof (struct lpfc_name));
1267 elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry, ndlp,
1268 ndlp->nlp_DID, ELS_CMD_LOGO);
1272 icmd = &elsiocb->iocb;
1273 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1274 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
1275 pcmd += sizeof (uint32_t);
1277 /* Fill in LOGO payload */
1278 *((uint32_t *) (pcmd)) = be32_to_cpu(phba->fc_myDID);
1279 pcmd += sizeof (uint32_t);
1280 memcpy(pcmd, &phba->fc_portname, sizeof (struct lpfc_name));
1282 phba->fc_stat.elsXmitLOGO++;
1283 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo;
1284 spin_lock_irq(phba->host->host_lock);
1285 ndlp->nlp_flag |= NLP_LOGO_SND;
1286 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
1287 ndlp->nlp_flag &= ~NLP_LOGO_SND;
1288 spin_unlock_irq(phba->host->host_lock);
1289 lpfc_els_free_iocb(phba, elsiocb);
1292 spin_unlock_irq(phba->host->host_lock);
/*
 * Generic completion handler for ELS commands that need no per-node
 * state transition (used e.g. by SCR and FARPR below): log the
 * completion, check for a pending link-attention event, and free the
 * command iocb.
 */
1297 lpfc_cmpl_els_cmd(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
1298 struct lpfc_iocbq * rspiocb)
1302 irsp = &rspiocb->iocb;
1304 /* ELS cmd tag <ulpIoTag> completes */
1305 lpfc_printf_log(phba,
1308 "%d:0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n",
1310 irsp->ulpIoTag, irsp->ulpStatus,
1311 irsp->un.ulpWord[4], irsp->ulpTimeout);
1313 /* Check to see if link went down during discovery */
1314 lpfc_els_chk_latt(phba);
1315 lpfc_els_free_iocb(phba, cmdiocb);
/*
 * Issue a State Change Registration (SCR) ELS command to @nportid
 * (normally the Fabric Controller) asking for full registration
 * (SCR_FUNC_FULL).  A temporary nodelist entry is allocated only to
 * address the iocb and is freed again on every visible path.
 * Completion is handled by the generic lpfc_cmpl_els_cmd.
 */
1320 lpfc_issue_els_scr(struct lpfc_hba * phba, uint32_t nportid, uint8_t retry)
1323 struct lpfc_iocbq *elsiocb;
1324 struct lpfc_sli_ring *pring;
1325 struct lpfc_sli *psli;
1328 struct lpfc_nodelist *ndlp;
1331 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
1332 cmdsize = (sizeof (uint32_t) + sizeof (SCR));
1333 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
1337 lpfc_nlp_init(phba, ndlp, nportid);
1339 elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry, ndlp,
1340 ndlp->nlp_DID, ELS_CMD_SCR);
1342 mempool_free( ndlp, phba->nlp_mem_pool);
1346 icmd = &elsiocb->iocb;
1347 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1349 *((uint32_t *) (pcmd)) = ELS_CMD_SCR;
1350 pcmd += sizeof (uint32_t);
1352 /* For SCR, remainder of payload is SCR parameter page */
1353 memset(pcmd, 0, sizeof (SCR));
1354 ((SCR *) pcmd)->Function = SCR_FUNC_FULL;
1356 phba->fc_stat.elsXmitSCR++;
1357 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
1358 spin_lock_irq(phba->host->host_lock);
1359 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
1360 spin_unlock_irq(phba->host->host_lock);
1361 mempool_free( ndlp, phba->nlp_mem_pool);
1362 lpfc_els_free_iocb(phba, elsiocb);
1365 spin_unlock_irq(phba->host->host_lock);
1366 mempool_free( ndlp, phba->nlp_mem_pool);
/*
 * Issue a FARP Response (FARPR) ELS command to @nportid.  The payload
 * carries the requester/responder DIDs, our port/node names, and - if
 * a node with that DID is already known - the originator's port/node
 * names copied from its nodelist entry.  As with SCR, a temporary
 * nodelist entry is allocated purely for addressing and freed on every
 * visible path.
 */
1371 lpfc_issue_els_farpr(struct lpfc_hba * phba, uint32_t nportid, uint8_t retry)
1374 struct lpfc_iocbq *elsiocb;
1375 struct lpfc_sli_ring *pring;
1376 struct lpfc_sli *psli;
1381 struct lpfc_nodelist *ondlp;
1382 struct lpfc_nodelist *ndlp;
1385 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
1386 cmdsize = (sizeof (uint32_t) + sizeof (FARP));
1387 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
1390 lpfc_nlp_init(phba, ndlp, nportid);
1392 elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry, ndlp,
1393 ndlp->nlp_DID, ELS_CMD_RNID);
1395 mempool_free( ndlp, phba->nlp_mem_pool);
1399 icmd = &elsiocb->iocb;
1400 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1402 *((uint32_t *) (pcmd)) = ELS_CMD_FARPR;
1403 pcmd += sizeof (uint32_t);
1405 /* Fill in FARPR payload */
1406 fp = (FARP *) (pcmd);
1407 memset(fp, 0, sizeof (FARP));
1408 lp = (uint32_t *) pcmd;
1409 *lp++ = be32_to_cpu(nportid);
1410 *lp++ = be32_to_cpu(phba->fc_myDID);
1412 fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE);
1414 memcpy(&fp->RportName, &phba->fc_portname, sizeof (struct lpfc_name));
1415 memcpy(&fp->RnodeName, &phba->fc_nodename, sizeof (struct lpfc_name));
1416 if ((ondlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, nportid))) {
1417 memcpy(&fp->OportName, &ondlp->nlp_portname,
1418 sizeof (struct lpfc_name));
1419 memcpy(&fp->OnodeName, &ondlp->nlp_nodename,
1420 sizeof (struct lpfc_name));
1423 phba->fc_stat.elsXmitFARPR++;
1424 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
1425 spin_lock_irq(phba->host->host_lock);
1426 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
1427 spin_unlock_irq(phba->host->host_lock);
1428 mempool_free( ndlp, phba->nlp_mem_pool);
1429 lpfc_els_free_iocb(phba, elsiocb);
1432 spin_unlock_irq(phba->host->host_lock);
1433 mempool_free( ndlp, phba->nlp_mem_pool);
/*
 * Timer callback for the per-node delayed-ELS-retry timer
 * (ndlp->nlp_delayfunc).  Runs in timer (softirq) context, so it only
 * queues an LPFC_EVT_ELS_RETRY work event for the node onto
 * phba->work_list and wakes the worker thread; the actual retry is
 * performed later by lpfc_els_retry_delay_handler().  If the event is
 * already queued (evt_listp non-empty) it does nothing.
 */
1438 lpfc_els_retry_delay(unsigned long ptr)
1440 struct lpfc_nodelist *ndlp;
1441 struct lpfc_hba *phba;
1442 unsigned long iflag;
1443 struct lpfc_work_evt *evtp;
1445 ndlp = (struct lpfc_nodelist *)ptr;
1446 phba = ndlp->nlp_phba;
1447 evtp = &ndlp->els_retry_evt;
1449 spin_lock_irqsave(phba->host->host_lock, iflag);
1450 if (!list_empty(&evtp->evt_listp)) {
1451 spin_unlock_irqrestore(phba->host->host_lock, iflag);
1455 evtp->evt_arg1 = ndlp;
1456 evtp->evt = LPFC_EVT_ELS_RETRY;
1457 list_add_tail(&evtp->evt_listp, &phba->work_list);
1458 if (phba->work_wait)
1459 wake_up(phba->work_wait);
1461 spin_unlock_irqrestore(phba->host->host_lock, iflag);
/*
 * Worker-thread handler for a delayed ELS retry: clears NLP_DELAY_TMO,
 * then reissues the ELS command saved in ndlp->nlp_last_elscmd
 * (FLOGI / PLOGI / ADISC / PRLI / LOGO), moving the node onto the
 * matching state/list when the reissue succeeds.  If the delay-timer
 * flag was already cleared the retry is skipped.
 * NOTE(review): the switch dispatching on the saved command is partly
 * elided in this extract.
 */
1466 lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
1468 struct lpfc_hba *phba;
1473 phba = ndlp->nlp_phba;
1474 spin_lock_irq(phba->host->host_lock);
1475 did = ndlp->nlp_DID;
1476 cmd = ndlp->nlp_last_elscmd;
1477 ndlp->nlp_last_elscmd = 0;
1479 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
1480 spin_unlock_irq(phba->host->host_lock);
1484 ndlp->nlp_flag &= ~NLP_DELAY_TMO;
1485 spin_unlock_irq(phba->host->host_lock);
1486 retry = ndlp->nlp_retry;
1490 lpfc_issue_els_flogi(phba, ndlp, retry);
1493 if(!lpfc_issue_els_plogi(phba, ndlp->nlp_DID, retry)) {
1494 ndlp->nlp_prev_state = ndlp->nlp_state;
1495 ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
1496 lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
1500 if (!lpfc_issue_els_adisc(phba, ndlp, retry)) {
1501 ndlp->nlp_prev_state = ndlp->nlp_state;
1502 ndlp->nlp_state = NLP_STE_ADISC_ISSUE;
1503 lpfc_nlp_list(phba, ndlp, NLP_ADISC_LIST);
1507 if (!lpfc_issue_els_prli(phba, ndlp, retry)) {
1508 ndlp->nlp_prev_state = ndlp->nlp_state;
1509 ndlp->nlp_state = NLP_STE_PRLI_ISSUE;
1510 lpfc_nlp_list(phba, ndlp, NLP_PRLI_LIST);
1514 if (!lpfc_issue_els_logo(phba, ndlp, retry)) {
1515 ndlp->nlp_prev_state = ndlp->nlp_state;
1516 ndlp->nlp_state = NLP_STE_NPR_NODE;
1517 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
/*
 * Decide whether a failed ELS command should be retried, and if so
 * either reissue it immediately or arm the per-node 1-second delay
 * timer (NLP_DELAY_TMO + nlp_delayfunc) for a deferred retry.
 *
 * The decision is driven by irsp->ulpStatus: local-reject subcodes
 * (loop-open failure, sequence timeout, no resources, invalid RPI),
 * NPort/fabric reject or busy, and LS_RJT reason/explanation codes -
 * with PLOGI given extra leeway (maxretry bumped past
 * lpfc_max_els_tries in one LS_RJT case).  FDMI_DID commands and
 * commands that exhausted maxretry are not retried
 * (fc_stat.elsRetryExceeded is bumped).
 *
 * Returns nonzero when a retry was scheduled/issued (caller must not
 * complete the command), zero otherwise.  context2 may be NULL for a
 * driver-internal abort of a delayed ELS command, hence the pcmd
 * guard.  NOTE(review): several case labels / returns are elided in
 * this extract.
 */
1525 lpfc_els_retry(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
1526 struct lpfc_iocbq * rspiocb)
1529 struct lpfc_dmabuf *pcmd;
1530 struct lpfc_nodelist *ndlp;
1533 int retry, maxretry;
1540 maxretry = lpfc_max_els_tries;
1541 irsp = &rspiocb->iocb;
1542 ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
1543 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
1546 /* Note: context2 may be 0 for internal driver abort
1547 * of delays ELS command.
1550 if (pcmd && pcmd->virt) {
1551 elscmd = (uint32_t *) (pcmd->virt);
1556 did = ndlp->nlp_DID;
1558 /* We should only hit this case for retrying PLOGI */
1559 did = irsp->un.elsreq64.remoteID;
1560 ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, did);
1561 if (!ndlp && (cmd != ELS_CMD_PLOGI))
1565 switch (irsp->ulpStatus) {
1566 case IOSTAT_FCP_RSP_ERROR:
1567 case IOSTAT_REMOTE_STOP:
1570 case IOSTAT_LOCAL_REJECT:
1571 switch ((irsp->un.ulpWord[4] & 0xff)) {
1572 case IOERR_LOOP_OPEN_FAILURE:
1573 if (cmd == ELS_CMD_PLOGI) {
1574 if (cmdiocb->retry == 0) {
1581 case IOERR_SEQUENCE_TIMEOUT:
1585 case IOERR_NO_RESOURCES:
1586 if (cmd == ELS_CMD_PLOGI) {
1592 case IOERR_INVALID_RPI:
1598 case IOSTAT_NPORT_RJT:
1599 case IOSTAT_FABRIC_RJT:
1600 if (irsp->un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
1606 case IOSTAT_NPORT_BSY:
1607 case IOSTAT_FABRIC_BSY:
1612 stat.un.lsRjtError = be32_to_cpu(irsp->un.ulpWord[4]);
1613 /* Added for Vendor specifc support
1614 * Just keep retrying for these Rsn / Exp codes
1616 switch (stat.un.b.lsRjtRsnCode) {
1617 case LSRJT_UNABLE_TPC:
1618 if (stat.un.b.lsRjtRsnCodeExp ==
1619 LSEXP_CMD_IN_PROGRESS) {
1620 if (cmd == ELS_CMD_PLOGI) {
1627 if (cmd == ELS_CMD_PLOGI) {
1629 maxretry = lpfc_max_els_tries + 1;
1635 case LSRJT_LOGICAL_BSY:
1636 if (cmd == ELS_CMD_PLOGI) {
1645 case IOSTAT_INTERMED_RSP:
1653 if (did == FDMI_DID)
1656 if ((++cmdiocb->retry) >= maxretry) {
1657 phba->fc_stat.elsRetryExceeded++;
1663 /* Retry ELS command <elsCmd> to remote NPORT <did> */
1664 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1665 "%d:0107 Retry ELS command x%x to remote "
1666 "NPORT x%x Data: x%x x%x\n",
1668 cmd, did, cmdiocb->retry, delay);
1670 if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) {
1671 /* If discovery / RSCN timer is running, reset it */
1672 if (timer_pending(&phba->fc_disctmo) ||
1673 (phba->fc_flag & FC_RSCN_MODE)) {
1674 lpfc_set_disctmo(phba);
1678 phba->fc_stat.elsXmitRetry++;
1679 if (ndlp && delay) {
1680 phba->fc_stat.elsDelayRetry++;
1681 ndlp->nlp_retry = cmdiocb->retry;
1683 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
1684 ndlp->nlp_flag |= NLP_DELAY_TMO;
1686 ndlp->nlp_prev_state = ndlp->nlp_state;
1687 ndlp->nlp_state = NLP_STE_NPR_NODE;
1688 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
1689 ndlp->nlp_last_elscmd = cmd;
1695 lpfc_issue_els_flogi(phba, ndlp, cmdiocb->retry);
1699 ndlp->nlp_prev_state = ndlp->nlp_state;
1700 ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
1701 lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
1703 lpfc_issue_els_plogi(phba, did, cmdiocb->retry);
1706 ndlp->nlp_prev_state = ndlp->nlp_state;
1707 ndlp->nlp_state = NLP_STE_ADISC_ISSUE;
1708 lpfc_nlp_list(phba, ndlp, NLP_ADISC_LIST);
1709 lpfc_issue_els_adisc(phba, ndlp, cmdiocb->retry);
1712 ndlp->nlp_prev_state = ndlp->nlp_state;
1713 ndlp->nlp_state = NLP_STE_PRLI_ISSUE;
1714 lpfc_nlp_list(phba, ndlp, NLP_PRLI_LIST);
1715 lpfc_issue_els_prli(phba, ndlp, cmdiocb->retry);
1718 ndlp->nlp_prev_state = ndlp->nlp_state;
1719 ndlp->nlp_state = NLP_STE_NPR_NODE;
1720 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
1721 lpfc_issue_els_logo(phba, ndlp, cmdiocb->retry);
1726 /* No retry ELS command <elsCmd> to remote NPORT <did> */
1727 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1728 "%d:0108 No retry ELS command x%x to remote NPORT x%x "
1731 cmd, did, cmdiocb->retry);
/*
 * Release all resources attached to an ELS iocb:
 *   context2        = command DMA buffer (its ->list links the
 *                     response buffer, freed first),
 *   context3        = buffer pointer list (BPL),
 * then return the iocbq itself to the SLI layer under the host lock.
 */
1737 lpfc_els_free_iocb(struct lpfc_hba * phba, struct lpfc_iocbq * elsiocb)
1739 struct lpfc_dmabuf *buf_ptr, *buf_ptr1;
1741 /* context2 = cmd, context2->next = rsp, context3 = bpl */
1742 if (elsiocb->context2) {
1743 buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2;
1744 /* Free the response before processing the command. */
1745 if (!list_empty(&buf_ptr1->list)) {
1746 list_remove_head(&buf_ptr1->list, buf_ptr,
1749 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
1752 lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys);
1756 if (elsiocb->context3) {
1757 buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3;
1758 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
1761 spin_lock_irq(phba->host->host_lock);
1762 lpfc_sli_release_iocbq(phba, elsiocb);
1763 spin_unlock_irq(phba->host->host_lock);
/*
 * Completion handler for an ACC sent in response to a received LOGO.
 * Based on the node's state: an UNUSED node is dropped from all lists
 * (NLP_NO_LIST); a node in NPort Recovery has its RPI unregistered.
 * The response iocb is freed in all cases.
 */
1768 lpfc_cmpl_els_logo_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
1769 struct lpfc_iocbq * rspiocb)
1771 struct lpfc_nodelist *ndlp;
1773 ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
1775 /* ACC to LOGO completes to NPort <nlp_DID> */
1776 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1777 "%d:0109 ACC to LOGO completes to NPort x%x "
1778 "Data: x%x x%x x%x\n",
1779 phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag,
1780 ndlp->nlp_state, ndlp->nlp_rpi);
1782 switch (ndlp->nlp_state) {
1783 case NLP_STE_UNUSED_NODE: /* node is just allocated */
1784 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
1786 case NLP_STE_NPR_NODE: /* NPort Recovery mode */
1787 lpfc_unreg_rpi(phba, ndlp);
1792 lpfc_els_free_iocb(phba, cmdiocb);
/*
 * Generic completion handler for transmitted ACC responses.  If the
 * ACC completed successfully and the node is flagged
 * NLP_ACC_REGLOGIN, the deferred REG_LOGIN mailbox saved in
 * cmdiocb->context_un.mbox is now issued (node moved to the
 * REG_LOGIN_ISSUE state/list first); on mailbox-issue failure, or if
 * the link dropped, the mailbox is freed and - when REGLOGIN was
 * pending - the node is dropped.  NLP_ACC_REGLOGIN is cleared and the
 * iocb freed at the end.  NOTE(review): some early-exit paths are
 * elided in this extract.
 */
1797 lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
1798 struct lpfc_iocbq * rspiocb)
1800 struct lpfc_nodelist *ndlp;
1801 LPFC_MBOXQ_t *mbox = NULL;
1803 ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
1804 if (cmdiocb->context_un.mbox)
1805 mbox = cmdiocb->context_un.mbox;
1808 /* Check to see if link went down during discovery */
1809 if ((lpfc_els_chk_latt(phba)) || !ndlp) {
1811 mempool_free( mbox, phba->mbox_mem_pool);
1816 /* ELS response tag <ulpIoTag> completes */
1817 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1818 "%d:0110 ELS response tag x%x completes "
1819 "Data: x%x x%x x%x x%x x%x x%x x%x\n",
1821 cmdiocb->iocb.ulpIoTag, rspiocb->iocb.ulpStatus,
1822 rspiocb->iocb.un.ulpWord[4], rspiocb->iocb.ulpTimeout,
1823 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
1827 if ((rspiocb->iocb.ulpStatus == 0)
1828 && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
1829 /* set_slim mailbox command needs to execute first,
1830 * queue this command to be processed later.
1832 lpfc_unreg_rpi(phba, ndlp);
1833 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
1834 mbox->context2 = ndlp;
1835 ndlp->nlp_prev_state = ndlp->nlp_state;
1836 ndlp->nlp_state = NLP_STE_REG_LOGIN_ISSUE;
1837 lpfc_nlp_list(phba, ndlp, NLP_REGLOGIN_LIST);
1838 if (lpfc_sli_issue_mbox(phba, mbox,
1839 (MBX_NOWAIT | MBX_STOP_IOCB))
1840 != MBX_NOT_FINISHED) {
1843 /* NOTE: we should have messages for unsuccessful
1845 mempool_free( mbox, phba->mbox_mem_pool);
1847 mempool_free( mbox, phba->mbox_mem_pool);
1848 if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
1849 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
1856 spin_lock_irq(phba->host->host_lock);
1857 ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN;
1858 spin_unlock_irq(phba->host->host_lock);
1860 lpfc_els_free_iocb(phba, cmdiocb);
/*
 * Build and transmit an ELS ACC response for the exchange in
 * @oldiocb.  @flag selects the payload: a bare ACC word, or (for the
 * PLOGI-ACC case) ACC followed by our service parameters, with an
 * optional deferred mailbox (@mbox) hung on the iocb for
 * lpfc_cmpl_els_acc to issue later.  The ulpContext (XRI) is copied
 * from the original request so the reply targets the right exchange.
 * Completion goes to lpfc_cmpl_els_logo_acc when NLP_LOGO_ACC is set,
 * otherwise to lpfc_cmpl_els_acc.  NOTE(review): the flag switch and
 * return paths are partly elided in this extract.
 */
1865 lpfc_els_rsp_acc(struct lpfc_hba * phba, uint32_t flag,
1866 struct lpfc_iocbq * oldiocb, struct lpfc_nodelist * ndlp,
1867 LPFC_MBOXQ_t * mbox, uint8_t newnode)
1871 struct lpfc_iocbq *elsiocb;
1872 struct lpfc_sli_ring *pring;
1873 struct lpfc_sli *psli;
1879 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
1880 oldcmd = &oldiocb->iocb;
1884 cmdsize = sizeof (uint32_t);
1885 elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
1886 ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
1888 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
1891 icmd = &elsiocb->iocb;
1892 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
1893 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1894 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
1895 pcmd += sizeof (uint32_t);
1898 cmdsize = (sizeof (struct serv_parm) + sizeof (uint32_t));
1899 elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
1900 ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
1904 icmd = &elsiocb->iocb;
1905 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
1906 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1909 elsiocb->context_un.mbox = mbox;
1911 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
1912 pcmd += sizeof (uint32_t);
1913 memcpy(pcmd, &phba->fc_sparam, sizeof (struct serv_parm));
1920 elsiocb->context1 = NULL;
1922 /* Xmit ELS ACC response tag <ulpIoTag> */
1923 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1924 "%d:0128 Xmit ELS ACC response tag x%x "
1925 "Data: x%x x%x x%x x%x x%x\n",
1927 elsiocb->iocb.ulpIoTag,
1928 elsiocb->iocb.ulpContext, ndlp->nlp_DID,
1929 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
1931 if (ndlp->nlp_flag & NLP_LOGO_ACC) {
1932 spin_lock_irq(phba->host->host_lock);
1933 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
1934 spin_unlock_irq(phba->host->host_lock);
1935 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo_acc;
1937 elsiocb->iocb_cmpl = lpfc_cmpl_els_acc;
1940 phba->fc_stat.elsXmitACC++;
1941 spin_lock_irq(phba->host->host_lock);
1942 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
1943 spin_unlock_irq(phba->host->host_lock);
1944 if (rc == IOCB_ERROR) {
1945 lpfc_els_free_iocb(phba, elsiocb);
/*
 * Build and transmit an LS_RJT response for the exchange in @oldiocb.
 * The payload is the LS_RJT command word followed by @rejectError
 * (packed reason/explanation codes).  The XRI is copied from the
 * original request; completion is handled by lpfc_cmpl_els_acc and
 * the iocb is freed on IOCB_ERROR.
 */
1952 lpfc_els_rsp_reject(struct lpfc_hba * phba, uint32_t rejectError,
1953 struct lpfc_iocbq * oldiocb, struct lpfc_nodelist * ndlp)
1957 struct lpfc_iocbq *elsiocb;
1958 struct lpfc_sli_ring *pring;
1959 struct lpfc_sli *psli;
1965 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
1967 cmdsize = 2 * sizeof (uint32_t);
1968 elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
1969 ndlp, ndlp->nlp_DID, ELS_CMD_LS_RJT);
1973 icmd = &elsiocb->iocb;
1974 oldcmd = &oldiocb->iocb;
1975 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
1976 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1978 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
1979 pcmd += sizeof (uint32_t);
1980 *((uint32_t *) (pcmd)) = rejectError;
1982 /* Xmit ELS RJT <err> response tag <ulpIoTag> */
1983 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1984 "%d:0129 Xmit ELS RJT x%x response tag x%x "
1985 "Data: x%x x%x x%x x%x x%x\n",
1987 rejectError, elsiocb->iocb.ulpIoTag,
1988 elsiocb->iocb.ulpContext, ndlp->nlp_DID,
1989 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
1991 phba->fc_stat.elsXmitLSRJT++;
1992 elsiocb->iocb_cmpl = lpfc_cmpl_els_acc;
1993 spin_lock_irq(phba->host->host_lock);
1994 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
1995 spin_unlock_irq(phba->host->host_lock);
1996 if (rc == IOCB_ERROR) {
1997 lpfc_els_free_iocb(phba, elsiocb);
/*
 * Build and transmit an ACC for a received ADISC.  The payload is the
 * ACC word followed by an ADISC parameter page containing our
 * preferred hard AL_PA, port name, node name, and DID.  The XRI is
 * copied from the original request; the iocb is freed on IOCB_ERROR.
 */
2004 lpfc_els_rsp_adisc_acc(struct lpfc_hba * phba,
2005 struct lpfc_iocbq * oldiocb, struct lpfc_nodelist * ndlp)
2010 struct lpfc_iocbq *elsiocb;
2011 struct lpfc_sli_ring *pring;
2012 struct lpfc_sli *psli;
2018 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
2020 cmdsize = sizeof (uint32_t) + sizeof (ADISC);
2021 elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
2022 ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
2026 /* Xmit ADISC ACC response tag <ulpIoTag> */
2027 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
2028 "%d:0130 Xmit ADISC ACC response tag x%x "
2029 "Data: x%x x%x x%x x%x x%x\n",
2031 elsiocb->iocb.ulpIoTag,
2032 elsiocb->iocb.ulpContext, ndlp->nlp_DID,
2033 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
2035 icmd = &elsiocb->iocb;
2036 oldcmd = &oldiocb->iocb;
2037 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
2038 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2040 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
2041 pcmd += sizeof (uint32_t);
2043 ap = (ADISC *) (pcmd);
2044 ap->hardAL_PA = phba->fc_pref_ALPA;
2045 memcpy(&ap->portName, &phba->fc_portname, sizeof (struct lpfc_name));
2046 memcpy(&ap->nodeName, &phba->fc_nodename, sizeof (struct lpfc_name));
2047 ap->DID = be32_to_cpu(phba->fc_myDID);
2049 phba->fc_stat.elsXmitACC++;
2050 elsiocb->iocb_cmpl = lpfc_cmpl_els_acc;
2051 spin_lock_irq(phba->host->host_lock);
2052 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
2053 spin_unlock_irq(phba->host->host_lock);
2054 if (rc == IOCB_ERROR) {
2055 lpfc_els_free_iocb(phba, elsiocb);
/*
 * Build and transmit an ACC for a received PRLI.  The payload is the
 * PRLI-flavoured ACC word followed by a zeroed PRLI parameter page
 * advertising: request executed, image pair established, XFER_RDY
 * disabled, FCP type, initiator function - plus the FC-TAPE bits
 * (confirmed completion, task-retry-ID) when the adapter's feature
 * level (vpd->rev.feaLevelHigh) is 2 or higher, i.e. firmware 3.20+.
 */
2062 lpfc_els_rsp_prli_acc(struct lpfc_hba * phba,
2063 struct lpfc_iocbq * oldiocb, struct lpfc_nodelist * ndlp)
2069 struct lpfc_iocbq *elsiocb;
2070 struct lpfc_sli_ring *pring;
2071 struct lpfc_sli *psli;
2077 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
2079 cmdsize = sizeof (uint32_t) + sizeof (PRLI);
2080 elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry, ndlp,
2081 ndlp->nlp_DID, (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK)));
2085 /* Xmit PRLI ACC response tag <ulpIoTag> */
2086 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
2087 "%d:0131 Xmit PRLI ACC response tag x%x "
2088 "Data: x%x x%x x%x x%x x%x\n",
2090 elsiocb->iocb.ulpIoTag,
2091 elsiocb->iocb.ulpContext, ndlp->nlp_DID,
2092 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
2094 icmd = &elsiocb->iocb;
2095 oldcmd = &oldiocb->iocb;
2096 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
2097 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2099 *((uint32_t *) (pcmd)) = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK));
2100 pcmd += sizeof (uint32_t);
2102 /* For PRLI, remainder of payload is PRLI parameter page */
2103 memset(pcmd, 0, sizeof (PRLI));
2105 npr = (PRLI *) pcmd;
2108 * If our firmware version is 3.20 or later,
2109 * set the following bits for FC-TAPE support.
2111 if (vpd->rev.feaLevelHigh >= 0x02) {
2112 npr->ConfmComplAllowed = 1;
2114 npr->TaskRetryIdReq = 1;
2117 npr->acceptRspCode = PRLI_REQ_EXECUTED;
2118 npr->estabImagePair = 1;
2119 npr->readXferRdyDis = 1;
2120 npr->ConfmComplAllowed = 1;
2122 npr->prliType = PRLI_FCP_TYPE;
2123 npr->initiatorFunc = 1;
2125 phba->fc_stat.elsXmitACC++;
2126 elsiocb->iocb_cmpl = lpfc_cmpl_els_acc;
2128 spin_lock_irq(phba->host->host_lock);
2129 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
2130 spin_unlock_irq(phba->host->host_lock);
2131 if (rc == IOCB_ERROR) {
2132 lpfc_els_free_iocb(phba, elsiocb);
/*
 * Build and transmit an ACC for a received RNID.  The payload is the
 * ACC word plus an RNID page carrying our port/node names (common
 * identification data); when the requested format is
 * RNID_TOPOLOGY_DISC the topology-discovery page (unit type RNID_HBA)
 * is appended as well, otherwise SpecificLen is left 0.  context1 is
 * cleared before issue because the ndlp may be freed before the
 * completion runs.
 */
2139 lpfc_els_rsp_rnid_acc(struct lpfc_hba * phba,
2141 struct lpfc_iocbq * oldiocb, struct lpfc_nodelist * ndlp)
2146 struct lpfc_iocbq *elsiocb;
2147 struct lpfc_sli_ring *pring;
2148 struct lpfc_sli *psli;
2154 pring = &psli->ring[LPFC_ELS_RING];
2156 cmdsize = sizeof (uint32_t) + sizeof (uint32_t)
2157 + (2 * sizeof (struct lpfc_name));
2159 cmdsize += sizeof (RNID_TOP_DISC);
2161 elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
2162 ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
2166 /* Xmit RNID ACC response tag <ulpIoTag> */
2167 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
2168 "%d:0132 Xmit RNID ACC response tag x%x "
2171 elsiocb->iocb.ulpIoTag,
2172 elsiocb->iocb.ulpContext);
2174 icmd = &elsiocb->iocb;
2175 oldcmd = &oldiocb->iocb;
2176 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
2177 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2179 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
2180 pcmd += sizeof (uint32_t);
2182 memset(pcmd, 0, sizeof (RNID));
2183 rn = (RNID *) (pcmd);
2184 rn->Format = format;
2185 rn->CommonLen = (2 * sizeof (struct lpfc_name));
2186 memcpy(&rn->portName, &phba->fc_portname, sizeof (struct lpfc_name));
2187 memcpy(&rn->nodeName, &phba->fc_nodename, sizeof (struct lpfc_name));
2190 rn->SpecificLen = 0;
2192 case RNID_TOPOLOGY_DISC:
2193 rn->SpecificLen = sizeof (RNID_TOP_DISC);
2194 memcpy(&rn->un.topologyDisc.portName,
2195 &phba->fc_portname, sizeof (struct lpfc_name));
2196 rn->un.topologyDisc.unitType = RNID_HBA;
2197 rn->un.topologyDisc.physPort = 0;
2198 rn->un.topologyDisc.attachedNodes = 0;
2202 rn->SpecificLen = 0;
2206 phba->fc_stat.elsXmitACC++;
2207 elsiocb->iocb_cmpl = lpfc_cmpl_els_acc;
2208 elsiocb->context1 = NULL; /* Don't need ndlp for cmpl,
2209 * it could be freed */
2211 spin_lock_irq(phba->host->host_lock);
2212 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
2213 spin_unlock_irq(phba->host->host_lock);
2214 if (rc == IOCB_ERROR) {
2215 lpfc_els_free_iocb(phba, elsiocb);
/*
 * Walk the NPR (NPort Recovery) list and issue an ADISC to every node
 * flagged NLP_NPR_2B_DISC|NLP_NPR_ADISC, moving each into the
 * ADISC_ISSUE state.  Stops once num_disc_nodes reaches
 * cfg_discovery_threads, setting FC_NLP_MORE to signal nodes remain;
 * if nothing was sent, FC_NLP_MORE is cleared instead.
 */
2222 lpfc_els_disc_adisc(struct lpfc_hba * phba)
2225 struct lpfc_nodelist *ndlp, *next_ndlp;
2228 /* go thru NPR list and issue any remaining ELS ADISCs */
2229 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
2231 if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
2232 if (ndlp->nlp_flag & NLP_NPR_ADISC) {
2233 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2234 ndlp->nlp_prev_state = ndlp->nlp_state;
2235 ndlp->nlp_state = NLP_STE_ADISC_ISSUE;
2236 lpfc_nlp_list(phba, ndlp,
2238 lpfc_issue_els_adisc(phba, ndlp, 0);
2240 phba->num_disc_nodes++;
2241 if (phba->num_disc_nodes >=
2242 phba->cfg_discovery_threads) {
2243 spin_lock_irq(phba->host->host_lock);
2244 phba->fc_flag |= FC_NLP_MORE;
2245 spin_unlock_irq(phba->host->host_lock);
2251 if (sentadisc == 0) {
2252 spin_lock_irq(phba->host->host_lock);
2253 phba->fc_flag &= ~FC_NLP_MORE;
2254 spin_unlock_irq(phba->host->host_lock);
/*
 * Counterpart of lpfc_els_disc_adisc for PLOGI: walk the NPR list and
 * issue a PLOGI to each node that needs discovery (NLP_NPR_2B_DISC),
 * is not waiting on a delayed retry (NLP_DELAY_TMO clear), and is not
 * an ADISC candidate.  Throttled by cfg_discovery_threads via
 * FC_NLP_MORE, same as the ADISC walker.
 */
2260 lpfc_els_disc_plogi(struct lpfc_hba * phba)
2263 struct lpfc_nodelist *ndlp, *next_ndlp;
2266 /* go thru NPR list and issue any remaining ELS PLOGIs */
2267 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
2269 if ((ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
2270 (!(ndlp->nlp_flag & NLP_DELAY_TMO))) {
2271 if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
2272 ndlp->nlp_prev_state = ndlp->nlp_state;
2273 ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
2274 lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
2275 lpfc_issue_els_plogi(phba, ndlp->nlp_DID, 0);
2277 phba->num_disc_nodes++;
2278 if (phba->num_disc_nodes >=
2279 phba->cfg_discovery_threads) {
2280 spin_lock_irq(phba->host->host_lock);
2281 phba->fc_flag |= FC_NLP_MORE;
2282 spin_unlock_irq(phba->host->host_lock);
2288 if (sentplogi == 0) {
2289 spin_lock_irq(phba->host->host_lock);
2290 phba->fc_flag &= ~FC_NLP_MORE;
2291 spin_unlock_irq(phba->host->host_lock);
/*
 * Discard all deferred RSCN state: free every saved RSCN payload
 * buffer in fc_rscn_id_list, reset the count, clear the
 * FC_RSCN_MODE / FC_RSCN_DISCOVERY flags under the host lock, and let
 * the discovery timer be cancelled via lpfc_can_disctmo().
 */
2297 lpfc_els_flush_rscn(struct lpfc_hba * phba)
2299 struct lpfc_dmabuf *mp;
2302 for (i = 0; i < phba->fc_rscn_id_cnt; i++) {
2303 mp = phba->fc_rscn_id_list[i];
2304 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2306 phba->fc_rscn_id_list[i] = NULL;
2308 phba->fc_rscn_id_cnt = 0;
2309 spin_lock_irq(phba->host->host_lock);
2310 phba->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY);
2311 spin_unlock_irq(phba->host->host_lock);
2312 lpfc_can_disctmo(phba);
/*
 * Test whether N_Port ID @did is covered by any pending RSCN payload.
 * Fabric DIDs never match; FC_RSCN_DISCOVERY (full rediscovery)
 * matches everything.  Otherwise each saved RSCN entry is compared at
 * the granularity encoded in its address-format bits: 0 = exact port,
 * 1 = same domain+area, 2 = same domain, 3 = whole fabric; unknown
 * formats are logged.  NOTE(review): the match/return statements
 * inside the switch are elided in this extract.
 */
2317 lpfc_rscn_payload_check(struct lpfc_hba * phba, uint32_t did)
2321 struct lpfc_dmabuf *mp;
2323 uint32_t payload_len, cmd, i, match;
2325 ns_did.un.word = did;
2328 /* Never match fabric nodes for RSCNs */
2329 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
2332 /* If we are doing a FULL RSCN rediscovery, match everything */
2333 if (phba->fc_flag & FC_RSCN_DISCOVERY) {
2337 for (i = 0; i < phba->fc_rscn_id_cnt; i++) {
2338 mp = phba->fc_rscn_id_list[i];
2339 lp = (uint32_t *) mp->virt;
2341 payload_len = be32_to_cpu(cmd) & 0xffff; /* payload length */
2342 payload_len -= sizeof (uint32_t); /* take off word 0 */
2343 while (payload_len) {
2344 rscn_did.un.word = *lp++;
2345 rscn_did.un.word = be32_to_cpu(rscn_did.un.word);
2346 payload_len -= sizeof (uint32_t);
2347 switch (rscn_did.un.b.resv) {
2348 case 0: /* Single N_Port ID effected */
2349 if (ns_did.un.word == rscn_did.un.word) {
2353 case 1: /* Whole N_Port Area effected */
2354 if ((ns_did.un.b.domain == rscn_did.un.b.domain)
2355 && (ns_did.un.b.area == rscn_did.un.b.area))
2360 case 2: /* Whole N_Port Domain effected */
2361 if (ns_did.un.b.domain == rscn_did.un.b.domain)
2366 case 3: /* Whole Fabric effected */
2370 /* Unknown Identifier in RSCN list */
2371 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2372 "%d:0217 Unknown Identifier in "
2373 "RSCN payload Data: x%x\n",
2374 phba->brd_no, rscn_did.un.word);
/*
 * For every node (on all seven node lists, NPR first) whose DID
 * matches a pending RSCN payload, send NLP_EVT_DEVICE_RECOVERY into
 * the discovery state machine and cancel any pending delayed-retry
 * timer (NLP_DELAY_TMO / nlp_delayfunc), removing a queued retry work
 * event if present.
 */
2386 lpfc_rscn_recovery_check(struct lpfc_hba * phba)
2388 struct lpfc_nodelist *ndlp = NULL, *next_ndlp;
2389 struct list_head *listp;
2390 struct list_head *node_list[7];
2393 /* Look at all nodes effected by pending RSCNs and move
2396 node_list[0] = &phba->fc_npr_list; /* MUST do this list first */
2397 node_list[1] = &phba->fc_nlpmap_list;
2398 node_list[2] = &phba->fc_nlpunmap_list;
2399 node_list[3] = &phba->fc_prli_list;
2400 node_list[4] = &phba->fc_reglogin_list;
2401 node_list[5] = &phba->fc_adisc_list;
2402 node_list[6] = &phba->fc_plogi_list;
2403 for (i = 0; i < 7; i++) {
2404 listp = node_list[i];
2405 if (list_empty(listp))
2408 list_for_each_entry_safe(ndlp, next_ndlp, listp, nlp_listp) {
2409 if (!(lpfc_rscn_payload_check(phba, ndlp->nlp_DID)))
2412 lpfc_disc_state_machine(phba, ndlp, NULL,
2413 NLP_EVT_DEVICE_RECOVERY);
2415 /* Make sure NLP_DELAY_TMO is NOT running
2416 * after a device recovery event.
2418 if (ndlp->nlp_flag & NLP_DELAY_TMO) {
2419 ndlp->nlp_flag &= ~NLP_DELAY_TMO;
2420 ndlp->nlp_last_elscmd = 0;
2421 del_timer_sync(&ndlp->nlp_delayfunc);
2422 if (!list_empty(&ndlp->
2423 els_retry_evt.evt_listp))
2424 list_del_init(&ndlp->
2425 els_retry_evt.evt_listp);
/*
 * Handle an unsolicited RSCN.  Three paths:
 *  - discovery not yet at NameServer query: just ACC it (discovery
 *    will cover the change);
 *  - an RSCN or discovery is already in progress: either save this
 *    payload in fc_rscn_id_list for deferred processing (taking
 *    ownership of context2), or - if the list is full / full
 *    rediscovery already flagged - escalate to FC_RSCN_DISCOVERY;
 *    then ACC and send DEVICE_RECOVERY to matching nodes;
 *  - otherwise: enter RSCN mode, save the payload, start the
 *    discovery timer, ACC, run recovery checks, and process it via
 *    lpfc_els_handle_rscn().
 */
2433 lpfc_els_rcv_rscn(struct lpfc_hba * phba,
2434 struct lpfc_iocbq * cmdiocb,
2435 struct lpfc_nodelist * ndlp, uint8_t newnode)
2437 struct lpfc_dmabuf *pcmd;
2440 uint32_t payload_len, cmd;
2442 icmd = &cmdiocb->iocb;
2443 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
2444 lp = (uint32_t *) pcmd->virt;
2447 payload_len = be32_to_cpu(cmd) & 0xffff; /* payload length */
2448 payload_len -= sizeof (uint32_t); /* take off word 0 */
2449 cmd &= ELS_CMD_MASK;
2452 lpfc_printf_log(phba,
2455 "%d:0214 RSCN received Data: x%x x%x x%x x%x\n",
2457 phba->fc_flag, payload_len, *lp, phba->fc_rscn_id_cnt);
2459 /* If we are about to begin discovery, just ACC the RSCN.
2460 * Discovery processing will satisfy it.
2462 if (phba->hba_state < LPFC_NS_QRY) {
2463 lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL,
2468 /* If we are already processing an RSCN, save the received
2469 * RSCN payload buffer, cmdiocb->context2 to process later.
2471 if (phba->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) {
2472 if ((phba->fc_rscn_id_cnt < FC_MAX_HOLD_RSCN) &&
2473 !(phba->fc_flag & FC_RSCN_DISCOVERY)) {
2474 spin_lock_irq(phba->host->host_lock);
2475 phba->fc_flag |= FC_RSCN_MODE;
2476 spin_unlock_irq(phba->host->host_lock);
2477 phba->fc_rscn_id_list[phba->fc_rscn_id_cnt++] = pcmd;
2479 /* If we zero, cmdiocb->context2, the calling
2480 * routine will not try to free it.
2482 cmdiocb->context2 = NULL;
2485 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
2486 "%d:0235 Deferred RSCN "
2487 "Data: x%x x%x x%x\n",
2488 phba->brd_no, phba->fc_rscn_id_cnt,
2489 phba->fc_flag, phba->hba_state);
2491 spin_lock_irq(phba->host->host_lock);
2492 phba->fc_flag |= FC_RSCN_DISCOVERY;
2493 spin_unlock_irq(phba->host->host_lock);
2494 /* ReDiscovery RSCN */
2495 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
2496 "%d:0234 ReDiscovery RSCN "
2497 "Data: x%x x%x x%x\n",
2498 phba->brd_no, phba->fc_rscn_id_cnt,
2499 phba->fc_flag, phba->hba_state);
2502 lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL,
2505 /* send RECOVERY event for ALL nodes that match RSCN payload */
2506 lpfc_rscn_recovery_check(phba);
2510 phba->fc_flag |= FC_RSCN_MODE;
2511 phba->fc_rscn_id_list[phba->fc_rscn_id_cnt++] = pcmd;
2513 * If we zero, cmdiocb->context2, the calling routine will
2514 * not try to free it.
2516 cmdiocb->context2 = NULL;
2518 lpfc_set_disctmo(phba);
2521 lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, newnode);
2523 /* send RECOVERY event for ALL nodes that match RSCN payload */
2524 lpfc_rscn_recovery_check(phba);
2526 return lpfc_els_handle_rscn(phba);
/*
 * Begin processing the accumulated RSCN state: restart the discovery
 * timer and query the NameServer.  If an unmapped NameServer node
 * exists, issue a GID_FT CT request; otherwise ensure a NameServer
 * login is in progress - allocating a fresh nodelist entry and
 * issuing a PLOGI to NameServer_DID when none exists.  On failures
 * the pending RSCN state is flushed via lpfc_els_flush_rscn().
 * Further processing resumes from the NameServer completion handlers.
 */
2530 lpfc_els_handle_rscn(struct lpfc_hba * phba)
2532 struct lpfc_nodelist *ndlp;
2534 /* Start timer for RSCN processing */
2535 lpfc_set_disctmo(phba);
2537 /* RSCN processed */
2538 lpfc_printf_log(phba,
2541 "%d:0215 RSCN processed Data: x%x x%x x%x x%x\n",
2543 phba->fc_flag, 0, phba->fc_rscn_id_cnt,
2546 /* To process RSCN, first compare RSCN data with NameServer */
2547 phba->fc_ns_retry = 0;
2548 ndlp = lpfc_findnode_did(phba, NLP_SEARCH_UNMAPPED, NameServer_DID);
2550 /* Good ndlp, issue CT Request to NameServer */
2551 if (lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT) == 0) {
2552 /* Wait for NameServer query cmpl before we can
2557 /* If login to NameServer does not exist, issue one */
2558 /* Good status, issue PLOGI to NameServer */
2559 ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, NameServer_DID);
2561 /* Wait for NameServer login cmpl before we can
2565 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
2567 lpfc_els_flush_rscn(phba);
2570 lpfc_nlp_init(phba, ndlp, NameServer_DID);
2571 ndlp->nlp_type |= NLP_FABRIC;
2572 ndlp->nlp_prev_state = ndlp->nlp_state;
2573 ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
2574 lpfc_issue_els_plogi(phba, NameServer_DID, 0);
2575 /* Wait for NameServer login cmpl before we can
2581 lpfc_els_flush_rscn(phba);
/*
 * Handle an unsolicited FLOGI (point-to-point negotiation).  FLOGI is
 * never expected in loop topology - it is logged and ignored.  For
 * valid service parameters, our portname is compared with the
 * remote's: if ours is smaller the link is bounced (linkdown +
 * INIT_LINK mailbox) so the other side becomes the FLOGI originator;
 * if ours is greater FC_PT2PT_PLOGI is set so we will initiate PLOGI.
 * FC_PT2PT is set and the fabric flags cleared; bad parameters get an
 * LS_RJT, otherwise the FLOGI is ACCed with our service parameters
 * (ELS_CMD_PLOGI flavour of lpfc_els_rsp_acc).
 */
2586 lpfc_els_rcv_flogi(struct lpfc_hba * phba,
2587 struct lpfc_iocbq * cmdiocb,
2588 struct lpfc_nodelist * ndlp, uint8_t newnode)
2590 struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
2591 uint32_t *lp = (uint32_t *) pcmd->virt;
2592 IOCB_t *icmd = &cmdiocb->iocb;
2593 struct serv_parm *sp;
2600 sp = (struct serv_parm *) lp;
2602 /* FLOGI received */
2604 lpfc_set_disctmo(phba);
2606 if (phba->fc_topology == TOPOLOGY_LOOP) {
2607 /* We should never receive a FLOGI in loop mode, ignore it */
2608 did = icmd->un.elsreq64.remoteID;
2610 /* An FLOGI ELS command <elsCmd> was received from DID <did> in
2612 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
2613 "%d:0113 An FLOGI ELS command x%x was received "
2614 "from DID x%x in Loop Mode\n",
2615 phba->brd_no, cmd, did);
2621 if ((lpfc_check_sparm(phba, ndlp, sp, CLASS3))) {
2622 /* For a FLOGI we accept, then if our portname is greater
2623 * then the remote portname we initiate Nport login.
2626 rc = memcmp(&phba->fc_portname, &sp->portName,
2627 sizeof (struct lpfc_name));
2630 if ((mbox = mempool_alloc(phba->mbox_mem_pool,
2631 GFP_KERNEL)) == 0) {
2634 lpfc_linkdown(phba);
2635 lpfc_init_link(phba, mbox,
2637 phba->cfg_link_speed);
2638 mbox->mb.un.varInitLnk.lipsr_AL_PA = 0;
2639 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2640 rc = lpfc_sli_issue_mbox
2641 (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB));
2642 if (rc == MBX_NOT_FINISHED) {
2643 mempool_free( mbox, phba->mbox_mem_pool);
2646 } else if (rc > 0) { /* greater than */
2647 spin_lock_irq(phba->host->host_lock);
2648 phba->fc_flag |= FC_PT2PT_PLOGI;
2649 spin_unlock_irq(phba->host->host_lock);
2651 phba->fc_flag |= FC_PT2PT;
2652 phba->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
2654 /* Reject this request because invalid parameters */
2655 stat.un.b.lsRjtRsvd0 = 0;
2656 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
2657 stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
2658 stat.un.b.vendorUnique = 0;
2659 lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
2664 lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL, newnode);
/*
 * lpfc_els_rcv_rnid - Handle a received RNID (Request Node Identification).
 *
 * Accepts only the topology-discovery format (RNID_TOPOLOGY_DISC) via
 * lpfc_els_rsp_rnid_acc(); any other format is rejected with LS_RJT
 * (unable to perform / can't give data).
 */
2670 lpfc_els_rcv_rnid(struct lpfc_hba * phba,
2671 struct lpfc_iocbq * cmdiocb, struct lpfc_nodelist * ndlp)
2673 struct lpfc_dmabuf *pcmd;
2680 icmd = &cmdiocb->iocb;
2681 did = icmd->un.elsreq64.remoteID;
2682 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
2683 lp = (uint32_t *) pcmd->virt;
2690 switch (rn->Format) {
2692 case RNID_TOPOLOGY_DISC:
2694 lpfc_els_rsp_rnid_acc(phba, rn->Format, cmdiocb, ndlp);
2697 /* Reject this request because format not supported */
2698 stat.un.b.lsRjtRsvd0 = 0;
2699 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
2700 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
2701 stat.un.b.vendorUnique = 0;
2702 lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
/*
 * lpfc_els_rcv_lirr - Handle a received LIRR (Link Incident Record
 * Registration).  The command is not supported: it is unconditionally
 * answered with LS_RJT (unable to perform / can't give data).
 */
2708 lpfc_els_rcv_lirr(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
2709 struct lpfc_nodelist * ndlp)
2713 /* For now, unconditionally reject this command */
2714 stat.un.b.lsRjtRsvd0 = 0;
2715 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
2716 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
2717 stat.un.b.vendorUnique = 0;
2718 lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
/*
 * lpfc_els_rsp_rps_acc - READ_LNK_STAT mailbox completion: build and
 * transmit the RPS (Read Port Status) ACC.
 *
 * pmb->context1 carries the XRI of the original RPS exchange and
 * pmb->context2 the requesting ndlp.  On mailbox error the mailbox is freed
 * and no response is sent.  Otherwise an ACC ELS iocb is prepared, the
 * RPS_RSP payload is filled from the link-statistics counters (byte-swapped
 * to wire order; be32_to_cpu and cpu_to_be32 perform the same swap), and
 * the iocb is issued on the ELS ring; it is freed on issue failure.
 */
2723 lpfc_els_rsp_rps_acc(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
2725 struct lpfc_sli *psli;
2726 struct lpfc_sli_ring *pring;
2731 struct lpfc_iocbq *elsiocb;
2732 struct lpfc_nodelist *ndlp;
2733 uint16_t xri, status;
2737 pring = &psli->ring[LPFC_ELS_RING];
2740 ndlp = (struct lpfc_nodelist *) pmb->context2;
2741 xri = (uint16_t) ((unsigned long)(pmb->context1));
2745 if (mb->mbxStatus) {
2746 mempool_free( pmb, phba->mbox_mem_pool);
2750 cmdsize = sizeof(RPS_RSP) + sizeof(uint32_t);
2751 mempool_free( pmb, phba->mbox_mem_pool);
2752 elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, lpfc_max_els_tries, ndlp,
2753 ndlp->nlp_DID, ELS_CMD_ACC);
2757 icmd = &elsiocb->iocb;
/* respond on the requester's exchange, not a new one */
2758 icmd->ulpContext = xri;
2760 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2761 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
2762 pcmd += sizeof (uint32_t); /* Skip past command */
2763 rps_rsp = (RPS_RSP *)pcmd;
2765 if (phba->fc_topology != TOPOLOGY_LOOP)
2769 if (phba->fc_flag & FC_FABRIC)
2773 rps_rsp->portStatus = be16_to_cpu(status);
2774 rps_rsp->linkFailureCnt = be32_to_cpu(mb->un.varRdLnk.linkFailureCnt);
2775 rps_rsp->lossSyncCnt = be32_to_cpu(mb->un.varRdLnk.lossSyncCnt);
2776 rps_rsp->lossSignalCnt = be32_to_cpu(mb->un.varRdLnk.lossSignalCnt);
2777 rps_rsp->primSeqErrCnt = be32_to_cpu(mb->un.varRdLnk.primSeqErrCnt);
2778 rps_rsp->invalidXmitWord = be32_to_cpu(mb->un.varRdLnk.invalidXmitWord);
2779 rps_rsp->crcCnt = be32_to_cpu(mb->un.varRdLnk.crcCnt);
2781 /* Xmit ELS RPS ACC response tag <ulpIoTag> */
2782 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
2783 "%d:0128 Xmit ELS RPS ACC response tag x%x "
2784 "Data: x%x x%x x%x x%x x%x\n",
2786 elsiocb->iocb.ulpIoTag,
2787 elsiocb->iocb.ulpContext, ndlp->nlp_DID,
2788 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
2790 elsiocb->iocb_cmpl = lpfc_cmpl_els_acc;
2791 phba->fc_stat.elsXmitACC++;
2792 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
2793 lpfc_els_free_iocb(phba, elsiocb);
/*
 * lpfc_els_rcv_rps - Handle a received RPS (Read Port Status).
 *
 * Only serviced for nodes in the UNMAPPED/MAPPED states; otherwise LS_RJT.
 * The request is honored when it addresses this port (flag 1 with port
 * number 0, flag 2 with a matching portname, or the remaining visible
 * alternative): a READ_LNK_STAT mailbox is allocated (GFP_ATOMIC — may be
 * called in non-sleeping context) and issued, stashing the exchange XRI in
 * context1 and the ndlp in context2; lpfc_els_rsp_rps_acc() sends the ACC
 * from the mailbox completion.  All failure paths fall through to LS_RJT.
 */
2799 lpfc_els_rcv_rps(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
2800 struct lpfc_nodelist * ndlp)
2805 struct lpfc_dmabuf *pcmd;
2809 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2810 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
2811 stat.un.b.lsRjtRsvd0 = 0;
2812 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
2813 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
2814 stat.un.b.vendorUnique = 0;
2815 lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
2818 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
2819 lp = (uint32_t *) pcmd->virt;
/* low nibble of the first payload word selects the addressing mode */
2820 flag = (be32_to_cpu(*lp++) & 0xf);
2824 ((flag == 1) && (be32_to_cpu(rps->un.portNum) == 0)) ||
2825 ((flag == 2) && (memcmp(&rps->un.portName, &phba->fc_portname,
2826 sizeof (struct lpfc_name)) == 0))) {
2827 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC))) {
2828 lpfc_read_lnk_stat(phba, mbox);
2830 (void *)((unsigned long)cmdiocb->iocb.ulpContext);
2831 mbox->context2 = ndlp;
2832 mbox->mbox_cmpl = lpfc_els_rsp_rps_acc;
2833 if (lpfc_sli_issue_mbox (phba, mbox,
2834 (MBX_NOWAIT | MBX_STOP_IOCB)) != MBX_NOT_FINISHED) {
2835 /* Mbox completion will send ELS Response */
2838 mempool_free(mbox, phba->mbox_mem_pool);
2841 stat.un.b.lsRjtRsvd0 = 0;
2842 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
2843 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
2844 stat.un.b.vendorUnique = 0;
2845 lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
/*
 * lpfc_els_rsp_rpl_acc - Build and transmit the RPL (Read Port List) ACC.
 *
 * Prepares an ACC ELS iocb on the original exchange (XRI copied from
 * oldiocb), writes the ACC command word, stores the payload length in the
 * upper half of that word (note the sizeof(uint16_t) pointer steps after
 * the 32-bit store), then copies an RPL_RSP describing this single port:
 * list length 1, our DID and portname.  The iocb is issued on the ELS
 * ring and freed on issue failure.
 */
2850 lpfc_els_rsp_rpl_acc(struct lpfc_hba * phba, uint16_t cmdsize,
2851 struct lpfc_iocbq * oldiocb, struct lpfc_nodelist * ndlp)
2856 struct lpfc_iocbq *elsiocb;
2857 struct lpfc_sli_ring *pring;
2858 struct lpfc_sli *psli;
2862 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
2864 elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
2865 ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
2869 icmd = &elsiocb->iocb;
2870 oldcmd = &oldiocb->iocb;
2871 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
2873 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2874 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
/* 16-bit payload length lives in the high half of the ACC word */
2875 pcmd += sizeof (uint16_t);
2876 *((uint16_t *)(pcmd)) = be16_to_cpu(cmdsize);
2877 pcmd += sizeof(uint16_t);
2879 /* Setup the RPL ACC payload */
2880 rpl_rsp.listLen = be32_to_cpu(1);
2882 rpl_rsp.port_num_blk.portNum = 0;
2883 rpl_rsp.port_num_blk.portID = be32_to_cpu(phba->fc_myDID);
2884 memcpy(&rpl_rsp.port_num_blk.portName, &phba->fc_portname,
2885 sizeof(struct lpfc_name));
2887 memcpy(pcmd, &rpl_rsp, cmdsize - sizeof(uint32_t));
2890 /* Xmit ELS RPL ACC response tag <ulpIoTag> */
2891 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
2892 "%d:0128 Xmit ELS RPL ACC response tag x%x "
2893 "Data: x%x x%x x%x x%x x%x\n",
2895 elsiocb->iocb.ulpIoTag,
2896 elsiocb->iocb.ulpContext, ndlp->nlp_DID,
2897 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
2899 elsiocb->iocb_cmpl = lpfc_cmpl_els_acc;
2901 phba->fc_stat.elsXmitACC++;
2902 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
2903 lpfc_els_free_iocb(phba, elsiocb);
/*
 * lpfc_els_rcv_rpl - Handle a received RPL (Read Port List).
 *
 * Only serviced for UNMAPPED/MAPPED nodes; otherwise LS_RJT.  Since this
 * HBA reports a single port, the response size is the full RPL_RSP when
 * the requester's maxsize allows it, else clamped to maxsize words; the
 * ACC itself is built by lpfc_els_rsp_rpl_acc().
 */
2910 lpfc_els_rcv_rpl(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
2911 struct lpfc_nodelist * ndlp)
2913 struct lpfc_dmabuf *pcmd;
2920 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2921 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
2922 stat.un.b.lsRjtRsvd0 = 0;
2923 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
2924 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
2925 stat.un.b.vendorUnique = 0;
2926 lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
2929 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
2930 lp = (uint32_t *) pcmd->virt;
/* RPL payload starts one word past the ELS command word */
2931 rpl = (RPL *) (lp + 1);
2933 maxsize = be32_to_cpu(rpl->maxsize);
2935 /* We support only one port */
2936 if ((rpl->index == 0) &&
2938 ((maxsize * sizeof(uint32_t)) >= sizeof(RPL_RSP)))) {
2939 cmdsize = sizeof(uint32_t) + sizeof(RPL_RSP);
2941 cmdsize = sizeof(uint32_t) + maxsize * sizeof(uint32_t);
2943 lpfc_els_rsp_rpl_acc(phba, cmdsize, cmdiocb, ndlp);
/*
 * lpfc_els_rcv_farp - Handle a received FARP-REQ (FC Address Resolution).
 *
 * Only WWPN/WWNN match flags are supported; other match bits bail out.
 * If the request targets our portname or nodename and the sending node is
 * logged in (UNMAPPED/MAPPED), the requested follow-ups are honored: a
 * PLOGI back to the node (FARP_REQUEST_PLOGI) and/or a FARP response
 * (FARP_REQUEST_FARPR).
 */
2949 lpfc_els_rcv_farp(struct lpfc_hba * phba,
2950 struct lpfc_iocbq * cmdiocb, struct lpfc_nodelist * ndlp)
2952 struct lpfc_dmabuf *pcmd;
2956 uint32_t cmd, cnt, did;
2958 icmd = &cmdiocb->iocb;
2959 did = icmd->un.elsreq64.remoteID;
2960 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
2961 lp = (uint32_t *) pcmd->virt;
2966 /* FARP-REQ received from DID <did> */
2967 lpfc_printf_log(phba,
2970 "%d:0601 FARP-REQ received from DID x%x\n",
2973 /* We will only support match on WWPN or WWNN */
2974 if (fp->Mflags & ~(FARP_MATCH_NODE | FARP_MATCH_PORT)) {
2979 /* If this FARP command is searching for my portname */
2980 if (fp->Mflags & FARP_MATCH_PORT) {
2981 if (memcmp(&fp->RportName, &phba->fc_portname,
2982 sizeof (struct lpfc_name)) == 0)
2986 /* If this FARP command is searching for my nodename */
2987 if (fp->Mflags & FARP_MATCH_NODE) {
2988 if (memcmp(&fp->RnodeName, &phba->fc_nodename,
2989 sizeof (struct lpfc_name)) == 0)
2994 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) ||
2995 (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
2996 /* Log back into the node before sending the FARP. */
2997 if (fp->Rflags & FARP_REQUEST_PLOGI) {
2998 ndlp->nlp_prev_state = ndlp->nlp_state;
2999 ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
3000 lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
3001 lpfc_issue_els_plogi(phba, ndlp->nlp_DID, 0);
3004 /* Send a FARP response to that node */
3005 if (fp->Rflags & FARP_REQUEST_FARPR) {
3006 lpfc_issue_els_farpr(phba, did, 0);
/*
 * lpfc_els_rcv_farpr - Handle a received FARP-RSP.
 * Simply logs the event and ACCepts it; no further processing is needed.
 */
3014 lpfc_els_rcv_farpr(struct lpfc_hba * phba,
3015 struct lpfc_iocbq * cmdiocb, struct lpfc_nodelist * ndlp)
3017 struct lpfc_dmabuf *pcmd;
3022 icmd = &cmdiocb->iocb;
3023 did = icmd->un.elsreq64.remoteID;
3024 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
3025 lp = (uint32_t *) pcmd->virt;
3028 /* FARP-RSP received from DID <did> */
3029 lpfc_printf_log(phba,
3032 "%d:0600 FARP-RSP received from DID x%x\n",
3035 /* ACCEPT the Farp resp request */
3036 lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
/*
 * lpfc_els_rcv_fan - Handle a received FAN (Fabric Address Notification).
 *
 * FAN has no reply sequence.  While waiting in LPFC_LOCAL_CFG_LINK:
 *  - If the FAN's fabric node/port names differ from our cached fabric
 *    parameters, the port has switched fabrics: drop old NLP_FABRIC logins,
 *    unregister RPIs of NPR nodes not marked for ADISC, and re-FLOGI.
 *  - Otherwise discovery is unnecessary; NPR nodes are returned to their
 *    previous UNMAPPED/MAPPED state and lpfc_disc_start() is invoked
 *    (expected to amount to a CLEAR_LA).
 */
3042 lpfc_els_rcv_fan(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
3043 struct lpfc_nodelist * fan_ndlp)
3045 struct lpfc_dmabuf *pcmd;
3050 struct lpfc_nodelist *ndlp, *next_ndlp;
3053 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, "%d:265 FAN received\n",
3056 icmd = &cmdiocb->iocb;
3057 did = icmd->un.elsreq64.remoteID;
3058 pcmd = (struct lpfc_dmabuf *)cmdiocb->context2;
3059 lp = (uint32_t *)pcmd->virt;
3064 /* FAN received; Fan does not have a reply sequence */
3066 if (phba->hba_state == LPFC_LOCAL_CFG_LINK) {
3067 if ((memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName,
3068 sizeof(struct lpfc_name)) != 0) ||
3069 (memcmp(&phba->fc_fabparam.portName, &fp->FportName,
3070 sizeof(struct lpfc_name)) != 0)) {
3072 * This node has switched fabrics. FLOGI is required
3073 * Clean up the old rpi's
3076 list_for_each_entry_safe(ndlp, next_ndlp,
3077 &phba->fc_npr_list, nlp_listp) {
3079 if (ndlp->nlp_type & NLP_FABRIC) {
3081 * Clean up old Fabric, Nameserver and
3082 * other NLP_FABRIC logins
3084 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
3085 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
3086 /* Fail outstanding I/O now since this
3087 * device is marked for PLOGI
3089 lpfc_unreg_rpi(phba, ndlp);
/* restart fabric login from scratch */
3093 phba->hba_state = LPFC_FLOGI;
3094 lpfc_set_disctmo(phba);
3095 lpfc_initial_flogi(phba);
3098 /* Discovery not needed,
3099 * move the nodes to their original state.
3101 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
3104 switch (ndlp->nlp_prev_state) {
3105 case NLP_STE_UNMAPPED_NODE:
3106 ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
3107 ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
3108 lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
3111 case NLP_STE_MAPPED_NODE:
3112 ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
3113 ndlp->nlp_state = NLP_STE_MAPPED_NODE;
3114 lpfc_nlp_list(phba, ndlp, NLP_MAPPED_LIST);
3122 /* Start discovery - this should just do CLEAR_LA */
3123 lpfc_disc_start(phba);
/*
 * lpfc_els_timeout - ELS timer callback (runs in timer/softirq context).
 *
 * Under host_lock, sets WORKER_ELS_TMO in work_hba_events (if not already
 * pending) and wakes the worker thread; the actual timeout processing is
 * deferred to lpfc_els_timeout_handler().
 */
3129 lpfc_els_timeout(unsigned long ptr)
3131 struct lpfc_hba *phba;
3132 unsigned long iflag;
3134 phba = (struct lpfc_hba *)ptr;
3137 spin_lock_irqsave(phba->host->host_lock, iflag);
3138 if (!(phba->work_hba_events & WORKER_ELS_TMO)) {
3139 phba->work_hba_events |= WORKER_ELS_TMO;
3140 if (phba->work_wait)
3141 wake_up(phba->work_wait);
3143 spin_unlock_irqrestore(phba->host->host_lock, iflag);
/*
 * lpfc_els_timeout_handler - Worker-thread handler for WORKER_ELS_TMO.
 *
 * Walks the ELS ring's txcmplq under host_lock: libdfc iocbs and
 * FARP/FARPR commands are skipped; each remaining iocb's drvrTimeout is
 * aged by 2*RA_TOV.  Expired iocbs are removed from the queue, aborted
 * (abort-iotag32 for GEN_REQUEST64), logged, and completed with
 * LOCAL_REJECT/SLI_ABORTED — the lock is dropped around each completion
 * callback.  If the txcmplq is still non-empty the timer is re-armed.
 *
 * NOTE(review): the lpfc_findnode_rpi() result is dereferenced for
 * remote_ID without a NULL check — confirm it cannot fail here.
 */
3148 lpfc_els_timeout_handler(struct lpfc_hba *phba)
3150 struct lpfc_sli_ring *pring;
3151 struct lpfc_iocbq *tmp_iocb, *piocb;
3153 struct lpfc_dmabuf *pcmd;
3154 struct list_head *dlp;
3156 uint32_t els_command;
3162 spin_lock_irq(phba->host->host_lock);
3163 /* If the timer is already canceled do nothing */
3164 if (!(phba->work_hba_events & WORKER_ELS_TMO)) {
3165 spin_unlock_irq(phba->host->host_lock);
/* aging quantum: twice the resource-allocation timeout value */
3168 timeout = (uint32_t)(phba->fc_ratov << 1);
3170 pring = &phba->sli.ring[LPFC_ELS_RING];
3171 dlp = &pring->txcmplq;
3173 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
3176 if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
3179 pcmd = (struct lpfc_dmabuf *) piocb->context2;
3180 elscmd = (uint32_t *) (pcmd->virt);
3181 els_command = *elscmd;
3183 if ((els_command == ELS_CMD_FARP)
3184 || (els_command == ELS_CMD_FARPR)) {
3188 if (piocb->drvrTimeout > 0) {
3189 if (piocb->drvrTimeout >= timeout) {
3190 piocb->drvrTimeout -= timeout;
3192 piocb->drvrTimeout = 0;
3197 list_del(&piocb->list);
3198 pring->txcmplq_cnt--;
3200 if (cmd->ulpCommand == CMD_GEN_REQUEST64_CR) {
3201 struct lpfc_nodelist *ndlp;
3202 spin_unlock_irq(phba->host->host_lock);
3203 ndlp = lpfc_findnode_rpi(phba, cmd->ulpContext);
3204 spin_lock_irq(phba->host->host_lock);
3205 remote_ID = ndlp->nlp_DID;
3206 if (cmd->un.elsreq64.bdl.ulpIoTag32) {
3207 lpfc_sli_issue_abort_iotag32(phba,
3211 remote_ID = cmd->un.elsreq64.remoteID;
3214 lpfc_printf_log(phba,
3217 "%d:0127 ELS timeout Data: x%x x%x x%x x%x\n",
3218 phba->brd_no, els_command,
3219 remote_ID, cmd->ulpCommand, cmd->ulpIoTag);
3222 * The iocb has timed out; abort it.
3224 if (piocb->iocb_cmpl) {
3225 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
3226 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
3227 spin_unlock_irq(phba->host->host_lock);
3228 (piocb->iocb_cmpl) (phba, piocb, piocb);
3229 spin_lock_irq(phba->host->host_lock);
3231 lpfc_sli_release_iocbq(phba, piocb);
3233 if (phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt) {
3234 phba->els_tmofunc.expires = jiffies + HZ * timeout;
3235 add_timer(&phba->els_tmofunc);
3237 spin_unlock_irq(phba->host->host_lock);
/*
 * lpfc_els_flush_cmd - Abort every pending ELS command on the ELS ring.
 *
 * Under host_lock, drains both the transmit queue (txq) and the
 * outstanding-completion queue (txcmplq).  libdfc iocbs are skipped, as
 * are QUE_RING and ABORT/CLOSE iocbs on the txq.  Each flushed iocb is
 * removed from its list and completed with LOCAL_REJECT/SLI_ABORTED
 * (lock dropped around the completion callback) or released outright if
 * it has no completion routine.
 *
 * NOTE(review): the txq loop decrements pring->txcmplq_cnt — this looks
 * like it should be txq_cnt for that queue; confirm against upstream.
 */
3241 lpfc_els_flush_cmd(struct lpfc_hba * phba)
3243 struct lpfc_sli_ring *pring;
3244 struct lpfc_iocbq *tmp_iocb, *piocb;
3246 struct lpfc_dmabuf *pcmd;
3248 uint32_t els_command;
3250 pring = &phba->sli.ring[LPFC_ELS_RING];
3251 spin_lock_irq(phba->host->host_lock);
3252 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
3255 if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
3259 /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */
3260 if ((cmd->ulpCommand == CMD_QUE_RING_BUF_CN) ||
3261 (cmd->ulpCommand == CMD_QUE_RING_BUF64_CN) ||
3262 (cmd->ulpCommand == CMD_CLOSE_XRI_CN) ||
3263 (cmd->ulpCommand == CMD_ABORT_XRI_CN)) {
3267 pcmd = (struct lpfc_dmabuf *) piocb->context2;
3268 elscmd = (uint32_t *) (pcmd->virt);
3269 els_command = *elscmd;
3271 list_del(&piocb->list);
3272 pring->txcmplq_cnt--;
3274 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
3275 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
3277 if (piocb->iocb_cmpl) {
3278 spin_unlock_irq(phba->host->host_lock);
3279 (piocb->iocb_cmpl) (phba, piocb, piocb);
3280 spin_lock_irq(phba->host->host_lock);
3282 lpfc_sli_release_iocbq(phba, piocb);
/* second pass: iocbs already handed to the HBA and awaiting completion */
3285 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
3288 if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
3291 pcmd = (struct lpfc_dmabuf *) piocb->context2;
3292 elscmd = (uint32_t *) (pcmd->virt);
3293 els_command = *elscmd;
3295 list_del(&piocb->list);
3296 pring->txcmplq_cnt--;
3298 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
3299 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
3301 if (piocb->iocb_cmpl) {
3302 spin_unlock_irq(phba->host->host_lock);
3303 (piocb->iocb_cmpl) (phba, piocb, piocb);
3304 spin_lock_irq(phba->host->host_lock);
3306 lpfc_sli_release_iocbq(phba, piocb);
3308 spin_unlock_irq(phba->host->host_lock);
3313 lpfc_els_unsol_event(struct lpfc_hba * phba,
3314 struct lpfc_sli_ring * pring, struct lpfc_iocbq * elsiocb)
3316 struct lpfc_sli *psli;
3317 struct lpfc_nodelist *ndlp;
3318 struct lpfc_dmabuf *mp;
3325 uint32_t drop_cmd = 0; /* by default do NOT drop received cmd */
3326 uint32_t rjt_err = 0;
3329 icmd = &elsiocb->iocb;
3331 if ((icmd->ulpStatus == IOSTAT_LOCAL_REJECT) &&
3332 ((icmd->un.ulpWord[4] & 0xff) == IOERR_RCV_BUFFER_WAITING)) {
3333 /* Not enough posted buffers; Try posting more buffers */
3334 phba->fc_stat.NoRcvBuf++;
3335 lpfc_post_buffer(phba, pring, 0, 1);
3339 /* If there are no BDEs associated with this IOCB,
3340 * there is nothing to do.
3342 if (icmd->ulpBdeCount == 0)
3345 /* type of ELS cmd is first 32bit word in packet */
3346 mp = lpfc_sli_ringpostbuf_get(phba, pring, getPaddr(icmd->un.
3350 cont64[0].addrLow));
3357 lp = (uint32_t *) mp->virt;
3359 lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], 1, 1);
3361 if (icmd->ulpStatus) {
3362 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3368 /* Check to see if link went down during discovery */
3369 if (lpfc_els_chk_latt(phba)) {
3370 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3376 did = icmd->un.rcvels.remoteID;
3377 ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, did);
3379 /* Cannot find existing Fabric ndlp, so allocate a new one */
3380 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
3382 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3388 lpfc_nlp_init(phba, ndlp, did);
3390 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) {
3391 ndlp->nlp_type |= NLP_FABRIC;
3395 phba->fc_stat.elsRcvFrame++;
3396 elsiocb->context1 = ndlp;
3397 elsiocb->context2 = mp;
3399 if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) {
3400 cmd &= ELS_CMD_MASK;
3402 /* ELS command <elsCmd> received from NPORT <did> */
3403 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
3404 "%d:0112 ELS command x%x received from NPORT x%x "
3405 "Data: x%x\n", phba->brd_no, cmd, did, phba->hba_state);
3409 phba->fc_stat.elsRcvPLOGI++;
3410 if (phba->hba_state < LPFC_DISC_AUTH) {
3414 lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_PLOGI);
3417 phba->fc_stat.elsRcvFLOGI++;
3418 lpfc_els_rcv_flogi(phba, elsiocb, ndlp, newnode);
3420 mempool_free( ndlp, phba->nlp_mem_pool);
3424 phba->fc_stat.elsRcvLOGO++;
3425 if (phba->hba_state < LPFC_DISC_AUTH) {
3429 lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_LOGO);
3432 phba->fc_stat.elsRcvPRLO++;
3433 if (phba->hba_state < LPFC_DISC_AUTH) {
3437 lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_PRLO);
3440 phba->fc_stat.elsRcvRSCN++;
3441 lpfc_els_rcv_rscn(phba, elsiocb, ndlp, newnode);
3443 mempool_free( ndlp, phba->nlp_mem_pool);
3447 phba->fc_stat.elsRcvADISC++;
3448 if (phba->hba_state < LPFC_DISC_AUTH) {
3452 lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_ADISC);
3455 phba->fc_stat.elsRcvPDISC++;
3456 if (phba->hba_state < LPFC_DISC_AUTH) {
3460 lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_PDISC);
3463 phba->fc_stat.elsRcvFARPR++;
3464 lpfc_els_rcv_farpr(phba, elsiocb, ndlp);
3467 phba->fc_stat.elsRcvFARP++;
3468 lpfc_els_rcv_farp(phba, elsiocb, ndlp);
3471 phba->fc_stat.elsRcvFAN++;
3472 lpfc_els_rcv_fan(phba, elsiocb, ndlp);
3475 phba->fc_stat.elsRcvPRLI++;
3476 if (phba->hba_state < LPFC_DISC_AUTH) {
3480 lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_PRLI);
3483 phba->fc_stat.elsRcvLIRR++;
3484 lpfc_els_rcv_lirr(phba, elsiocb, ndlp);
3486 mempool_free( ndlp, phba->nlp_mem_pool);
3490 phba->fc_stat.elsRcvRPS++;
3491 lpfc_els_rcv_rps(phba, elsiocb, ndlp);
3493 mempool_free( ndlp, phba->nlp_mem_pool);
3497 phba->fc_stat.elsRcvRPL++;
3498 lpfc_els_rcv_rpl(phba, elsiocb, ndlp);
3500 mempool_free( ndlp, phba->nlp_mem_pool);
3504 phba->fc_stat.elsRcvRNID++;
3505 lpfc_els_rcv_rnid(phba, elsiocb, ndlp);
3507 mempool_free( ndlp, phba->nlp_mem_pool);
3511 /* Unsupported ELS command, reject */
3514 /* Unknown ELS command <elsCmd> received from NPORT <did> */
3515 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
3516 "%d:0115 Unknown ELS command x%x received from "
3517 "NPORT x%x\n", phba->brd_no, cmd, did);
3519 mempool_free( ndlp, phba->nlp_mem_pool);
3524 /* check if need to LS_RJT received ELS cmd */
3526 stat.un.b.lsRjtRsvd0 = 0;
3527 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
3528 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
3529 stat.un.b.vendorUnique = 0;
3530 lpfc_els_rsp_reject(phba, stat.un.lsRjtError, elsiocb, ndlp);
3533 if (elsiocb->context2) {
3534 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3538 /* check if need to drop received ELS cmd */
3539 if (drop_cmd == 1) {
3540 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
3541 "%d:0111 Dropping received ELS cmd "
3542 "Data: x%x x%x x%x\n", phba->brd_no,
3543 icmd->ulpStatus, icmd->un.ulpWord[4],
3545 phba->fc_stat.elsRcvDrop++;