1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2006 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
9 * This program is free software; you can redistribute it and/or *
10 * modify it under the terms of version 2 of the GNU General *
11 * Public License as published by the Free Software Foundation. *
12 * This program is distributed in the hope that it will be useful. *
13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17 * TO BE LEGALLY INVALID. See the GNU General Public License for *
18 * more details, a copy of which can be found in the file COPYING *
19 * included with this package. *
20 *******************************************************************/
22 #include <linux/blkdev.h>
23 #include <linux/pci.h>
24 #include <linux/kthread.h>
25 #include <linux/interrupt.h>
27 #include <scsi/scsi.h>
28 #include <scsi/scsi_device.h>
29 #include <scsi/scsi_host.h>
30 #include <scsi/scsi_transport_fc.h>
33 #include "lpfc_disc.h"
35 #include "lpfc_scsi.h"
37 #include "lpfc_logmsg.h"
38 #include "lpfc_crtn.h"
40 /* AlpaArray for assignment of scsid for scan-down and bind_method */
41 static uint8_t lpfcAlpaArray[] = {
42 0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6,
43 0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA,
44 0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5,
45 0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9,
46 0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97,
47 0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79,
48 0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B,
49 0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56,
50 0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A,
51 0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35,
52 0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29,
53 0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17,
54 0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
57 static void lpfc_disc_timeout_handler(struct lpfc_hba *);
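/*
 * Handle a nodev (node device) timeout that was queued to the worker
 * thread: clear NLP_NODEV_TMO, abort any outstanding FCP I/O for the
 * target, log the timeout, and run the node through the discovery state
 * machine with NLP_EVT_DEVICE_RM so it can be removed.
 */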
60 lpfc_process_nodev_timeout(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
62 uint8_t *name = (uint8_t *)&ndlp->nlp_portname;
65 spin_lock_irq(phba->host->host_lock);
66 if (!(ndlp->nlp_flag & NLP_NODEV_TMO)) {
67 spin_unlock_irq(phba->host->host_lock);
72 * If a discovery event re-added the nodev_timer after the timer
73 * fired but before it was processed, cancel the nlp_tmofunc.
76 spin_unlock_irq(phba->host->host_lock);
77 del_timer_sync(&ndlp->nlp_tmofunc);
78 spin_lock_irq(phba->host->host_lock);
80 ndlp->nlp_flag &= ~NLP_NODEV_TMO;
82 if (ndlp->nlp_sid != NLP_NO_SID) {
84 /* flush the target */
85 lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
86 ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
88 spin_unlock_irq(phba->host->host_lock);
91 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
92 "%d:0203 Nodev timeout on "
93 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
94 "NPort x%x Data: x%x x%x x%x\n",
96 *name, *(name+1), *(name+2), *(name+3),
97 *(name+4), *(name+5), *(name+6), *(name+7),
98 ndlp->nlp_DID, ndlp->nlp_flag,
99 ndlp->nlp_state, ndlp->nlp_rpi);
101 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
102 "%d:0204 Nodev timeout on "
103 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
104 "NPort x%x Data: x%x x%x x%x\n",
106 *name, *(name+1), *(name+2), *(name+3),
107 *(name+4), *(name+5), *(name+6), *(name+7),
108 ndlp->nlp_DID, ndlp->nlp_flag,
109 ndlp->nlp_state, ndlp->nlp_rpi);
112 lpfc_disc_state_machine(phba, ndlp, NULL, NLP_EVT_DEVICE_RM);
117 lpfc_work_list_done(struct lpfc_hba * phba)
119 struct lpfc_work_evt *evtp = NULL;
120 struct lpfc_nodelist *ndlp;
123 spin_lock_irq(phba->host->host_lock);
124 while(!list_empty(&phba->work_list)) {
125 list_remove_head((&phba->work_list), evtp, typeof(*evtp),
127 spin_unlock_irq(phba->host->host_lock);
130 case LPFC_EVT_NODEV_TMO:
131 ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
132 lpfc_process_nodev_timeout(phba, ndlp);
135 case LPFC_EVT_ELS_RETRY:
136 ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
137 lpfc_els_retry_delay_handler(ndlp);
140 case LPFC_EVT_ONLINE:
141 if (phba->hba_state < LPFC_LINK_DOWN)
142 *(int *)(evtp->evt_arg1) = lpfc_online(phba);
144 *(int *)(evtp->evt_arg1) = 0;
145 complete((struct completion *)(evtp->evt_arg2));
147 case LPFC_EVT_OFFLINE:
148 if (phba->hba_state >= LPFC_LINK_DOWN)
150 lpfc_sli_brdrestart(phba);
151 *(int *)(evtp->evt_arg1) =
152 lpfc_sli_brdready(phba,HS_FFRDY | HS_MBRDY);
153 complete((struct completion *)(evtp->evt_arg2));
155 case LPFC_EVT_WARM_START:
156 if (phba->hba_state >= LPFC_LINK_DOWN)
158 lpfc_sli_brdreset(phba);
159 lpfc_hba_down_post(phba);
160 *(int *)(evtp->evt_arg1) =
161 lpfc_sli_brdready(phba, HS_MBRDY);
162 complete((struct completion *)(evtp->evt_arg2));
165 if (phba->hba_state >= LPFC_LINK_DOWN)
167 *(int *)(evtp->evt_arg1) = lpfc_sli_brdkill(phba);
168 complete((struct completion *)(evtp->evt_arg2));
173 spin_lock_irq(phba->host->host_lock);
175 spin_unlock_irq(phba->host->host_lock);
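/*
 * Worker-thread dispatcher: service any latched host attention bits
 * (error, mailbox, link attention), run the deferred discovery/ELS/
 * mailbox/FDMI timeout handlers, handle slow-path ring events, and
 * finally drain the work list via lpfc_work_list_done.
 */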
180 lpfc_work_done(struct lpfc_hba * phba)
182 struct lpfc_sli_ring *pring;
186 uint32_t work_hba_events;
188 spin_lock_irq(phba->host->host_lock);
189 ha_copy = phba->work_ha;
191 work_hba_events=phba->work_hba_events;
192 spin_unlock_irq(phba->host->host_lock);
194 if (ha_copy & HA_ERATT)
195 lpfc_handle_eratt(phba);
197 if (ha_copy & HA_MBATT)
198 lpfc_sli_handle_mb_event(phba);
200 if (ha_copy & HA_LATT)
201 lpfc_handle_latt(phba);
203 if (work_hba_events & WORKER_DISC_TMO)
204 lpfc_disc_timeout_handler(phba);
206 if (work_hba_events & WORKER_ELS_TMO)
207 lpfc_els_timeout_handler(phba);
209 if (work_hba_events & WORKER_MBOX_TMO)
210 lpfc_mbox_timeout_handler(phba);
212 if (work_hba_events & WORKER_FDMI_TMO)
213 lpfc_fdmi_tmo_handler(phba);
215 spin_lock_irq(phba->host->host_lock);
216 phba->work_hba_events &= ~work_hba_events;
217 spin_unlock_irq(phba->host->host_lock);
219 for (i = 0; i < phba->sli.num_rings; i++, ha_copy >>= 4) {
220 pring = &phba->sli.ring[i];
221 if ((ha_copy & HA_RXATT)
222 || (pring->flag & LPFC_DEFERRED_RING_EVENT)) {
223 if (pring->flag & LPFC_STOP_IOCB_MASK) {
224 pring->flag |= LPFC_DEFERRED_RING_EVENT;
226 lpfc_sli_handle_slow_ring_event(phba, pring,
229 pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
232 * Turn on Ring interrupts
234 spin_lock_irq(phba->host->host_lock);
235 control = readl(phba->HCregaddr);
236 control |= (HC_R0INT_ENA << i);
237 writel(control, phba->HCregaddr);
238 readl(phba->HCregaddr); /* flush */
239 spin_unlock_irq(phba->host->host_lock);
243 lpfc_work_list_done (phba);
248 check_work_wait_done(struct lpfc_hba *phba) {
250 spin_lock_irq(phba->host->host_lock);
252 phba->work_hba_events ||
253 (!list_empty(&phba->work_list)) ||
254 kthread_should_stop()) {
255 spin_unlock_irq(phba->host->host_lock);
258 spin_unlock_irq(phba->host->host_lock);
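/*
 * Main loop of the lpfc worker kernel thread: sleep on work_waitq until
 * check_work_wait_done() reports pending work or a stop request, then
 * call lpfc_work_done() to service it.
 */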
264 lpfc_do_work(void *p)
266 struct lpfc_hba *phba = p;
268 DECLARE_WAIT_QUEUE_HEAD(work_waitq);
270 set_user_nice(current, -20);
271 phba->work_wait = &work_waitq;
275 rc = wait_event_interruptible(work_waitq,
276 check_work_wait_done(phba));
279 if (kthread_should_stop())
282 lpfc_work_done(phba);
285 phba->work_wait = NULL;
290 * This is only called to handle FC worker events. Since this is a rare
291 * occurrence, we allocate a struct lpfc_work_evt structure here instead of
292 * embedding it in the IOCB.
295 lpfc_workq_post_event(struct lpfc_hba * phba, void *arg1, void *arg2,
298 struct lpfc_work_evt *evtp;
301 * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will
302 * be queued to worker thread for processing
304 evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_KERNEL);
308 evtp->evt_arg1 = arg1;
309 evtp->evt_arg2 = arg2;
312 list_add_tail(&evtp->evt_listp, &phba->work_list);
313 spin_lock_irq(phba->host->host_lock);
315 wake_up(phba->work_wait);
316 spin_unlock_irq(phba->host->host_lock);
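/*
 * Take the port logically down: unregister any default RPIs, flush
 * pending RSCN and ELS activity, post a DEVICE_RECOVERY event to every
 * node on the active node lists, free unused ndlps, and clear the
 * point-to-point flags before cancelling the discovery timer.
 */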
322 lpfc_linkdown(struct lpfc_hba * phba)
324 struct lpfc_sli *psli;
325 struct lpfc_nodelist *ndlp, *next_ndlp;
326 struct list_head *listp, *node_list[7];
331 /* sysfs or selective reset may call this routine to clean up */
332 if (phba->hba_state >= LPFC_LINK_DOWN) {
333 if (phba->hba_state == LPFC_LINK_DOWN)
336 spin_lock_irq(phba->host->host_lock);
337 phba->hba_state = LPFC_LINK_DOWN;
338 spin_unlock_irq(phba->host->host_lock);
341 /* Clean up any firmware default rpi's */
342 if ((mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
343 lpfc_unreg_did(phba, 0xffffffff, mb);
344 mb->mbox_cmpl=lpfc_sli_def_mbox_cmpl;
345 if (lpfc_sli_issue_mbox(phba, mb, (MBX_NOWAIT | MBX_STOP_IOCB))
346 == MBX_NOT_FINISHED) {
347 mempool_free( mb, phba->mbox_mem_pool);
351 /* Cleanup any outstanding RSCN activity */
352 lpfc_els_flush_rscn(phba);
354 /* Cleanup any outstanding ELS commands */
355 lpfc_els_flush_cmd(phba);
357 /* Issue a LINK DOWN event to all nodes */
358 node_list[0] = &phba->fc_npr_list; /* MUST do this list first */
359 node_list[1] = &phba->fc_nlpmap_list;
360 node_list[2] = &phba->fc_nlpunmap_list;
361 node_list[3] = &phba->fc_prli_list;
362 node_list[4] = &phba->fc_reglogin_list;
363 node_list[5] = &phba->fc_adisc_list;
364 node_list[6] = &phba->fc_plogi_list;
365 for (i = 0; i < 7; i++) {
366 listp = node_list[i];
367 if (list_empty(listp))
370 list_for_each_entry_safe(ndlp, next_ndlp, listp, nlp_listp) {
372 rc = lpfc_disc_state_machine(phba, ndlp, NULL,
373 NLP_EVT_DEVICE_RECOVERY);
375 /* Check config parameter use-adisc or FCP-2 */
376 if ((rc != NLP_STE_FREED_NODE) &&
377 (phba->cfg_use_adisc == 0) &&
378 !(ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE)) {
379 /* We know we will have to relogin, so
380 * unreglogin the rpi right now to fail
381 * any outstanding I/Os quickly.
383 lpfc_unreg_rpi(phba, ndlp);
388 /* free any ndlp's on unused list */
389 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_unused_list,
391 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
394 /* Setup myDID for link up if we are in pt2pt mode */
395 if (phba->fc_flag & FC_PT2PT) {
397 if ((mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
398 lpfc_config_link(phba, mb);
399 mb->mbox_cmpl=lpfc_sli_def_mbox_cmpl;
400 if (lpfc_sli_issue_mbox
401 (phba, mb, (MBX_NOWAIT | MBX_STOP_IOCB))
402 == MBX_NOT_FINISHED) {
403 mempool_free( mb, phba->mbox_mem_pool);
406 spin_lock_irq(phba->host->host_lock);
407 phba->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
408 spin_unlock_irq(phba->host->host_lock);
410 spin_lock_irq(phba->host->host_lock);
411 phba->fc_flag &= ~FC_LBIT;
412 spin_unlock_irq(phba->host->host_lock);
414 /* Turn off discovery timer if its running */
415 lpfc_can_disctmo(phba);
417 /* Must process IOCBs on all rings to handle ABORTed I/Os */
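/*
 * Bring the port logically up: clear the link-down related flags and,
 * when FC_LBIT is set, clean up fabric ndlps or unregister RPIs so that
 * stale logins are redone; any ndlps on the unused list are freed.
 */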
422 lpfc_linkup(struct lpfc_hba * phba)
424 struct lpfc_nodelist *ndlp, *next_ndlp;
425 struct list_head *listp, *node_list[7];
428 spin_lock_irq(phba->host->host_lock);
429 phba->hba_state = LPFC_LINK_UP;
430 phba->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
431 FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
432 phba->fc_flag |= FC_NDISC_ACTIVE;
433 phba->fc_ns_retry = 0;
434 spin_unlock_irq(phba->host->host_lock);
437 node_list[0] = &phba->fc_plogi_list;
438 node_list[1] = &phba->fc_adisc_list;
439 node_list[2] = &phba->fc_reglogin_list;
440 node_list[3] = &phba->fc_prli_list;
441 node_list[4] = &phba->fc_nlpunmap_list;
442 node_list[5] = &phba->fc_nlpmap_list;
443 node_list[6] = &phba->fc_npr_list;
444 for (i = 0; i < 7; i++) {
445 listp = node_list[i];
446 if (list_empty(listp))
449 list_for_each_entry_safe(ndlp, next_ndlp, listp, nlp_listp) {
450 if (phba->fc_flag & FC_LBIT) {
451 if (ndlp->nlp_type & NLP_FABRIC) {
452 /* On Linkup it's safe to clean up the
453 * ndlp from Fabric connections.
455 lpfc_nlp_list(phba, ndlp,
457 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
458 /* Fail outstanding IO now since device
459 * is marked for PLOGI.
461 lpfc_unreg_rpi(phba, ndlp);
467 /* free any ndlp's on unused list */
468 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_unused_list,
470 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
477 * This routine handles processing a CLEAR_LA mailbox
478 * command upon completion. It is set up in the LPFC_MBOXQ
479 * as the completion routine when the command is
480 * handed off to the SLI layer.
483 lpfc_mbx_cmpl_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
485 struct lpfc_sli *psli;
491 /* Since we don't do discovery right now, turn these off here */
492 psli->ring[psli->ip_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
493 psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
494 psli->ring[psli->next_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
496 /* Check for error */
497 if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
498 /* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
499 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
500 "%d:0320 CLEAR_LA mbxStatus error x%x hba "
502 phba->brd_no, mb->mbxStatus, phba->hba_state);
504 phba->hba_state = LPFC_HBA_ERROR;
508 if (phba->fc_flag & FC_ABORT_DISCOVERY)
511 phba->num_disc_nodes = 0;
512 /* go thru NPR list and issue ELS PLOGIs */
513 if (phba->fc_npr_cnt) {
514 lpfc_els_disc_plogi(phba);
517 if (!phba->num_disc_nodes) {
518 spin_lock_irq(phba->host->host_lock);
519 phba->fc_flag &= ~FC_NDISC_ACTIVE;
520 spin_unlock_irq(phba->host->host_lock);
523 phba->hba_state = LPFC_HBA_READY;
526 /* Device Discovery completes */
527 lpfc_printf_log(phba,
530 "%d:0225 Device Discovery completes\n",
533 mempool_free( pmb, phba->mbox_mem_pool);
535 spin_lock_irq(phba->host->host_lock);
536 phba->fc_flag &= ~FC_ABORT_DISCOVERY;
537 if (phba->fc_flag & FC_ESTABLISH_LINK) {
538 phba->fc_flag &= ~FC_ESTABLISH_LINK;
540 spin_unlock_irq(phba->host->host_lock);
542 del_timer_sync(&phba->fc_estabtmo);
544 lpfc_can_disctmo(phba);
546 /* turn on Link Attention interrupts */
547 spin_lock_irq(phba->host->host_lock);
548 psli->sli_flag |= LPFC_PROCESS_LA;
549 control = readl(phba->HCregaddr);
550 control |= HC_LAINT_ENA;
551 writel(control, phba->HCregaddr);
552 readl(phba->HCregaddr); /* flush */
553 spin_unlock_irq(phba->host->host_lock);
559 lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
561 struct lpfc_sli *psli = &phba->sli;
564 if (pmb->mb.mbxStatus)
567 mempool_free(pmb, phba->mbox_mem_pool);
569 if (phba->fc_topology == TOPOLOGY_LOOP &&
570 phba->fc_flag & FC_PUBLIC_LOOP &&
571 !(phba->fc_flag & FC_LBIT)) {
572 /* Need to wait for FAN - use discovery timer
573 * for timeout. hba_state is identically
574 * LPFC_LOCAL_CFG_LINK while waiting for FAN
576 lpfc_set_disctmo(phba);
580 /* Start discovery by sending a FLOGI. hba_state is identically
581 * LPFC_FLOGI while waiting for FLOGI cmpl
583 phba->hba_state = LPFC_FLOGI;
584 lpfc_set_disctmo(phba);
585 lpfc_initial_flogi(phba);
589 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
590 "%d:0306 CONFIG_LINK mbxStatus error x%x "
592 phba->brd_no, pmb->mb.mbxStatus, phba->hba_state);
596 phba->hba_state = LPFC_HBA_ERROR;
598 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
599 "%d:0200 CONFIG_LINK bad hba state x%x\n",
600 phba->brd_no, phba->hba_state);
602 lpfc_clear_la(phba, pmb);
603 pmb->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
604 rc = lpfc_sli_issue_mbox(phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB));
605 if (rc == MBX_NOT_FINISHED) {
606 mempool_free(pmb, phba->mbox_mem_pool);
607 lpfc_disc_flush_list(phba);
608 psli->ring[(psli->ip_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
609 psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
610 psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
611 phba->hba_state = LPFC_HBA_READY;
617 lpfc_mbx_cmpl_read_sparam(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
619 struct lpfc_sli *psli = &phba->sli;
620 MAILBOX_t *mb = &pmb->mb;
621 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
624 /* Check for error */
626 /* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
627 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
628 "%d:0319 READ_SPARAM mbxStatus error x%x "
630 phba->brd_no, mb->mbxStatus, phba->hba_state);
633 phba->hba_state = LPFC_HBA_ERROR;
637 memcpy((uint8_t *) & phba->fc_sparam, (uint8_t *) mp->virt,
638 sizeof (struct serv_parm));
639 memcpy((uint8_t *) & phba->fc_nodename,
640 (uint8_t *) & phba->fc_sparam.nodeName,
641 sizeof (struct lpfc_name));
642 memcpy((uint8_t *) & phba->fc_portname,
643 (uint8_t *) & phba->fc_sparam.portName,
644 sizeof (struct lpfc_name));
645 lpfc_mbuf_free(phba, mp->virt, mp->phys);
647 mempool_free( pmb, phba->mbox_mem_pool);
651 pmb->context1 = NULL;
652 lpfc_mbuf_free(phba, mp->virt, mp->phys);
654 if (phba->hba_state != LPFC_CLEAR_LA) {
655 lpfc_clear_la(phba, pmb);
656 pmb->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
657 if (lpfc_sli_issue_mbox(phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB))
658 == MBX_NOT_FINISHED) {
659 mempool_free( pmb, phba->mbox_mem_pool);
660 lpfc_disc_flush_list(phba);
661 psli->ring[(psli->ip_ring)].flag &=
662 ~LPFC_STOP_IOCB_EVENT;
663 psli->ring[(psli->fcp_ring)].flag &=
664 ~LPFC_STOP_IOCB_EVENT;
665 psli->ring[(psli->next_ring)].flag &=
666 ~LPFC_STOP_IOCB_EVENT;
667 phba->hba_state = LPFC_HBA_READY;
670 mempool_free( pmb, phba->mbox_mem_pool);
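/*
 * Act on a link-up attention: record the link speed and topology, derive
 * the local DID (granted AL_PA on a loop, otherwise the preferred DID),
 * then queue READ_SPARAM and CONFIG_LINK mailbox commands to continue
 * bringing the link up.
 */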
676 lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
679 LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox;
680 sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
681 cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
683 spin_lock_irq(phba->host->host_lock);
684 switch (la->UlnkSpeed) {
686 phba->fc_linkspeed = LA_1GHZ_LINK;
689 phba->fc_linkspeed = LA_2GHZ_LINK;
692 phba->fc_linkspeed = LA_4GHZ_LINK;
695 phba->fc_linkspeed = LA_UNKNW_LINK;
699 phba->fc_topology = la->topology;
701 if (phba->fc_topology == TOPOLOGY_LOOP) {
702 /* Get Loop Map information */
705 phba->fc_flag |= FC_LBIT;
707 phba->fc_myDID = la->granted_AL_PA;
708 i = la->un.lilpBde64.tus.f.bdeSize;
711 phba->alpa_map[0] = 0;
713 if (phba->cfg_log_verbose & LOG_LINK_EVENT) {
724 numalpa = phba->alpa_map[0];
726 while (j < numalpa) {
727 memset(un.pamap, 0, 16);
728 for (k = 1; j < numalpa; k++) {
730 phba->alpa_map[j + 1];
735 /* Link Up Event ALPA map */
736 lpfc_printf_log(phba,
739 "%d:1304 Link Up Event "
740 "ALPA map Data: x%x "
743 un.pa.wd1, un.pa.wd2,
744 un.pa.wd3, un.pa.wd4);
749 phba->fc_myDID = phba->fc_pref_DID;
750 phba->fc_flag |= FC_LBIT;
752 spin_unlock_irq(phba->host->host_lock);
756 lpfc_read_sparam(phba, sparam_mbox);
757 sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
758 lpfc_sli_issue_mbox(phba, sparam_mbox,
759 (MBX_NOWAIT | MBX_STOP_IOCB));
763 phba->hba_state = LPFC_LOCAL_CFG_LINK;
764 lpfc_config_link(phba, cfglink_mbox);
765 cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
766 lpfc_sli_issue_mbox(phba, cfglink_mbox,
767 (MBX_NOWAIT | MBX_STOP_IOCB));
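/*
 * Handle a link-down attention: perform the link-down cleanup and then
 * re-enable link attention interrupts; no CLEAR_LA is issued in this path.
 */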
772 lpfc_mbx_issue_link_down(struct lpfc_hba *phba) {
774 struct lpfc_sli *psli = &phba->sli;
778 /* turn on Link Attention interrupts - no CLEAR_LA needed */
779 spin_lock_irq(phba->host->host_lock);
780 psli->sli_flag |= LPFC_PROCESS_LA;
781 control = readl(phba->HCregaddr);
782 control |= HC_LAINT_ENA;
783 writel(control, phba->HCregaddr);
784 readl(phba->HCregaddr); /* flush */
785 spin_unlock_irq(phba->host->host_lock);
789 * This routine handles processing a READ_LA mailbox
790 * command upon completion. It is set up in the LPFC_MBOXQ
791 * as the completion routine when the command is
792 * handed off to the SLI layer.
795 lpfc_mbx_cmpl_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
798 MAILBOX_t *mb = &pmb->mb;
799 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
801 /* Check for error */
803 lpfc_printf_log(phba,
806 "%d:1307 READ_LA mbox error x%x state x%x\n",
808 mb->mbxStatus, phba->hba_state);
809 lpfc_mbx_issue_link_down(phba);
810 phba->hba_state = LPFC_HBA_ERROR;
811 goto lpfc_mbx_cmpl_read_la_free_mbuf;
814 la = (READ_LA_VAR *) & pmb->mb.un.varReadLA;
816 memcpy(&phba->alpa_map[0], mp->virt, 128);
818 spin_lock_irq(phba->host->host_lock);
820 phba->fc_flag |= FC_BYPASSED_MODE;
822 phba->fc_flag &= ~FC_BYPASSED_MODE;
823 spin_unlock_irq(phba->host->host_lock);
825 if (((phba->fc_eventTag + 1) < la->eventTag) ||
826 (phba->fc_eventTag == la->eventTag)) {
827 phba->fc_stat.LinkMultiEvent++;
828 if (la->attType == AT_LINK_UP) {
829 if (phba->fc_eventTag != 0)
834 phba->fc_eventTag = la->eventTag;
836 if (la->attType == AT_LINK_UP) {
837 phba->fc_stat.LinkUp++;
838 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
839 "%d:1303 Link Up Event x%x received "
840 "Data: x%x x%x x%x x%x\n",
841 phba->brd_no, la->eventTag, phba->fc_eventTag,
842 la->granted_AL_PA, la->UlnkSpeed,
844 lpfc_mbx_process_link_up(phba, la);
846 phba->fc_stat.LinkDown++;
847 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
848 "%d:1305 Link Down Event x%x received "
849 "Data: x%x x%x x%x\n",
850 phba->brd_no, la->eventTag, phba->fc_eventTag,
851 phba->hba_state, phba->fc_flag);
852 lpfc_mbx_issue_link_down(phba);
855 lpfc_mbx_cmpl_read_la_free_mbuf:
856 lpfc_mbuf_free(phba, mp->virt, mp->phys);
858 mempool_free(pmb, phba->mbox_mem_pool);
863 * This routine handles processing a REG_LOGIN mailbox
864 * command upon completion. It is set up in the LPFC_MBOXQ
865 * as the completion routine when the command is
866 * handed off to the SLI layer.
869 lpfc_mbx_cmpl_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
871 struct lpfc_sli *psli;
873 struct lpfc_dmabuf *mp;
874 struct lpfc_nodelist *ndlp;
879 ndlp = (struct lpfc_nodelist *) pmb->context2;
880 mp = (struct lpfc_dmabuf *) (pmb->context1);
882 pmb->context1 = NULL;
884 /* Good status, call state machine */
885 lpfc_disc_state_machine(phba, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
886 lpfc_mbuf_free(phba, mp->virt, mp->phys);
888 mempool_free( pmb, phba->mbox_mem_pool);
894 * This routine handles processing a Fabric REG_LOGIN mailbox
895 * command upon completion. It is set up in the LPFC_MBOXQ
896 * as the completion routine when the command is
897 * handed off to the SLI layer.
900 lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
902 struct lpfc_sli *psli;
904 struct lpfc_dmabuf *mp;
905 struct lpfc_nodelist *ndlp;
906 struct lpfc_nodelist *ndlp_fdmi;
912 ndlp = (struct lpfc_nodelist *) pmb->context2;
913 mp = (struct lpfc_dmabuf *) (pmb->context1);
916 lpfc_mbuf_free(phba, mp->virt, mp->phys);
918 mempool_free( pmb, phba->mbox_mem_pool);
919 mempool_free( ndlp, phba->nlp_mem_pool);
921 /* FLOGI failed, so just use loop map to make discovery list */
922 lpfc_disc_list_loopmap(phba);
924 /* Start discovery */
925 lpfc_disc_start(phba);
929 pmb->context1 = NULL;
931 ndlp->nlp_rpi = mb->un.varWords[0];
932 ndlp->nlp_type |= NLP_FABRIC;
933 ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
934 lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
936 if (phba->hba_state == LPFC_FABRIC_CFG_LINK) {
937 /* This NPort has been assigned an NPort_ID by the fabric as a
938 * result of the completed fabric login. Issue a State Change
939 * Registration (SCR) ELS request to the fabric controller
940 * (SCR_DID) so that this NPort gets RSCN events from the fabric. */
943 lpfc_issue_els_scr(phba, SCR_DID, 0);
945 ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, NameServer_DID);
947 /* Allocate a new node instance. If the pool is empty,
948 * start the discovery process and skip the Nameserver
949 * login process. This is attempted again later on.
950 * Otherwise, issue a Port Login (PLOGI) to NameServer.
952 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC);
954 lpfc_disc_start(phba);
955 lpfc_mbuf_free(phba, mp->virt, mp->phys);
957 mempool_free( pmb, phba->mbox_mem_pool);
960 lpfc_nlp_init(phba, ndlp, NameServer_DID);
961 ndlp->nlp_type |= NLP_FABRIC;
964 ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
965 lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
966 lpfc_issue_els_plogi(phba, NameServer_DID, 0);
967 if (phba->cfg_fdmi_on) {
968 ndlp_fdmi = mempool_alloc(phba->nlp_mem_pool,
971 lpfc_nlp_init(phba, ndlp_fdmi, FDMI_DID);
972 ndlp_fdmi->nlp_type |= NLP_FABRIC;
973 ndlp_fdmi->nlp_state = NLP_STE_PLOGI_ISSUE;
974 lpfc_issue_els_plogi(phba, FDMI_DID, 0);
979 lpfc_mbuf_free(phba, mp->virt, mp->phys);
981 mempool_free( pmb, phba->mbox_mem_pool);
986 * This routine handles processing a NameServer REG_LOGIN mailbox
987 * command upon completion. It is set up in the LPFC_MBOXQ
988 * as the completion routine when the command is
989 * handed off to the SLI layer.
992 lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
994 struct lpfc_sli *psli;
996 struct lpfc_dmabuf *mp;
997 struct lpfc_nodelist *ndlp;
1002 ndlp = (struct lpfc_nodelist *) pmb->context2;
1003 mp = (struct lpfc_dmabuf *) (pmb->context1);
1005 if (mb->mbxStatus) {
1006 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1008 mempool_free( pmb, phba->mbox_mem_pool);
1009 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
1011 /* RegLogin failed, so just use loop map to make discovery list */
1013 lpfc_disc_list_loopmap(phba);
1015 /* Start discovery */
1016 lpfc_disc_start(phba);
1020 pmb->context1 = NULL;
1022 ndlp->nlp_rpi = mb->un.varWords[0];
1023 ndlp->nlp_type |= NLP_FABRIC;
1024 ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
1025 lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
1027 if (phba->hba_state < LPFC_HBA_READY) {
1028 /* Link up discovery requires Fabric registration. */
1029 lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RNN_ID);
1030 lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RSNN_NN);
1031 lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RFT_ID);
1034 phba->fc_ns_retry = 0;
1035 /* Good status, issue CT Request to NameServer */
1036 if (lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT)) {
1037 /* Cannot issue NameServer Query, so finish up discovery */
1038 lpfc_disc_start(phba);
1041 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1043 mempool_free( pmb, phba->mbox_mem_pool);
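/*
 * Register this ndlp with the FC transport as a remote port, record the
 * SCSI target id it was assigned, and report its FCP target/initiator roles.
 */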
1049 lpfc_register_remote_port(struct lpfc_hba * phba,
1050 struct lpfc_nodelist * ndlp)
1052 struct fc_rport *rport;
1053 struct lpfc_rport_data *rdata;
1054 struct fc_rport_identifiers rport_ids;
1056 /* Remote port has reappeared. Re-register w/ FC transport */
1057 rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
1058 rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
1059 rport_ids.port_id = ndlp->nlp_DID;
1060 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
1062 ndlp->rport = rport = fc_remote_port_add(phba->host, 0, &rport_ids);
1064 dev_printk(KERN_WARNING, &phba->pcidev->dev,
1065 "Warning: fc_remote_port_add failed\n");
1069 /* initialize static port data */
1070 rport->maxframe_size = ndlp->nlp_maxframe;
1071 rport->supported_classes = ndlp->nlp_class_sup;
1072 if ((rport->scsi_target_id != -1) &&
1073 (rport->scsi_target_id < MAX_FCP_TARGET)) {
1074 ndlp->nlp_sid = rport->scsi_target_id;
1076 rdata = rport->dd_data;
1077 rdata->pnode = ndlp;
1079 if (ndlp->nlp_type & NLP_FCP_TARGET)
1080 rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
1081 if (ndlp->nlp_type & NLP_FCP_INITIATOR)
1082 rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
1085 if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
1086 fc_remote_port_rolechg(rport, rport_ids.roles);
1093 lpfc_unregister_remote_port(struct lpfc_hba * phba,
1094 struct lpfc_nodelist * ndlp)
1096 struct fc_rport *rport = ndlp->rport;
1097 struct lpfc_rport_data *rdata = rport->dd_data;
1100 rdata->pnode = NULL;
1101 fc_remote_port_delete(rport);
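/*
 * Move an ndlp onto the requested node list: remove it from its current
 * list, adjust the per-list counters, manage the nodev timer, and make
 * the matching FC transport add/remove calls once the node has been
 * re-listed.
 */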
1107 lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list)
1109 enum { none, unmapped, mapped } rport_add = none, rport_del = none;
1110 struct lpfc_sli *psli;
1113 /* Sanity check to ensure we are not moving to / from the same list */
1114 if ((nlp->nlp_flag & NLP_LIST_MASK) == list)
1115 if (list != NLP_NO_LIST)
1118 spin_lock_irq(phba->host->host_lock);
1119 switch (nlp->nlp_flag & NLP_LIST_MASK) {
1120 case NLP_NO_LIST: /* Not on any list */
1122 case NLP_UNUSED_LIST:
1123 phba->fc_unused_cnt--;
1124 list_del(&nlp->nlp_listp);
1126 case NLP_PLOGI_LIST:
1127 phba->fc_plogi_cnt--;
1128 list_del(&nlp->nlp_listp);
1130 case NLP_ADISC_LIST:
1131 phba->fc_adisc_cnt--;
1132 list_del(&nlp->nlp_listp);
1134 case NLP_REGLOGIN_LIST:
1135 phba->fc_reglogin_cnt--;
1136 list_del(&nlp->nlp_listp);
1139 phba->fc_prli_cnt--;
1140 list_del(&nlp->nlp_listp);
1142 case NLP_UNMAPPED_LIST:
1143 phba->fc_unmap_cnt--;
1144 list_del(&nlp->nlp_listp);
1145 nlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
1146 nlp->nlp_type &= ~NLP_FC_NODE;
1147 phba->nport_event_cnt++;
1149 rport_del = unmapped;
1151 case NLP_MAPPED_LIST:
1153 list_del(&nlp->nlp_listp);
1154 phba->nport_event_cnt++;
1160 list_del(&nlp->nlp_listp);
1161 /* Stop delay tmo if taking node off NPR list */
1162 if ((nlp->nlp_flag & NLP_DELAY_TMO) &&
1163 (list != NLP_NPR_LIST)) {
1164 spin_unlock_irq(phba->host->host_lock);
1165 lpfc_cancel_retry_delay_tmo(phba, nlp);
1166 spin_lock_irq(phba->host->host_lock);
1171 nlp->nlp_flag &= ~NLP_LIST_MASK;
1173 /* Add NPort <did> to <num> list */
1174 lpfc_printf_log(phba,
1177 "%d:0904 Add NPort x%x to %d list Data: x%x\n",
1179 nlp->nlp_DID, list, nlp->nlp_flag);
1182 case NLP_NO_LIST: /* No list, just remove it */
1183 spin_unlock_irq(phba->host->host_lock);
1184 lpfc_nlp_remove(phba, nlp);
1185 spin_lock_irq(phba->host->host_lock);
1186 /* as node removed - stop further transport calls */
1189 case NLP_UNUSED_LIST:
1190 nlp->nlp_flag |= list;
1191 /* Put it at the end of the unused list */
1192 list_add_tail(&nlp->nlp_listp, &phba->fc_unused_list);
1193 phba->fc_unused_cnt++;
1195 case NLP_PLOGI_LIST:
1196 nlp->nlp_flag |= list;
1197 /* Put it at the end of the plogi list */
1198 list_add_tail(&nlp->nlp_listp, &phba->fc_plogi_list);
1199 phba->fc_plogi_cnt++;
1201 case NLP_ADISC_LIST:
1202 nlp->nlp_flag |= list;
1203 /* Put it at the end of the adisc list */
1204 list_add_tail(&nlp->nlp_listp, &phba->fc_adisc_list);
1205 phba->fc_adisc_cnt++;
1207 case NLP_REGLOGIN_LIST:
1208 nlp->nlp_flag |= list;
1209 /* Put it at the end of the reglogin list */
1210 list_add_tail(&nlp->nlp_listp, &phba->fc_reglogin_list);
1211 phba->fc_reglogin_cnt++;
1214 nlp->nlp_flag |= list;
1215 /* Put it at the end of the prli list */
1216 list_add_tail(&nlp->nlp_listp, &phba->fc_prli_list);
1217 phba->fc_prli_cnt++;
1219 case NLP_UNMAPPED_LIST:
1220 rport_add = unmapped;
1221 /* ensure all vestiges of "mapped" significance are gone */
1222 nlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
1223 nlp->nlp_flag |= list;
1224 /* Put it at the end of the unmap list */
1225 list_add_tail(&nlp->nlp_listp, &phba->fc_nlpunmap_list);
1226 phba->fc_unmap_cnt++;
1227 phba->nport_event_cnt++;
1228 /* stop nodev tmo if running */
1229 if (nlp->nlp_flag & NLP_NODEV_TMO) {
1230 nlp->nlp_flag &= ~NLP_NODEV_TMO;
1231 spin_unlock_irq(phba->host->host_lock);
1232 del_timer_sync(&nlp->nlp_tmofunc);
1233 spin_lock_irq(phba->host->host_lock);
1234 if (!list_empty(&nlp->nodev_timeout_evt.evt_listp))
1235 list_del_init(&nlp->nodev_timeout_evt.
1239 nlp->nlp_type |= NLP_FC_NODE;
1241 case NLP_MAPPED_LIST:
1243 nlp->nlp_flag |= list;
1244 /* Put it at the end of the map list */
1245 list_add_tail(&nlp->nlp_listp, &phba->fc_nlpmap_list);
1247 phba->nport_event_cnt++;
1248 /* stop nodev tmo if running */
1249 if (nlp->nlp_flag & NLP_NODEV_TMO) {
1250 nlp->nlp_flag &= ~NLP_NODEV_TMO;
1251 spin_unlock_irq(phba->host->host_lock);
1252 del_timer_sync(&nlp->nlp_tmofunc);
1253 spin_lock_irq(phba->host->host_lock);
1254 if (!list_empty(&nlp->nodev_timeout_evt.evt_listp))
1255 list_del_init(&nlp->nodev_timeout_evt.
1261 nlp->nlp_flag |= list;
1262 /* Put it at the end of the npr list */
1263 list_add_tail(&nlp->nlp_listp, &phba->fc_npr_list);
1266 if (!(nlp->nlp_flag & NLP_NODEV_TMO))
1267 mod_timer(&nlp->nlp_tmofunc,
1268 jiffies + HZ * phba->cfg_nodev_tmo);
1270 nlp->nlp_flag |= NLP_NODEV_TMO;
1271 nlp->nlp_flag &= ~NLP_RCV_PLOGI;
1277 spin_unlock_irq(phba->host->host_lock);
1280 * We make all the calls into the transport after we have
1281 * moved the node between lists. This is so that we don't
1282 * release the lock while in-between lists.
1285 /* Don't upcall midlayer if we're unloading */
1286 if (!(phba->fc_flag & FC_UNLOADING)) {
1288 * We revalidate the rport pointer as the "add" function
1289 * may have removed the remote port.
1291 if ((rport_del != none) && nlp->rport)
1292 lpfc_unregister_remote_port(phba, nlp);
1294 if (rport_add != none) {
1296 * Tell the fc transport about the port, if we haven't
1297 * already. If we have, and it's a scsi entity, be
1298 * sure to unblock any attached scsi devices
1301 lpfc_register_remote_port(phba, nlp);
1304 * if we added to Mapped list, but the remote port
1305 * registration failed or assigned a target id outside
1306 * our presentable range - move the node to the Unmapped List.
1309 if ((rport_add == mapped) &&
1311 (nlp->rport->scsi_target_id == -1) ||
1312 (nlp->rport->scsi_target_id >= MAX_FCP_TARGET))) {
1313 nlp->nlp_state = NLP_STE_UNMAPPED_NODE;
1314 spin_lock_irq(phba->host->host_lock);
1315 nlp->nlp_flag |= NLP_TGT_NO_SCSIID;
1316 spin_unlock_irq(phba->host->host_lock);
1317 lpfc_nlp_list(phba, nlp, NLP_UNMAPPED_LIST);
1325 * Start / ReStart rescue timer for Discovery / RSCN handling
1328 lpfc_set_disctmo(struct lpfc_hba * phba)
1332 if (phba->hba_state == LPFC_LOCAL_CFG_LINK) {
1333 /* For FAN, timeout should be greater than edtov */
1334 tmo = (((phba->fc_edtov + 999) / 1000) + 1);
1336 /* Normal discovery timeout should be greater than the ELS/CT timeout;
1337 * FC spec states we need 3 * ratov for CT requests
1339 tmo = ((phba->fc_ratov * 3) + 3);
1342 mod_timer(&phba->fc_disctmo, jiffies + HZ * tmo);
1343 spin_lock_irq(phba->host->host_lock);
1344 phba->fc_flag |= FC_DISC_TMO;
1345 spin_unlock_irq(phba->host->host_lock);
1347 /* Start Discovery Timer state <hba_state> */
1348 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
1349 "%d:0247 Start Discovery Timer state x%x "
1350 "Data: x%x x%lx x%x x%x\n",
1352 phba->hba_state, tmo, (unsigned long)&phba->fc_disctmo,
1353 phba->fc_plogi_cnt, phba->fc_adisc_cnt);
1359 * Cancel rescue timer for Discovery / RSCN handling
1362 lpfc_can_disctmo(struct lpfc_hba * phba)
1364 /* Turn off discovery timer if its running */
1365 if (phba->fc_flag & FC_DISC_TMO) {
1366 spin_lock_irq(phba->host->host_lock);
1367 phba->fc_flag &= ~FC_DISC_TMO;
1368 spin_unlock_irq(phba->host->host_lock);
1369 del_timer_sync(&phba->fc_disctmo);
1370 phba->work_hba_events &= ~WORKER_DISC_TMO;
1373 /* Cancel Discovery Timer state <hba_state> */
1374 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
1375 "%d:0248 Cancel Discovery Timer state x%x "
1376 "Data: x%x x%x x%x\n",
1377 phba->brd_no, phba->hba_state, phba->fc_flag,
1378 phba->fc_plogi_cnt, phba->fc_adisc_cnt);
1384 * Check specified ring for outstanding IOCB on the SLI queue
1385 * Return true if iocb matches the specified nport
1388 lpfc_check_sli_ndlp(struct lpfc_hba * phba,
1389 struct lpfc_sli_ring * pring,
1390 struct lpfc_iocbq * iocb, struct lpfc_nodelist * ndlp)
1392 struct lpfc_sli *psli;
1397 if (pring->ringno == LPFC_ELS_RING) {
1398 switch (icmd->ulpCommand) {
1399 case CMD_GEN_REQUEST64_CR:
1400 if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi)
1402 case CMD_ELS_REQUEST64_CR:
1403 case CMD_XMIT_ELS_RSP64_CX:
1404 if (iocb->context1 == (uint8_t *) ndlp)
1407 } else if (pring->ringno == psli->ip_ring) {
1409 } else if (pring->ringno == psli->fcp_ring) {
1410 /* Skip match check if waiting to relogin to FCP target */
1411 if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
1412 (ndlp->nlp_flag & NLP_DELAY_TMO)) {
1415 if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
1418 } else if (pring->ringno == psli->next_ring) {
1425 * Free resources / clean up outstanding I/Os
1426 * associated with nlp_rpi in the LPFC_NODELIST entry.
1429 lpfc_no_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
1431 struct lpfc_sli *psli;
1432 struct lpfc_sli_ring *pring;
1433 struct lpfc_iocbq *iocb, *next_iocb;
1438 * Everything that matches on txcmplq will be returned
1439 * by firmware with a no rpi error.
1442 rpi = ndlp->nlp_rpi;
1444 /* Now process each ring */
1445 for (i = 0; i < psli->num_rings; i++) {
1446 pring = &psli->ring[i];
1448 spin_lock_irq(phba->host->host_lock);
1449 list_for_each_entry_safe(iocb, next_iocb, &pring->txq,
1452 * Check to see if iocb matches the nport we are looking for.
1455 if ((lpfc_check_sli_ndlp
1456 (phba, pring, iocb, ndlp))) {
1457 /* It matches, so dequeue it and call its completion with an error */
1459 list_del(&iocb->list);
1461 if (iocb->iocb_cmpl) {
1464 IOSTAT_LOCAL_REJECT;
1465 icmd->un.ulpWord[4] =
1467 spin_unlock_irq(phba->host->
1469 (iocb->iocb_cmpl) (phba,
1471 spin_lock_irq(phba->host->
1474 lpfc_sli_release_iocbq(phba,
1478 spin_unlock_irq(phba->host->host_lock);
1486 * Free rpi associated with LPFC_NODELIST entry.
1487 * This routine is called from lpfc_freenode(), when we are removing
1488 * a LPFC_NODELIST entry. It is also called if the driver initiates a
1489 * LOGO that completes successfully, and we are waiting to PLOGI back
1490 * to the remote NPort. In addition, it is called after we receive
1491 * an unsolicited ELS cmd, send back a rsp, the rsp completes and
1492 * we are waiting to PLOGI back to the remote NPort.
1495 lpfc_unreg_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
1500 if (ndlp->nlp_rpi) {
1501 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
1502 lpfc_unreg_login(phba, ndlp->nlp_rpi, mbox);
1503 mbox->mbox_cmpl=lpfc_sli_def_mbox_cmpl;
1504 rc = lpfc_sli_issue_mbox
1505 (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB));
1506 if (rc == MBX_NOT_FINISHED)
1507 mempool_free( mbox, phba->mbox_mem_pool);
1509 lpfc_no_rpi(phba, ndlp);
1517 * Free resources associated with LPFC_NODELIST entry
1518 * so it can be freed.
1521 lpfc_freenode(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
1524 LPFC_MBOXQ_t *nextmb;
1525 struct lpfc_dmabuf *mp;
1527 /* Cleanup node for NPort <nlp_DID> */
1528 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1529 "%d:0900 Cleanup node for NPort x%x "
1530 "Data: x%x x%x x%x\n",
1531 phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag,
1532 ndlp->nlp_state, ndlp->nlp_rpi);
1534 lpfc_nlp_list(phba, ndlp, NLP_JUST_DQ);
1537 * if unloading the driver - just leave the remote port in place.
1538 * The driver unload will force the attached devices to detach
1539 * and flush caches w/o generating flush errors.
1541 if ((ndlp->rport) && !(phba->fc_flag & FC_UNLOADING)) {
1542 lpfc_unregister_remote_port(phba, ndlp);
1543 ndlp->nlp_sid = NLP_NO_SID;
1546 /* cleanup any ndlp on mbox q waiting for reglogin cmpl */
1547 if ((mb = phba->sli.mbox_active)) {
1548 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
1549 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1550 mb->context2 = NULL;
1551 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1554 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
1555 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
1556 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1557 mp = (struct lpfc_dmabuf *) (mb->context1);
1559 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1562 list_del(&mb->list);
1563 mempool_free(mb, phba->mbox_mem_pool);
1567 lpfc_els_abort(phba,ndlp,0);
1568 spin_lock_irq(phba->host->host_lock);
1569 ndlp->nlp_flag &= ~(NLP_NODEV_TMO|NLP_DELAY_TMO);
1570 spin_unlock_irq(phba->host->host_lock);
1571 del_timer_sync(&ndlp->nlp_tmofunc);
1573 ndlp->nlp_last_elscmd = 0;
1574 del_timer_sync(&ndlp->nlp_delayfunc);
1576 if (!list_empty(&ndlp->nodev_timeout_evt.evt_listp))
1577 list_del_init(&ndlp->nodev_timeout_evt.evt_listp);
1578 if (!list_empty(&ndlp->els_retry_evt.evt_listp))
1579 list_del_init(&ndlp->els_retry_evt.evt_listp);
1581 lpfc_unreg_rpi(phba, ndlp);
1587 * Check to see if we can free the nlp back to the freelist.
1588 * If we are in the middle of using the nlp in the discovery state
1589 * machine, defer the free till we reach the end of the state machine.
1592 lpfc_nlp_remove(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
1594 if (ndlp->nlp_flag & NLP_NODEV_TMO) {
1595 spin_lock_irq(phba->host->host_lock);
1596 ndlp->nlp_flag &= ~NLP_NODEV_TMO;
1597 spin_unlock_irq(phba->host->host_lock);
1598 del_timer_sync(&ndlp->nlp_tmofunc);
1599 if (!list_empty(&ndlp->nodev_timeout_evt.evt_listp))
1600 list_del_init(&ndlp->nodev_timeout_evt.evt_listp);
1605 if (ndlp->nlp_flag & NLP_DELAY_TMO) {
1606 lpfc_cancel_retry_delay_tmo(phba, ndlp);
1609 if (ndlp->nlp_disc_refcnt) {
1610 spin_lock_irq(phba->host->host_lock);
1611 ndlp->nlp_flag |= NLP_DELAY_REMOVE;
1612 spin_unlock_irq(phba->host->host_lock);
1614 lpfc_freenode(phba, ndlp);
1615 mempool_free( ndlp, phba->nlp_mem_pool);
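/*
 * Check whether an ndlp matches the given DID, allowing for private loop
 * addressing where the domain and area fields may be zero on either side.
 */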
1621 lpfc_matchdid(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, uint32_t did)
1627 if (did == Bcast_DID)
1630 if (ndlp->nlp_DID == 0) {
1634 /* First check for Direct match */
1635 if (ndlp->nlp_DID == did)
1638 /* Next check for an area/domain == 0 match */
1639 mydid.un.word = phba->fc_myDID;
1640 if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) {
1644 matchdid.un.word = did;
1645 ndlpdid.un.word = ndlp->nlp_DID;
1646 if (matchdid.un.b.id == ndlpdid.un.b.id) {
1647 if ((mydid.un.b.domain == matchdid.un.b.domain) &&
1648 (mydid.un.b.area == matchdid.un.b.area)) {
1649 if ((ndlpdid.un.b.domain == 0) &&
1650 (ndlpdid.un.b.area == 0)) {
1651 if (ndlpdid.un.b.id)
1657 matchdid.un.word = ndlp->nlp_DID;
1658 if ((mydid.un.b.domain == ndlpdid.un.b.domain) &&
1659 (mydid.un.b.area == ndlpdid.un.b.area)) {
1660 if ((matchdid.un.b.domain == 0) &&
1661 (matchdid.un.b.area == 0)) {
1662 if (matchdid.un.b.id)
1670 /* Search for a nodelist entry on a specific list */
1671 struct lpfc_nodelist *
1672 lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did)
1674 struct lpfc_nodelist *ndlp, *next_ndlp;
1677 spin_lock_irq(phba->host->host_lock);
1678 if (order & NLP_SEARCH_UNMAPPED) {
1679 list_for_each_entry_safe(ndlp, next_ndlp,
1680 &phba->fc_nlpunmap_list, nlp_listp) {
1681 if (lpfc_matchdid(phba, ndlp, did)) {
1682 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1683 ((uint32_t) ndlp->nlp_xri << 16) |
1684 ((uint32_t) ndlp->nlp_type << 8) |
1685 ((uint32_t) ndlp->nlp_rpi & 0xff));
1686 /* FIND node DID unmapped */
1687 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1688 "%d:0929 FIND node DID unmapped"
1689 " Data: x%p x%x x%x x%x\n",
1691 ndlp, ndlp->nlp_DID,
1692 ndlp->nlp_flag, data1);
1693 spin_unlock_irq(phba->host->host_lock);
1699 if (order & NLP_SEARCH_MAPPED) {
1700 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nlpmap_list,
1702 if (lpfc_matchdid(phba, ndlp, did)) {
1704 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1705 ((uint32_t) ndlp->nlp_xri << 16) |
1706 ((uint32_t) ndlp->nlp_type << 8) |
1707 ((uint32_t) ndlp->nlp_rpi & 0xff));
1708 /* FIND node DID mapped */
1709 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1710 "%d:0930 FIND node DID mapped "
1711 "Data: x%p x%x x%x x%x\n",
1713 ndlp, ndlp->nlp_DID,
1714 ndlp->nlp_flag, data1);
1715 spin_unlock_irq(phba->host->host_lock);
1721 if (order & NLP_SEARCH_PLOGI) {
1722 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_plogi_list,
1724 if (lpfc_matchdid(phba, ndlp, did)) {
1726 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1727 ((uint32_t) ndlp->nlp_xri << 16) |
1728 ((uint32_t) ndlp->nlp_type << 8) |
1729 ((uint32_t) ndlp->nlp_rpi & 0xff));
1730 /* LOG change to PLOGI */
1731 /* FIND node DID plogi */
1732 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1733 "%d:0908 FIND node DID plogi "
1734 "Data: x%p x%x x%x x%x\n",
1736 ndlp, ndlp->nlp_DID,
1737 ndlp->nlp_flag, data1);
1738 spin_unlock_irq(phba->host->host_lock);
1744 if (order & NLP_SEARCH_ADISC) {
1745 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_adisc_list,
1747 if (lpfc_matchdid(phba, ndlp, did)) {
1749 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1750 ((uint32_t) ndlp->nlp_xri << 16) |
1751 ((uint32_t) ndlp->nlp_type << 8) |
1752 ((uint32_t) ndlp->nlp_rpi & 0xff));
1753 /* LOG change to ADISC */
1754 /* FIND node DID adisc */
1755 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1756 "%d:0931 FIND node DID adisc "
1757 "Data: x%p x%x x%x x%x\n",
1759 ndlp, ndlp->nlp_DID,
1760 ndlp->nlp_flag, data1);
1766 if (order & NLP_SEARCH_REGLOGIN) {
1767 list_for_each_entry_safe(ndlp, next_ndlp,
1768 &phba->fc_reglogin_list, nlp_listp) {
1769 if (lpfc_matchdid(phba, ndlp, did)) {
1771 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1772 ((uint32_t) ndlp->nlp_xri << 16) |
1773 ((uint32_t) ndlp->nlp_type << 8) |
1774 ((uint32_t) ndlp->nlp_rpi & 0xff));
1775 /* LOG change to REGLOGIN */
1776 /* FIND node DID reglogin */
1777 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1778 "%d:0931 FIND node DID reglogin"
1779 " Data: x%p x%x x%x x%x\n",
1781 ndlp, ndlp->nlp_DID,
1782 ndlp->nlp_flag, data1);
1783 spin_unlock_irq(phba->host->host_lock);
1789 if (order & NLP_SEARCH_PRLI) {
1790 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_prli_list,
1792 if (lpfc_matchdid(phba, ndlp, did)) {
1794 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1795 ((uint32_t) ndlp->nlp_xri << 16) |
1796 ((uint32_t) ndlp->nlp_type << 8) |
1797 ((uint32_t) ndlp->nlp_rpi & 0xff));
1798 /* LOG change to PRLI */
1799 /* FIND node DID prli */
1800 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1801 "%d:0931 FIND node DID prli "
1802 "Data: x%p x%x x%x x%x\n",
1804 ndlp, ndlp->nlp_DID,
1805 ndlp->nlp_flag, data1);
1811 if (order & NLP_SEARCH_NPR) {
1812 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
1814 if (lpfc_matchdid(phba, ndlp, did)) {
1816 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1817 ((uint32_t) ndlp->nlp_xri << 16) |
1818 ((uint32_t) ndlp->nlp_type << 8) |
1819 ((uint32_t) ndlp->nlp_rpi & 0xff));
1820 /* LOG change to NPR */
1821 /* FIND node DID npr */
1822 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1823 "%d:0931 FIND node DID npr "
1824 "Data: x%p x%x x%x x%x\n",
1826 ndlp, ndlp->nlp_DID,
1827 ndlp->nlp_flag, data1);
1828 spin_unlock_irq(phba->host->host_lock);
1834 if (order & NLP_SEARCH_UNUSED) {
1835 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_unused_list,
1837 if (lpfc_matchdid(phba, ndlp, did)) {
1839 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1840 ((uint32_t) ndlp->nlp_xri << 16) |
1841 ((uint32_t) ndlp->nlp_type << 8) |
1842 ((uint32_t) ndlp->nlp_rpi & 0xff));
1843 /* LOG change to UNUSED */
1844 /* FIND node DID unused */
1845 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1846 "%d:0931 FIND node DID unused "
1847 "Data: x%p x%x x%x x%x\n",
1849 ndlp, ndlp->nlp_DID,
1850 ndlp->nlp_flag, data1);
1851 spin_unlock_irq(phba->host->host_lock);
1857 spin_unlock_irq(phba->host->host_lock);
1859 /* FIND node did <did> NOT FOUND */
1860 lpfc_printf_log(phba,
1863 "%d:0932 FIND node did x%x NOT FOUND Data: x%x\n",
1864 phba->brd_no, did, order);
1866 /* no match found */
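/*
 * Find or create the ndlp for a DID that needs to be discovered and mark
 * it NLP_NPR_2B_DISC; while in RSCN mode, only DIDs that appear in the
 * RSCN payload are (re)marked for discovery.
 */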
1870 struct lpfc_nodelist *
1871 lpfc_setup_disc_node(struct lpfc_hba * phba, uint32_t did)
1873 struct lpfc_nodelist *ndlp;
1876 ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, did);
1878 if ((phba->fc_flag & FC_RSCN_MODE) &&
1879 ((lpfc_rscn_payload_check(phba, did) == 0)))
1881 ndlp = (struct lpfc_nodelist *)
1882 mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
1885 lpfc_nlp_init(phba, ndlp, did);
1886 ndlp->nlp_state = NLP_STE_NPR_NODE;
1887 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
1888 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
1891 if (phba->fc_flag & FC_RSCN_MODE) {
1892 if (lpfc_rscn_payload_check(phba, did)) {
1893 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
1895 /* Since this node is marked for discovery,
1896 * delay timeout is not needed.
1898 if (ndlp->nlp_flag & NLP_DELAY_TMO)
1899 lpfc_cancel_retry_delay_tmo(phba, ndlp);
1901 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1905 flg = ndlp->nlp_flag & NLP_LIST_MASK;
1906 if ((flg == NLP_ADISC_LIST) || (flg == NLP_PLOGI_LIST))
1908 ndlp->nlp_state = NLP_STE_NPR_NODE;
1909 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
1910 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
1915 /* Build a list of nodes to discover based on the loopmap */
1917 lpfc_disc_list_loopmap(struct lpfc_hba * phba)
1920 uint32_t alpa, index;
1922 if (phba->hba_state <= LPFC_LINK_DOWN) {
1925 if (phba->fc_topology != TOPOLOGY_LOOP) {
1929 /* Check for loop map present or not */
1930 if (phba->alpa_map[0]) {
1931 for (j = 1; j <= phba->alpa_map[0]; j++) {
1932 alpa = phba->alpa_map[j];
1934 if (((phba->fc_myDID & 0xff) == alpa) || (alpa == 0)) {
1937 lpfc_setup_disc_node(phba, alpa);
1940 /* No alpamap, so try all alpa's */
1941 for (j = 0; j < FC_MAXLOOP; j++) {
1942 /* If cfg_scan_down is set, start from highest
1943 * ALPA (0xef) to lowest (0x1).
1945 if (phba->cfg_scan_down)
1948 index = FC_MAXLOOP - j - 1;
1949 alpa = lpfcAlpaArray[index];
1950 if ((phba->fc_myDID & 0xff) == alpa) {
1954 lpfc_setup_disc_node(phba, alpa);
1960 /* Start Link up / RSCN discovery on NPR list */
1962 lpfc_disc_start(struct lpfc_hba * phba)
1964 struct lpfc_sli *psli;
1966 struct lpfc_nodelist *ndlp, *next_ndlp;
1967 uint32_t did_changed, num_sent;
1968 uint32_t clear_la_pending;
1973 if (phba->hba_state <= LPFC_LINK_DOWN) {
1976 if (phba->hba_state == LPFC_CLEAR_LA)
1977 clear_la_pending = 1;
1979 clear_la_pending = 0;
1981 if (phba->hba_state < LPFC_HBA_READY) {
1982 phba->hba_state = LPFC_DISC_AUTH;
1984 lpfc_set_disctmo(phba);
1986 if (phba->fc_prevDID == phba->fc_myDID) {
1991 phba->fc_prevDID = phba->fc_myDID;
1992 phba->num_disc_nodes = 0;
1994 /* Start Discovery state <hba_state> */
1995 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
1996 "%d:0202 Start Discovery hba state x%x "
1997 "Data: x%x x%x x%x\n",
1998 phba->brd_no, phba->hba_state, phba->fc_flag,
1999 phba->fc_plogi_cnt, phba->fc_adisc_cnt);
2001 /* If our did changed, we MUST do PLOGI */
2002 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
2004 if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
2006 spin_lock_irq(phba->host->host_lock);
2007 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2008 spin_unlock_irq(phba->host->host_lock);
2013 /* First do ADISCs - if any */
2014 num_sent = lpfc_els_disc_adisc(phba);
2019 if ((phba->hba_state < LPFC_HBA_READY) && (!clear_la_pending)) {
2020 /* If we get here, there is nothing to ADISC */
2021 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
2022 phba->hba_state = LPFC_CLEAR_LA;
2023 lpfc_clear_la(phba, mbox);
2024 mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
2025 rc = lpfc_sli_issue_mbox(phba, mbox,
2026 (MBX_NOWAIT | MBX_STOP_IOCB));
2027 if (rc == MBX_NOT_FINISHED) {
2028 mempool_free( mbox, phba->mbox_mem_pool);
2029 lpfc_disc_flush_list(phba);
2030 psli->ring[(psli->ip_ring)].flag &=
2031 ~LPFC_STOP_IOCB_EVENT;
2032 psli->ring[(psli->fcp_ring)].flag &=
2033 ~LPFC_STOP_IOCB_EVENT;
2034 psli->ring[(psli->next_ring)].flag &=
2035 ~LPFC_STOP_IOCB_EVENT;
2036 phba->hba_state = LPFC_HBA_READY;
2040 /* Next do PLOGIs - if any */
2041 num_sent = lpfc_els_disc_plogi(phba);
2046 if (phba->fc_flag & FC_RSCN_MODE) {
2047 /* Check to see if more RSCNs came in while we
2048 * were processing this one.
2050 if ((phba->fc_rscn_id_cnt == 0) &&
2051 (!(phba->fc_flag & FC_RSCN_DISCOVERY))) {
2052 spin_lock_irq(phba->host->host_lock);
2053 phba->fc_flag &= ~FC_RSCN_MODE;
2054 spin_unlock_irq(phba->host->host_lock);
2056 lpfc_els_handle_rscn(phba);
2063 * Ignore completion for all IOCBs on tx and txcmpl queue for ELS
2064 * ring that match the specified nodelist.
2067 lpfc_free_tx(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
2069 struct lpfc_sli *psli;
2071 struct lpfc_iocbq *iocb, *next_iocb;
2072 struct lpfc_sli_ring *pring;
2073 struct lpfc_dmabuf *mp;
2076 pring = &psli->ring[LPFC_ELS_RING];
2078 /* Error matching iocb on txq or txcmplq
2079 * First check the txq.
2081 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
2082 if (iocb->context1 != ndlp) {
2086 if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
2087 (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {
2089 list_del(&iocb->list);
2091 lpfc_els_free_iocb(phba, iocb);
2095 /* Next check the txcmplq */
2096 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
2097 if (iocb->context1 != ndlp) {
2101 if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
2102 (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {
2104 iocb->iocb_cmpl = NULL;
2105 /* context2 = cmd, context2->next = rsp, context3 =
2107 if (iocb->context2) {
2108 /* Free the response IOCB before handling the
2111 mp = (struct lpfc_dmabuf *) (iocb->context2);
2112 mp = list_get_first(&mp->list,
2116 /* Delay before releasing rsp buffer to
2117 * give UNREG mbox a chance to take
2121 &phba->freebufList);
2123 lpfc_mbuf_free(phba,
2124 ((struct lpfc_dmabuf *)
2125 iocb->context2)->virt,
2126 ((struct lpfc_dmabuf *)
2127 iocb->context2)->phys);
2128 kfree(iocb->context2);
2131 if (iocb->context3) {
2132 lpfc_mbuf_free(phba,
2133 ((struct lpfc_dmabuf *)
2134 iocb->context3)->virt,
2135 ((struct lpfc_dmabuf *)
2136 iocb->context3)->phys);
2137 kfree(iocb->context3);
2146 lpfc_disc_flush_list(struct lpfc_hba * phba)
2148 struct lpfc_nodelist *ndlp, *next_ndlp;
2150 if (phba->fc_plogi_cnt) {
2151 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_plogi_list,
2153 lpfc_free_tx(phba, ndlp);
2154 lpfc_nlp_remove(phba, ndlp);
2157 if (phba->fc_adisc_cnt) {
2158 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_adisc_list,
2160 lpfc_free_tx(phba, ndlp);
2161 lpfc_nlp_remove(phba, ndlp);
2167 /*****************************************************************************/
2169 * NAME: lpfc_disc_timeout
2171 * FUNCTION: Fibre Channel driver discovery timeout routine.
2173 * EXECUTION ENVIRONMENT: interrupt only
2181 /*****************************************************************************/
2183 lpfc_disc_timeout(unsigned long ptr)
2185 struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
2186 unsigned long flags = 0;
2188 if (unlikely(!phba))
2191 spin_lock_irqsave(phba->host->host_lock, flags);
2192 if (!(phba->work_hba_events & WORKER_DISC_TMO)) {
2193 phba->work_hba_events |= WORKER_DISC_TMO;
2194 if (phba->work_wait)
2195 wake_up(phba->work_wait);
2197 spin_unlock_irqrestore(phba->host->host_lock, flags);
2202 lpfc_disc_timeout_handler(struct lpfc_hba *phba)
2204 struct lpfc_sli *psli;
2205 struct lpfc_nodelist *ndlp, *next_ndlp;
2206 LPFC_MBOXQ_t *clearlambox, *initlinkmbox;
2207 int rc, clrlaerr = 0;
2209 if (unlikely(!phba))
2212 if (!(phba->fc_flag & FC_DISC_TMO))
2217 spin_lock_irq(phba->host->host_lock);
2218 phba->fc_flag &= ~FC_DISC_TMO;
2219 spin_unlock_irq(phba->host->host_lock);
2221 switch (phba->hba_state) {
2223 case LPFC_LOCAL_CFG_LINK:
2224 /* hba_state is identically LPFC_LOCAL_CFG_LINK while waiting for FAN */
2226 lpfc_printf_log(phba,
2229 "%d:0221 FAN timeout\n",
2232 /* Start discovery by sending FLOGI, clean up old rpis */
2233 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
2235 if (ndlp->nlp_type & NLP_FABRIC) {
2236 /* Clean up the ndlp on Fabric connections */
2237 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
2238 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
2239 /* Fail outstanding IO now since device
2240 * is marked for PLOGI.
2242 lpfc_unreg_rpi(phba, ndlp);
2245 phba->hba_state = LPFC_FLOGI;
2246 lpfc_set_disctmo(phba);
2247 lpfc_initial_flogi(phba);
2251 /* hba_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */
2252 /* Initial FLOGI timeout */
2253 lpfc_printf_log(phba,
2256 "%d:0222 Initial FLOGI timeout\n",
2259 /* Assume no Fabric and go on with discovery.
2260 * Check for outstanding ELS FLOGI to abort.
2263 /* FLOGI failed, so just use loop map to make discovery list */
2264 lpfc_disc_list_loopmap(phba);
2266 /* Start discovery */
2267 lpfc_disc_start(phba);
2270 case LPFC_FABRIC_CFG_LINK:
2271 /* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for NameServer login */
2273 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2274 "%d:0223 Timeout while waiting for NameServer "
2275 "login\n", phba->brd_no);
2277 /* Next look for NameServer ndlp */
2278 ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, NameServer_DID);
2280 lpfc_nlp_remove(phba, ndlp);
2281 /* Start discovery */
2282 lpfc_disc_start(phba);
2286 /* Check for wait for NameServer Rsp timeout */
2287 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2288 "%d:0224 NameServer Query timeout "
2291 phba->fc_ns_retry, LPFC_MAX_NS_RETRY);
2293 ndlp = lpfc_findnode_did(phba, NLP_SEARCH_UNMAPPED,
2296 if (phba->fc_ns_retry < LPFC_MAX_NS_RETRY) {
2297 /* Try it one more time */
2298 rc = lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT);
2302 phba->fc_ns_retry = 0;
2305 /* Nothing to authenticate, so CLEAR_LA right now */
2306 clearlambox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2309 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2310 "%d:0226 Device Discovery "
2311 "completion error\n",
2313 phba->hba_state = LPFC_HBA_ERROR;
2317 phba->hba_state = LPFC_CLEAR_LA;
2318 lpfc_clear_la(phba, clearlambox);
2319 clearlambox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
2320 rc = lpfc_sli_issue_mbox(phba, clearlambox,
2321 (MBX_NOWAIT | MBX_STOP_IOCB));
2322 if (rc == MBX_NOT_FINISHED) {
2323 mempool_free(clearlambox, phba->mbox_mem_pool);
2328 /* Setup and issue mailbox INITIALIZE LINK command */
2329 initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2330 if (!initlinkmbox) {
2331 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2332 "%d:0226 Device Discovery "
2333 "completion error\n",
2335 phba->hba_state = LPFC_HBA_ERROR;
2339 lpfc_linkdown(phba);
2340 lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
2341 phba->cfg_link_speed);
2342 initlinkmbox->mb.un.varInitLnk.lipsr_AL_PA = 0;
2343 rc = lpfc_sli_issue_mbox(phba, initlinkmbox,
2344 (MBX_NOWAIT | MBX_STOP_IOCB));
2345 if (rc == MBX_NOT_FINISHED)
2346 mempool_free(initlinkmbox, phba->mbox_mem_pool);
2350 case LPFC_DISC_AUTH:
2351 /* Node Authentication timeout */
2352 lpfc_printf_log(phba,
2355 "%d:0227 Node Authentication timeout\n",
2357 lpfc_disc_flush_list(phba);
2358 clearlambox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2361 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2362 "%d:0226 Device Discovery "
2363 "completion error\n",
2365 phba->hba_state = LPFC_HBA_ERROR;
2368 phba->hba_state = LPFC_CLEAR_LA;
2369 lpfc_clear_la(phba, clearlambox);
2370 clearlambox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
2371 rc = lpfc_sli_issue_mbox(phba, clearlambox,
2372 (MBX_NOWAIT | MBX_STOP_IOCB));
2373 if (rc == MBX_NOT_FINISHED) {
2374 mempool_free(clearlambox, phba->mbox_mem_pool);
2380 /* CLEAR LA timeout */
2381 lpfc_printf_log(phba,
2384 "%d:0228 CLEAR LA timeout\n",
2389 case LPFC_HBA_READY:
2390 if (phba->fc_flag & FC_RSCN_MODE) {
2391 lpfc_printf_log(phba,
2394 "%d:0231 RSCN timeout Data: x%x x%x\n",
2396 phba->fc_ns_retry, LPFC_MAX_NS_RETRY);
2398 /* Cleanup any outstanding ELS commands */
2399 lpfc_els_flush_cmd(phba);
2401 lpfc_els_flush_rscn(phba);
2402 lpfc_disc_flush_list(phba);
2408 lpfc_disc_flush_list(phba);
2409 psli->ring[(psli->ip_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
2410 psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
2411 psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
2412 phba->hba_state = LPFC_HBA_READY;
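/*
 * nodev timer expiry callback: runs in timer context, so it only queues an
 * LPFC_EVT_NODEV_TMO event on the work list and wakes the worker thread,
 * which does the real processing in lpfc_process_nodev_timeout().
 */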
2419 lpfc_nodev_timeout(unsigned long ptr)
2421 struct lpfc_hba *phba;
2422 struct lpfc_nodelist *ndlp;
2423 unsigned long iflag;
2424 struct lpfc_work_evt *evtp;
2426 ndlp = (struct lpfc_nodelist *)ptr;
2427 phba = ndlp->nlp_phba;
2428 evtp = &ndlp->nodev_timeout_evt;
2429 spin_lock_irqsave(phba->host->host_lock, iflag);
2431 if (!list_empty(&evtp->evt_listp)) {
2432 spin_unlock_irqrestore(phba->host->host_lock, iflag);
2435 evtp->evt_arg1 = ndlp;
2436 evtp->evt = LPFC_EVT_NODEV_TMO;
2437 list_add_tail(&evtp->evt_listp, &phba->work_list);
2438 if (phba->work_wait)
2439 wake_up(phba->work_wait);
2441 spin_unlock_irqrestore(phba->host->host_lock, iflag);
2447 * This routine handles processing an FDMI REG_LOGIN mailbox
2448 * command upon completion. It is set up in the LPFC_MBOXQ
2449 * as the completion routine when the command is
2450 * handed off to the SLI layer.
2453 lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
2455 struct lpfc_sli *psli;
2457 struct lpfc_dmabuf *mp;
2458 struct lpfc_nodelist *ndlp;
2463 ndlp = (struct lpfc_nodelist *) pmb->context2;
2464 mp = (struct lpfc_dmabuf *) (pmb->context1);
2466 pmb->context1 = NULL;
2468 ndlp->nlp_rpi = mb->un.varWords[0];
2469 ndlp->nlp_type |= NLP_FABRIC;
2470 ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
2471 lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
2473 /* Start issuing Fabric-Device Management Interface (FDMI)
2474 * command to 0xfffffa (FDMI well known port)
2476 if (phba->cfg_fdmi_on == 1) {
2477 lpfc_fdmi_cmd(phba, ndlp, SLI_MGMT_DHBA);
2480 * Delay issuing FDMI command if fdmi-on=2
2481 * (supporting RPA/hostname)
2483 mod_timer(&phba->fc_fdmitmo, jiffies + HZ * 60);
2486 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2488 mempool_free( pmb, phba->mbox_mem_pool);
2494 * This routine looks up the ndlp lists
2495 * for the given RPI. If the RPI is found,
2496 * it returns the node list pointer.
2499 struct lpfc_nodelist *
2500 lpfc_findnode_rpi(struct lpfc_hba * phba, uint16_t rpi)
2502 struct lpfc_nodelist *ndlp;
2503 struct list_head * lists[]={&phba->fc_nlpunmap_list,
2504 &phba->fc_nlpmap_list,
2505 &phba->fc_plogi_list,
2506 &phba->fc_adisc_list,
2507 &phba->fc_reglogin_list};
2510 spin_lock_irq(phba->host->host_lock);
2511 for (i = 0; i < ARRAY_SIZE(lists); i++ )
2512 list_for_each_entry(ndlp, lists[i], nlp_listp)
2513 if (ndlp->nlp_rpi == rpi) {
2514 spin_unlock_irq(phba->host->host_lock);
2517 spin_unlock_irq(phba->host->host_lock);
2522 * This routine looks up the ndlp lists
2523 * for the given WWPN. If the WWPN is found,
2524 * it returns the node list pointer.
2527 struct lpfc_nodelist *
2528 lpfc_findnode_wwpn(struct lpfc_hba * phba, uint32_t order,
2529 struct lpfc_name * wwpn)
2531 struct lpfc_nodelist *ndlp;
2532 struct list_head * lists[]={&phba->fc_nlpunmap_list,
2533 &phba->fc_nlpmap_list,
2535 &phba->fc_plogi_list,
2536 &phba->fc_adisc_list,
2537 &phba->fc_reglogin_list,
2538 &phba->fc_prli_list};
2539 uint32_t search[]={NLP_SEARCH_UNMAPPED,
2544 NLP_SEARCH_REGLOGIN,
2548 spin_lock_irq(phba->host->host_lock);
2549 for (i = 0; i < ARRAY_SIZE(lists); i++ ) {
2550 if (!(order & search[i]))
2552 list_for_each_entry(ndlp, lists[i], nlp_listp) {
2553 if (memcmp(&ndlp->nlp_portname, wwpn,
2554 sizeof(struct lpfc_name)) == 0) {
2555 spin_unlock_irq(phba->host->host_lock);
2560 spin_unlock_irq(phba->host->host_lock);
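/*
 * Initialize a newly allocated ndlp: zero the structure, set up its event
 * list heads and the nodev/ELS-retry delay timers, and record its DID.
 */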
2565 lpfc_nlp_init(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
2568 memset(ndlp, 0, sizeof (struct lpfc_nodelist));
2569 INIT_LIST_HEAD(&ndlp->nodev_timeout_evt.evt_listp);
2570 INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
2571 init_timer(&ndlp->nlp_tmofunc);
2572 ndlp->nlp_tmofunc.function = lpfc_nodev_timeout;
2573 ndlp->nlp_tmofunc.data = (unsigned long)ndlp;
2574 init_timer(&ndlp->nlp_delayfunc);
2575 ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
2576 ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
2577 ndlp->nlp_DID = did;
2578 ndlp->nlp_phba = phba;
2579 ndlp->nlp_sid = NLP_NO_SID;