[SCSI] lpfc 8.1.2: Add ERROR and WARM_START modes for diagnostic purposes.
pandora-kernel.git: drivers/scsi/lpfc/lpfc_hbadisc.c
1 /*******************************************************************
2  * This file is part of the Emulex Linux Device Driver for         *
3  * Fibre Channel Host Bus Adapters.                                *
4  * Copyright (C) 2004-2006 Emulex.  All rights reserved.           *
5  * EMULEX and SLI are trademarks of Emulex.                        *
6  * www.emulex.com                                                  *
7  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
8  *                                                                 *
9  * This program is free software; you can redistribute it and/or   *
10  * modify it under the terms of version 2 of the GNU General       *
11  * Public License as published by the Free Software Foundation.    *
12  * This program is distributed in the hope that it will be useful. *
13  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
14  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
15  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
16  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
18  * more details, a copy of which can be found in the file COPYING  *
19  * included with this package.                                     *
20  *******************************************************************/
21
22 #include <linux/blkdev.h>
23 #include <linux/pci.h>
24 #include <linux/kthread.h>
25 #include <linux/interrupt.h>
26
27 #include <scsi/scsi.h>
28 #include <scsi/scsi_device.h>
29 #include <scsi/scsi_host.h>
30 #include <scsi/scsi_transport_fc.h>
31
32 #include "lpfc_hw.h"
33 #include "lpfc_disc.h"
34 #include "lpfc_sli.h"
35 #include "lpfc_scsi.h"
36 #include "lpfc.h"
37 #include "lpfc_logmsg.h"
38 #include "lpfc_crtn.h"
39
40 /* AlpaArray for assignment of scsi id for scan-down and bind_method */
41 static uint8_t lpfcAlpaArray[] = {
42         0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6,
43         0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA,
44         0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5,
45         0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9,
46         0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97,
47         0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79,
48         0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B,
49         0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56,
50         0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A,
51         0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35,
52         0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29,
53         0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17,
54         0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
55 };
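/* The table above holds the 126 valid arbitrated loop physical addresses
 * (AL_PAs). Indexing into it gives the AL_PA used when deriving a scsi id
 * for scan-down binding.
 */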
56
57 static void lpfc_disc_timeout_handler(struct lpfc_hba *);
58
59 static void
60 lpfc_process_nodev_timeout(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
61 {
62         int warn_on = 0;
63
64         spin_lock_irq(phba->host->host_lock);
65         if (!(ndlp->nlp_flag & NLP_NODEV_TMO)) {
66                 spin_unlock_irq(phba->host->host_lock);
67                 return;
68         }
69
70         ndlp->nlp_flag &= ~NLP_NODEV_TMO;
71
72         if (ndlp->nlp_sid != NLP_NO_SID) {
73                 warn_on = 1;
74                 /* flush the target */
75                 lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
76                         ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
77         }
78         spin_unlock_irq(phba->host->host_lock);
79
80         if (warn_on) {
81                 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
82                                 "%d:0203 Nodev timeout on NPort x%x "
83                                 "Data: x%x x%x x%x\n",
84                                 phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag,
85                                 ndlp->nlp_state, ndlp->nlp_rpi);
86         } else {
87                 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
88                                 "%d:0204 Nodev timeout on NPort x%x "
89                                 "Data: x%x x%x x%x\n",
90                                 phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag,
91                                 ndlp->nlp_state, ndlp->nlp_rpi);
92         }
93
94         lpfc_disc_state_machine(phba, ndlp, NULL, NLP_EVT_DEVICE_RM);
95         return;
96 }
97
98 static void
99 lpfc_work_list_done(struct lpfc_hba * phba)
100 {
101         struct lpfc_work_evt  *evtp = NULL;
102         struct lpfc_nodelist  *ndlp;
103         int free_evt;
104
105         spin_lock_irq(phba->host->host_lock);
106         while(!list_empty(&phba->work_list)) {
107                 list_remove_head((&phba->work_list), evtp, typeof(*evtp),
108                                  evt_listp);
109                 spin_unlock_irq(phba->host->host_lock);
110                 free_evt = 1;
111                 switch(evtp->evt) {
112                 case LPFC_EVT_NODEV_TMO:
113                         ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
114                         lpfc_process_nodev_timeout(phba, ndlp);
115                         free_evt = 0;
116                         break;
117                 case LPFC_EVT_ELS_RETRY:
118                         ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
119                         lpfc_els_retry_delay_handler(ndlp);
120                         free_evt = 0;
121                         break;
122                 case LPFC_EVT_ONLINE:
123                         if (phba->hba_state < LPFC_LINK_DOWN)
124                                 *(int *)(evtp->evt_arg1)  = lpfc_online(phba);
125                         else
126                                 *(int *)(evtp->evt_arg1)  = 0;
127                         complete((struct completion *)(evtp->evt_arg2));
128                         break;
129                 case LPFC_EVT_OFFLINE:
130                         if (phba->hba_state >= LPFC_LINK_DOWN)
131                                 lpfc_offline(phba);
132                         lpfc_sli_brdrestart(phba);
133                         *(int *)(evtp->evt_arg1) =
134                                 lpfc_sli_brdready(phba,HS_FFRDY | HS_MBRDY);
135                         complete((struct completion *)(evtp->evt_arg2));
136                         break;
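                /* WARM_START and KILL back the diagnostic WARM_START/ERROR
                 * modes added by this change: the board is taken offline and
                 * then either reset and waited on for mailbox readiness
                 * (WARM_START) or killed outright (KILL).
                 */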
137                 case LPFC_EVT_WARM_START:
138                         if (phba->hba_state >= LPFC_LINK_DOWN)
139                                 lpfc_offline(phba);
140                         lpfc_sli_brdreset(phba);
141                         lpfc_hba_down_post(phba);
142                         *(int *)(evtp->evt_arg1) =
143                                 lpfc_sli_brdready(phba, HS_MBRDY);
144                         complete((struct completion *)(evtp->evt_arg2));
145                         break;
146                 case LPFC_EVT_KILL:
147                         if (phba->hba_state >= LPFC_LINK_DOWN)
148                                 lpfc_offline(phba);
149                         *(int *)(evtp->evt_arg1)  = lpfc_sli_brdkill(phba);
150                         complete((struct completion *)(evtp->evt_arg2));
151                         break;
152                 }
153                 if (free_evt)
154                         kfree(evtp);
155                 spin_lock_irq(phba->host->host_lock);
156         }
157         spin_unlock_irq(phba->host->host_lock);
158
159 }
160
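/*
 * Perform one pass of deferred work: handle any latched host attention
 * conditions (error, mailbox, link attention), run expired worker timeout
 * handlers, service deferred slow-path ring events, and finally drain the
 * queued work_list.
 */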
161 static void
162 lpfc_work_done(struct lpfc_hba * phba)
163 {
164         struct lpfc_sli_ring *pring;
165         int i;
166         uint32_t ha_copy;
167         uint32_t control;
168         uint32_t work_hba_events;
169
170         spin_lock_irq(phba->host->host_lock);
171         ha_copy = phba->work_ha;
172         phba->work_ha = 0;
173         work_hba_events=phba->work_hba_events;
174         spin_unlock_irq(phba->host->host_lock);
175
176         if(ha_copy & HA_ERATT)
177                 lpfc_handle_eratt(phba);
178
179         if(ha_copy & HA_MBATT)
180                 lpfc_sli_handle_mb_event(phba);
181
182         if(ha_copy & HA_LATT)
183                 lpfc_handle_latt(phba);
184
185         if (work_hba_events & WORKER_DISC_TMO)
186                 lpfc_disc_timeout_handler(phba);
187
188         if (work_hba_events & WORKER_ELS_TMO)
189                 lpfc_els_timeout_handler(phba);
190
191         if (work_hba_events & WORKER_MBOX_TMO)
192                 lpfc_mbox_timeout_handler(phba);
193
194         if (work_hba_events & WORKER_FDMI_TMO)
195                 lpfc_fdmi_tmo_handler(phba);
196
197         spin_lock_irq(phba->host->host_lock);
198         phba->work_hba_events &= ~work_hba_events;
199         spin_unlock_irq(phba->host->host_lock);
200
201         for (i = 0; i < phba->sli.num_rings; i++, ha_copy >>= 4) {
202                 pring = &phba->sli.ring[i];
203                 if ((ha_copy & HA_RXATT)
204                     || (pring->flag & LPFC_DEFERRED_RING_EVENT)) {
205                         if (pring->flag & LPFC_STOP_IOCB_MASK) {
206                                 pring->flag |= LPFC_DEFERRED_RING_EVENT;
207                         } else {
208                                 lpfc_sli_handle_slow_ring_event(phba, pring,
209                                                                 (ha_copy &
210                                                                  HA_RXMASK));
211                                 pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
212                         }
213                         /*
214                          * Turn on Ring interrupts
215                          */
216                         spin_lock_irq(phba->host->host_lock);
217                         control = readl(phba->HCregaddr);
218                         control |= (HC_R0INT_ENA << i);
219                         writel(control, phba->HCregaddr);
220                         readl(phba->HCregaddr); /* flush */
221                         spin_unlock_irq(phba->host->host_lock);
222                 }
223         }
224
225         lpfc_work_list_done (phba);
226
227 }
228
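/*
 * Wait condition for the worker thread: wake up when there is host
 * attention work, a pending hba event, a queued work_list entry, or the
 * thread is being asked to stop.
 */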
229 static int
230 check_work_wait_done(struct lpfc_hba *phba) {
231
232         spin_lock_irq(phba->host->host_lock);
233         if (phba->work_ha ||
234             phba->work_hba_events ||
235             (!list_empty(&phba->work_list)) ||
236             kthread_should_stop()) {
237                 spin_unlock_irq(phba->host->host_lock);
238                 return 1;
239         } else {
240                 spin_unlock_irq(phba->host->host_lock);
241                 return 0;
242         }
243 }
244
245 int
246 lpfc_do_work(void *p)
247 {
248         struct lpfc_hba *phba = p;
249         int rc;
250         DECLARE_WAIT_QUEUE_HEAD(work_waitq);
251
252         set_user_nice(current, -20);
253         phba->work_wait = &work_waitq;
254
255         while (1) {
256
257                 rc = wait_event_interruptible(work_waitq,
258                                                 check_work_wait_done(phba));
259                 BUG_ON(rc);
260
261                 if (kthread_should_stop())
262                         break;
263
264                 lpfc_work_done(phba);
265
266         }
267         phba->work_wait = NULL;
268         return 0;
269 }
270
271 /*
272  * This is only called to handle FC worker events. Since this is a rare
273  * occurrence, we allocate a struct lpfc_work_evt structure here instead of
274  * embedding it in the IOCB.
275  */
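/*
 * Illustrative usage (not from the original source): synchronous requests
 * such as LPFC_EVT_OFFLINE pass an int pointer in arg1 to receive the
 * return status and a struct completion pointer in arg2 to wait on, per
 * the handling in lpfc_work_list_done():
 *
 *   struct completion done;
 *   int status;
 *
 *   init_completion(&done);
 *   lpfc_workq_post_event(phba, &status, &done, LPFC_EVT_OFFLINE);
 *   wait_for_completion(&done);
 */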
276 int
277 lpfc_workq_post_event(struct lpfc_hba * phba, void *arg1, void *arg2,
278                       uint32_t evt)
279 {
280         struct lpfc_work_evt  *evtp;
281
282         /*
283          * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will
284          * be queued to worker thread for processing
285          */
286         evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_KERNEL);
287         if (!evtp)
288                 return 0;
289
290         evtp->evt_arg1  = arg1;
291         evtp->evt_arg2  = arg2;
292         evtp->evt       = evt;
293
294         list_add_tail(&evtp->evt_listp, &phba->work_list);
295         spin_lock_irq(phba->host->host_lock);
296         if (phba->work_wait)
297                 wake_up(phba->work_wait);
298         spin_unlock_irq(phba->host->host_lock);
299
300         return 1;
301 }
302
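/*
 * Transition the HBA to LPFC_LINK_DOWN: unregister any firmware default
 * RPIs, flush pending RSCN and ELS activity, and run every node on the
 * active node lists through the discovery state machine with a
 * DEVICE_RECOVERY event.
 */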
303 int
304 lpfc_linkdown(struct lpfc_hba * phba)
305 {
306         struct lpfc_sli       *psli;
307         struct lpfc_nodelist  *ndlp, *next_ndlp;
308         struct list_head *listp, *node_list[7];
309         LPFC_MBOXQ_t     *mb;
310         int               rc, i;
311
312         if (phba->hba_state == LPFC_LINK_DOWN) {
313                 return 0;
314         }
315
316         psli = &phba->sli;
317
318         /* sysfs or selective reset may call this routine to clean up */
319         if (phba->hba_state > LPFC_LINK_DOWN) {
320                 spin_lock_irq(phba->host->host_lock);
321                 phba->hba_state = LPFC_LINK_DOWN;
322                 spin_unlock_irq(phba->host->host_lock);
323         }
324
325         /* Clean up any firmware default rpi's */
326         if ((mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
327                 lpfc_unreg_did(phba, 0xffffffff, mb);
328                 mb->mbox_cmpl=lpfc_sli_def_mbox_cmpl;
329                 if (lpfc_sli_issue_mbox(phba, mb, (MBX_NOWAIT | MBX_STOP_IOCB))
330                     == MBX_NOT_FINISHED) {
331                         mempool_free( mb, phba->mbox_mem_pool);
332                 }
333         }
334
335         /* Cleanup any outstanding RSCN activity */
336         lpfc_els_flush_rscn(phba);
337
338         /* Cleanup any outstanding ELS commands */
339         lpfc_els_flush_cmd(phba);
340
341         /* Issue a LINK DOWN event to all nodes */
342         node_list[0] = &phba->fc_npr_list;  /* MUST do this list first */
343         node_list[1] = &phba->fc_nlpmap_list;
344         node_list[2] = &phba->fc_nlpunmap_list;
345         node_list[3] = &phba->fc_prli_list;
346         node_list[4] = &phba->fc_reglogin_list;
347         node_list[5] = &phba->fc_adisc_list;
348         node_list[6] = &phba->fc_plogi_list;
349         for (i = 0; i < 7; i++) {
350                 listp = node_list[i];
351                 if (list_empty(listp))
352                         continue;
353
354                 list_for_each_entry_safe(ndlp, next_ndlp, listp, nlp_listp) {
355
356                         rc = lpfc_disc_state_machine(phba, ndlp, NULL,
357                                              NLP_EVT_DEVICE_RECOVERY);
358
359                         /* Check config parameter use-adisc or FCP-2 */
360                         if ((rc != NLP_STE_FREED_NODE) &&
361                                 (phba->cfg_use_adisc == 0) &&
362                                 !(ndlp->nlp_fcp_info &
363                                         NLP_FCP_2_DEVICE)) {
364                                 /* We know we will have to relogin, so
365                                  * unreglogin the rpi right now to fail
366                                  * any outstanding I/Os quickly.
367                                  */
368                                 lpfc_unreg_rpi(phba, ndlp);
369                         }
370                 }
371         }
372
373         /* free any ndlp's on unused list */
374         list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_unused_list,
375                                 nlp_listp) {
376                 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
377         }
378
379         /* Setup myDID for link up if we are in pt2pt mode */
380         if (phba->fc_flag & FC_PT2PT) {
381                 phba->fc_myDID = 0;
382                 if ((mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
383                         lpfc_config_link(phba, mb);
384                         mb->mbox_cmpl=lpfc_sli_def_mbox_cmpl;
385                         if (lpfc_sli_issue_mbox
386                             (phba, mb, (MBX_NOWAIT | MBX_STOP_IOCB))
387                             == MBX_NOT_FINISHED) {
388                                 mempool_free( mb, phba->mbox_mem_pool);
389                         }
390                 }
391                 spin_lock_irq(phba->host->host_lock);
392                 phba->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
393                 spin_unlock_irq(phba->host->host_lock);
394         }
395         spin_lock_irq(phba->host->host_lock);
396         phba->fc_flag &= ~FC_LBIT;
397         spin_unlock_irq(phba->host->host_lock);
398
399         /* Turn off discovery timer if it's running */
400         lpfc_can_disctmo(phba);
401
402         /* Must process IOCBs on all rings to handle ABORTed I/Os */
403         return (0);
404 }
405
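/*
 * Transition the HBA to LPFC_LINK_UP and prepare the node lists for
 * rediscovery; when FC_LBIT is set, fabric nodes are moved to the unused
 * list and nodes not marked for ADISC have their RPIs unregistered.
 */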
406 static int
407 lpfc_linkup(struct lpfc_hba * phba)
408 {
409         struct lpfc_nodelist *ndlp, *next_ndlp;
410         struct list_head *listp, *node_list[7];
411         int i;
412
413         spin_lock_irq(phba->host->host_lock);
414         phba->hba_state = LPFC_LINK_UP;
415         phba->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
416                            FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
417         phba->fc_flag |= FC_NDISC_ACTIVE;
418         phba->fc_ns_retry = 0;
419         spin_unlock_irq(phba->host->host_lock);
420
421
422         node_list[0] = &phba->fc_plogi_list;
423         node_list[1] = &phba->fc_adisc_list;
424         node_list[2] = &phba->fc_reglogin_list;
425         node_list[3] = &phba->fc_prli_list;
426         node_list[4] = &phba->fc_nlpunmap_list;
427         node_list[5] = &phba->fc_nlpmap_list;
428         node_list[6] = &phba->fc_npr_list;
429         for (i = 0; i < 7; i++) {
430                 listp = node_list[i];
431                 if (list_empty(listp))
432                         continue;
433
434                 list_for_each_entry_safe(ndlp, next_ndlp, listp, nlp_listp) {
435                         if (phba->fc_flag & FC_LBIT) {
436                                 if (ndlp->nlp_type & NLP_FABRIC) {
437                                         /* On Linkup its safe to clean up the
438                                          * ndlp from Fabric connections.
439                                          */
440                                         lpfc_nlp_list(phba, ndlp,
441                                                         NLP_UNUSED_LIST);
442                                 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
443                                         /* Fail outstanding IO now since device
444                                          * is marked for PLOGI.
445                                          */
446                                         lpfc_unreg_rpi(phba, ndlp);
447                                 }
448                         }
449                 }
450         }
451
452         /* free any ndlp's on unused list */
453         list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_unused_list,
454                                 nlp_listp) {
455                 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
456         }
457
458         return 0;
459 }
460
461 /*
462  * This routine handles processing a CLEAR_LA mailbox
463  * command upon completion. It is setup in the LPFC_MBOXQ
464  * as the completion routine when the command is
465  * handed off to the SLI layer.
466  */
467 void
468 lpfc_mbx_cmpl_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
469 {
470         struct lpfc_sli *psli;
471         MAILBOX_t *mb;
472         uint32_t control;
473
474         psli = &phba->sli;
475         mb = &pmb->mb;
476         /* Since we don't do discovery right now, turn these off here */
477         psli->ring[psli->ip_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
478         psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
479         psli->ring[psli->next_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
480
481         /* Check for error */
482         if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
483                 /* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
484                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
485                                 "%d:0320 CLEAR_LA mbxStatus error x%x hba "
486                                 "state x%x\n",
487                                 phba->brd_no, mb->mbxStatus, phba->hba_state);
488
489                 phba->hba_state = LPFC_HBA_ERROR;
490                 goto out;
491         }
492
493         if (phba->fc_flag & FC_ABORT_DISCOVERY)
494                 goto out;
495
496         phba->num_disc_nodes = 0;
497         /* go thru NPR list and issue ELS PLOGIs */
498         if (phba->fc_npr_cnt) {
499                 lpfc_els_disc_plogi(phba);
500         }
501
502         if(!phba->num_disc_nodes) {
503                 spin_lock_irq(phba->host->host_lock);
504                 phba->fc_flag &= ~FC_NDISC_ACTIVE;
505                 spin_unlock_irq(phba->host->host_lock);
506         }
507
508         phba->hba_state = LPFC_HBA_READY;
509
510 out:
511         /* Device Discovery completes */
512         lpfc_printf_log(phba,
513                          KERN_INFO,
514                          LOG_DISCOVERY,
515                          "%d:0225 Device Discovery completes\n",
516                          phba->brd_no);
517
518         mempool_free( pmb, phba->mbox_mem_pool);
519
520         spin_lock_irq(phba->host->host_lock);
521         phba->fc_flag &= ~FC_ABORT_DISCOVERY;
522         if (phba->fc_flag & FC_ESTABLISH_LINK) {
523                 phba->fc_flag &= ~FC_ESTABLISH_LINK;
524         }
525         spin_unlock_irq(phba->host->host_lock);
526
527         del_timer_sync(&phba->fc_estabtmo);
528
529         lpfc_can_disctmo(phba);
530
531         /* turn on Link Attention interrupts */
532         spin_lock_irq(phba->host->host_lock);
533         psli->sli_flag |= LPFC_PROCESS_LA;
534         control = readl(phba->HCregaddr);
535         control |= HC_LAINT_ENA;
536         writel(control, phba->HCregaddr);
537         readl(phba->HCregaddr); /* flush */
538         spin_unlock_irq(phba->host->host_lock);
539
540         return;
541 }
542
543 static void
544 lpfc_mbx_cmpl_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
545 {
546         struct lpfc_sli *psli;
547         MAILBOX_t *mb;
548
549         psli = &phba->sli;
550         mb = &pmb->mb;
551         /* Check for error */
552         if (mb->mbxStatus) {
553                 /* CONFIG_LINK mbox error <mbxStatus> state <hba_state> */
554                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
555                                 "%d:0306 CONFIG_LINK mbxStatus error x%x "
556                                 "HBA state x%x\n",
557                                 phba->brd_no, mb->mbxStatus, phba->hba_state);
558
559                 lpfc_linkdown(phba);
560                 phba->hba_state = LPFC_HBA_ERROR;
561                 goto out;
562         }
563
564         if (phba->hba_state == LPFC_LOCAL_CFG_LINK) {
565                 if (phba->fc_topology == TOPOLOGY_LOOP) {
566                         /* If we are public loop and L bit was set */
567                         if ((phba->fc_flag & FC_PUBLIC_LOOP) &&
568                             !(phba->fc_flag & FC_LBIT)) {
569                                 /* Need to wait for FAN - use discovery timer
570                                  * for timeout.  hba_state is identically
571                                  * LPFC_LOCAL_CFG_LINK while waiting for FAN
572                                  */
573                                 lpfc_set_disctmo(phba);
574                                 mempool_free( pmb, phba->mbox_mem_pool);
575                                 return;
576                         }
577                 }
578
579                 /* Start discovery by sending a FLOGI. hba_state is identically
580                  * LPFC_FLOGI while waiting for FLOGI cmpl
581                  */
582                 phba->hba_state = LPFC_FLOGI;
583                 lpfc_set_disctmo(phba);
584                 lpfc_initial_flogi(phba);
585                 mempool_free( pmb, phba->mbox_mem_pool);
586                 return;
587         }
588         if (phba->hba_state == LPFC_FABRIC_CFG_LINK) {
589                 mempool_free( pmb, phba->mbox_mem_pool);
590                 return;
591         }
592
593 out:
594         /* CONFIG_LINK bad hba state <hba_state> */
595         lpfc_printf_log(phba,
596                         KERN_ERR,
597                         LOG_DISCOVERY,
598                         "%d:0200 CONFIG_LINK bad hba state x%x\n",
599                         phba->brd_no, phba->hba_state);
600
601         if (phba->hba_state != LPFC_CLEAR_LA) {
602                 lpfc_clear_la(phba, pmb);
603                 pmb->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
604                 if (lpfc_sli_issue_mbox(phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB))
605                     == MBX_NOT_FINISHED) {
606                         mempool_free( pmb, phba->mbox_mem_pool);
607                         lpfc_disc_flush_list(phba);
608                         psli->ring[(psli->ip_ring)].flag &=
609                                 ~LPFC_STOP_IOCB_EVENT;
610                         psli->ring[(psli->fcp_ring)].flag &=
611                                 ~LPFC_STOP_IOCB_EVENT;
612                         psli->ring[(psli->next_ring)].flag &=
613                                 ~LPFC_STOP_IOCB_EVENT;
614                         phba->hba_state = LPFC_HBA_READY;
615                 }
616         } else {
617                 mempool_free( pmb, phba->mbox_mem_pool);
618         }
619         return;
620 }
621
622 static void
623 lpfc_mbx_cmpl_read_sparam(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
624 {
625         struct lpfc_sli *psli = &phba->sli;
626         MAILBOX_t *mb = &pmb->mb;
627         struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
628
629
630         /* Check for error */
631         if (mb->mbxStatus) {
632                 /* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
633                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
634                                 "%d:0319 READ_SPARAM mbxStatus error x%x "
635                                 "hba state x%x\n",
636                                 phba->brd_no, mb->mbxStatus, phba->hba_state);
637
638                 lpfc_linkdown(phba);
639                 phba->hba_state = LPFC_HBA_ERROR;
640                 goto out;
641         }
642
643         memcpy((uint8_t *) & phba->fc_sparam, (uint8_t *) mp->virt,
644                sizeof (struct serv_parm));
645         memcpy((uint8_t *) & phba->fc_nodename,
646                (uint8_t *) & phba->fc_sparam.nodeName,
647                sizeof (struct lpfc_name));
648         memcpy((uint8_t *) & phba->fc_portname,
649                (uint8_t *) & phba->fc_sparam.portName,
650                sizeof (struct lpfc_name));
651         lpfc_mbuf_free(phba, mp->virt, mp->phys);
652         kfree(mp);
653         mempool_free( pmb, phba->mbox_mem_pool);
654         return;
655
656 out:
657         pmb->context1 = NULL;
658         lpfc_mbuf_free(phba, mp->virt, mp->phys);
659         kfree(mp);
660         if (phba->hba_state != LPFC_CLEAR_LA) {
661                 lpfc_clear_la(phba, pmb);
662                 pmb->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
663                 if (lpfc_sli_issue_mbox(phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB))
664                     == MBX_NOT_FINISHED) {
665                         mempool_free( pmb, phba->mbox_mem_pool);
666                         lpfc_disc_flush_list(phba);
667                         psli->ring[(psli->ip_ring)].flag &=
668                             ~LPFC_STOP_IOCB_EVENT;
669                         psli->ring[(psli->fcp_ring)].flag &=
670                             ~LPFC_STOP_IOCB_EVENT;
671                         psli->ring[(psli->next_ring)].flag &=
672                             ~LPFC_STOP_IOCB_EVENT;
673                         phba->hba_state = LPFC_HBA_READY;
674                 }
675         } else {
676                 mempool_free( pmb, phba->mbox_mem_pool);
677         }
678         return;
679 }
680
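/*
 * Handle a link up attention reported by READ_LA: record the link speed
 * and topology, set up the local DID (the granted AL_PA on a loop),
 * optionally log the ALPA map, then issue READ_SPARAM and CONFIG_LINK
 * mailbox commands to continue bring-up.
 */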
681 static void
682 lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
683 {
684         int i;
685         LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox;
686         sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
687         cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
688
689         spin_lock_irq(phba->host->host_lock);
690         switch(la->UlnkSpeed) {
691                 case LA_1GHZ_LINK:
692                         phba->fc_linkspeed = LA_1GHZ_LINK;
693                         break;
694                 case LA_2GHZ_LINK:
695                         phba->fc_linkspeed = LA_2GHZ_LINK;
696                         break;
697                 case LA_4GHZ_LINK:
698                         phba->fc_linkspeed = LA_4GHZ_LINK;
699                         break;
700                 default:
701                         phba->fc_linkspeed = LA_UNKNW_LINK;
702                         break;
703         }
704
705         phba->fc_topology = la->topology;
706
707         if (phba->fc_topology == TOPOLOGY_LOOP) {
708         /* Get Loop Map information */
709
710                 if (la->il)
711                         phba->fc_flag |= FC_LBIT;
712
713                 phba->fc_myDID = la->granted_AL_PA;
714                 i = la->un.lilpBde64.tus.f.bdeSize;
715
716                 if (i == 0) {
717                         phba->alpa_map[0] = 0;
718                 } else {
719                         if (phba->cfg_log_verbose & LOG_LINK_EVENT) {
720                                 int numalpa, j, k;
721                                 union {
722                                         uint8_t pamap[16];
723                                         struct {
724                                                 uint32_t wd1;
725                                                 uint32_t wd2;
726                                                 uint32_t wd3;
727                                                 uint32_t wd4;
728                                         } pa;
729                                 } un;
730                                 numalpa = phba->alpa_map[0];
731                                 j = 0;
732                                 while (j < numalpa) {
733                                         memset(un.pamap, 0, 16);
734                                         for (k = 1; j < numalpa; k++) {
735                                                 un.pamap[k - 1] =
736                                                         phba->alpa_map[j + 1];
737                                                 j++;
738                                                 if (k == 16)
739                                                         break;
740                                         }
741                                         /* Link Up Event ALPA map */
742                                         lpfc_printf_log(phba,
743                                                 KERN_WARNING,
744                                                 LOG_LINK_EVENT,
745                                                 "%d:1304 Link Up Event "
746                                                 "ALPA map Data: x%x "
747                                                 "x%x x%x x%x\n",
748                                                 phba->brd_no,
749                                                 un.pa.wd1, un.pa.wd2,
750                                                 un.pa.wd3, un.pa.wd4);
751                                 }
752                         }
753                 }
754         } else {
755                 phba->fc_myDID = phba->fc_pref_DID;
756                 phba->fc_flag |= FC_LBIT;
757         }
758         spin_unlock_irq(phba->host->host_lock);
759
760         lpfc_linkup(phba);
761         if (sparam_mbox) {
762                 lpfc_read_sparam(phba, sparam_mbox);
763                 sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
764                 lpfc_sli_issue_mbox(phba, sparam_mbox,
765                                                 (MBX_NOWAIT | MBX_STOP_IOCB));
766         }
767
768         if (cfglink_mbox) {
769                 phba->hba_state = LPFC_LOCAL_CFG_LINK;
770                 lpfc_config_link(phba, cfglink_mbox);
771                 cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_config_link;
772                 lpfc_sli_issue_mbox(phba, cfglink_mbox,
773                                                 (MBX_NOWAIT | MBX_STOP_IOCB));
774         }
775 }
776
777 static void
778 lpfc_mbx_issue_link_down(struct lpfc_hba *phba) {
779         uint32_t control;
780         struct lpfc_sli *psli = &phba->sli;
781
782         lpfc_linkdown(phba);
783
784         /* turn on Link Attention interrupts - no CLEAR_LA needed */
785         spin_lock_irq(phba->host->host_lock);
786         psli->sli_flag |= LPFC_PROCESS_LA;
787         control = readl(phba->HCregaddr);
788         control |= HC_LAINT_ENA;
789         writel(control, phba->HCregaddr);
790         readl(phba->HCregaddr); /* flush */
791         spin_unlock_irq(phba->host->host_lock);
792 }
793
794 /*
795  * This routine handles processing a READ_LA mailbox
796  * command upon completion. It is setup in the LPFC_MBOXQ
797  * as the completion routine when the command is
798  * handed off to the SLI layer.
799  */
800 void
801 lpfc_mbx_cmpl_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
802 {
803         READ_LA_VAR *la;
804         MAILBOX_t *mb = &pmb->mb;
805         struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
806
807         /* Check for error */
808         if (mb->mbxStatus) {
809                 lpfc_printf_log(phba,
810                                 KERN_INFO,
811                                 LOG_LINK_EVENT,
812                                 "%d:1307 READ_LA mbox error x%x state x%x\n",
813                                 phba->brd_no,
814                                 mb->mbxStatus, phba->hba_state);
815                 lpfc_mbx_issue_link_down(phba);
816                 phba->hba_state = LPFC_HBA_ERROR;
817                 goto lpfc_mbx_cmpl_read_la_free_mbuf;
818         }
819
820         la = (READ_LA_VAR *) & pmb->mb.un.varReadLA;
821
822         memcpy(&phba->alpa_map[0], mp->virt, 128);
823
824         spin_lock_irq(phba->host->host_lock);
825         if (la->pb)
826                 phba->fc_flag |= FC_BYPASSED_MODE;
827         else
828                 phba->fc_flag &= ~FC_BYPASSED_MODE;
829         spin_unlock_irq(phba->host->host_lock);
830
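        /* A skipped or repeated event tag indicates link attention events
         * were missed; if this attention is a link up, drop the link first
         * so stale discovery state is cleaned up before the link up is
         * processed.
         */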
831         if (((phba->fc_eventTag + 1) < la->eventTag) ||
832              (phba->fc_eventTag == la->eventTag)) {
833                 phba->fc_stat.LinkMultiEvent++;
834                 if (la->attType == AT_LINK_UP) {
835                         if (phba->fc_eventTag != 0)
836                                 lpfc_linkdown(phba);
837                 }
838         }
839
840         phba->fc_eventTag = la->eventTag;
841
842         if (la->attType == AT_LINK_UP) {
843                 phba->fc_stat.LinkUp++;
844                 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
845                                 "%d:1303 Link Up Event x%x received "
846                                 "Data: x%x x%x x%x x%x\n",
847                                 phba->brd_no, la->eventTag, phba->fc_eventTag,
848                                 la->granted_AL_PA, la->UlnkSpeed,
849                                 phba->alpa_map[0]);
850                 lpfc_mbx_process_link_up(phba, la);
851         } else {
852                 phba->fc_stat.LinkDown++;
853                 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
854                                 "%d:1305 Link Down Event x%x received "
855                                 "Data: x%x x%x x%x\n",
856                                 phba->brd_no, la->eventTag, phba->fc_eventTag,
857                                 phba->hba_state, phba->fc_flag);
858                 lpfc_mbx_issue_link_down(phba);
859         }
860
861 lpfc_mbx_cmpl_read_la_free_mbuf:
862         lpfc_mbuf_free(phba, mp->virt, mp->phys);
863         kfree(mp);
864         mempool_free(pmb, phba->mbox_mem_pool);
865         return;
866 }
867
868 /*
869  * This routine handles processing a REG_LOGIN mailbox
870  * command upon completion. It is setup in the LPFC_MBOXQ
871  * as the completion routine when the command is
872  * handed off to the SLI layer.
873  */
874 void
875 lpfc_mbx_cmpl_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
876 {
877         struct lpfc_sli *psli;
878         MAILBOX_t *mb;
879         struct lpfc_dmabuf *mp;
880         struct lpfc_nodelist *ndlp;
881
882         psli = &phba->sli;
883         mb = &pmb->mb;
884
885         ndlp = (struct lpfc_nodelist *) pmb->context2;
886         mp = (struct lpfc_dmabuf *) (pmb->context1);
887
888         pmb->context1 = NULL;
889
890         /* Good status, call state machine */
891         lpfc_disc_state_machine(phba, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
892         lpfc_mbuf_free(phba, mp->virt, mp->phys);
893         kfree(mp);
894         mempool_free( pmb, phba->mbox_mem_pool);
895
896         return;
897 }
898
899 /*
900  * This routine handles processing a Fabric REG_LOGIN mailbox
901  * command upon completion. It is setup in the LPFC_MBOXQ
902  * as the completion routine when the command is
903  * handed off to the SLI layer.
904  */
905 void
906 lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
907 {
908         struct lpfc_sli *psli;
909         MAILBOX_t *mb;
910         struct lpfc_dmabuf *mp;
911         struct lpfc_nodelist *ndlp;
912         struct lpfc_nodelist *ndlp_fdmi;
913
914
915         psli = &phba->sli;
916         mb = &pmb->mb;
917
918         ndlp = (struct lpfc_nodelist *) pmb->context2;
919         mp = (struct lpfc_dmabuf *) (pmb->context1);
920
921         if (mb->mbxStatus) {
922                 lpfc_mbuf_free(phba, mp->virt, mp->phys);
923                 kfree(mp);
924                 mempool_free( pmb, phba->mbox_mem_pool);
925                 mempool_free( ndlp, phba->nlp_mem_pool);
926
927                 /* FLOGI failed, so just use loop map to make discovery list */
928                 lpfc_disc_list_loopmap(phba);
929
930                 /* Start discovery */
931                 lpfc_disc_start(phba);
932                 return;
933         }
934
935         pmb->context1 = NULL;
936
937         ndlp->nlp_rpi = mb->un.varWords[0];
938         ndlp->nlp_type |= NLP_FABRIC;
939         ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
940         lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
941
942         if (phba->hba_state == LPFC_FABRIC_CFG_LINK) {
943                 /* This NPort has been assigned an NPort_ID by the fabric as a
944                  * result of the completed fabric login.  Issue a State Change
945                  * Registration (SCR) ELS request to the fabric controller
946                  * (SCR_DID) so that this NPort gets RSCN events from the
947                  * fabric.
948                  */
949                 lpfc_issue_els_scr(phba, SCR_DID, 0);
950
951                 ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, NameServer_DID);
952                 if (!ndlp) {
953                         /* Allocate a new node instance. If the pool is empty,
954                          * start the discovery process and skip the Nameserver
955                          * login process.  This is attempted again later on.
956                          * Otherwise, issue a Port Login (PLOGI) to NameServer.
957                          */
958                         ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC);
959                         if (!ndlp) {
960                                 lpfc_disc_start(phba);
961                                 lpfc_mbuf_free(phba, mp->virt, mp->phys);
962                                 kfree(mp);
963                                 mempool_free( pmb, phba->mbox_mem_pool);
964                                 return;
965                         } else {
966                                 lpfc_nlp_init(phba, ndlp, NameServer_DID);
967                                 ndlp->nlp_type |= NLP_FABRIC;
968                         }
969                 }
970                 ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
971                 lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
972                 lpfc_issue_els_plogi(phba, ndlp, 0);
973                 if (phba->cfg_fdmi_on) {
974                         ndlp_fdmi = mempool_alloc(phba->nlp_mem_pool,
975                                                                 GFP_KERNEL);
976                         if (ndlp_fdmi) {
977                                 lpfc_nlp_init(phba, ndlp_fdmi, FDMI_DID);
978                                 ndlp_fdmi->nlp_type |= NLP_FABRIC;
979                                 ndlp_fdmi->nlp_state = NLP_STE_PLOGI_ISSUE;
980                                 lpfc_issue_els_plogi(phba, ndlp_fdmi, 0);
981                         }
982                 }
983         }
984
985         lpfc_mbuf_free(phba, mp->virt, mp->phys);
986         kfree(mp);
987         mempool_free( pmb, phba->mbox_mem_pool);
988         return;
989 }
990
991 /*
992  * This routine handles processing a NameServer REG_LOGIN mailbox
993  * command upon completion. It is setup in the LPFC_MBOXQ
994  * as the completion routine when the command is
995  * handed off to the SLI layer.
996  */
997 void
998 lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
999 {
1000         struct lpfc_sli *psli;
1001         MAILBOX_t *mb;
1002         struct lpfc_dmabuf *mp;
1003         struct lpfc_nodelist *ndlp;
1004
1005         psli = &phba->sli;
1006         mb = &pmb->mb;
1007
1008         ndlp = (struct lpfc_nodelist *) pmb->context2;
1009         mp = (struct lpfc_dmabuf *) (pmb->context1);
1010
1011         if (mb->mbxStatus) {
1012                 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1013                 kfree(mp);
1014                 mempool_free( pmb, phba->mbox_mem_pool);
1015                 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
1016
1017                 /* RegLogin failed, so just use loop map to make discovery
1018                    list */
1019                 lpfc_disc_list_loopmap(phba);
1020
1021                 /* Start discovery */
1022                 lpfc_disc_start(phba);
1023                 return;
1024         }
1025
1026         pmb->context1 = NULL;
1027
1028         ndlp->nlp_rpi = mb->un.varWords[0];
1029         ndlp->nlp_type |= NLP_FABRIC;
1030         ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
1031         lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
1032
1033         if (phba->hba_state < LPFC_HBA_READY) {
1034                 /* Link up discovery requires Fabric registration. */
1035                 lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RNN_ID);
1036                 lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RSNN_NN);
1037                 lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RFT_ID);
1038         }
1039
1040         phba->fc_ns_retry = 0;
1041         /* Good status, issue CT Request to NameServer */
1042         if (lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT)) {
1043                 /* Cannot issue NameServer Query, so finish up discovery */
1044                 lpfc_disc_start(phba);
1045         }
1046
1047         lpfc_mbuf_free(phba, mp->virt, mp->phys);
1048         kfree(mp);
1049         mempool_free( pmb, phba->mbox_mem_pool);
1050
1051         return;
1052 }
1053
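/*
 * Register the node with the FC transport via fc_remote_port_add(),
 * record the assigned SCSI target id, and report the FCP target or
 * initiator role with fc_remote_port_rolechg().
 */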
1054 static void
1055 lpfc_register_remote_port(struct lpfc_hba * phba,
1056                             struct lpfc_nodelist * ndlp)
1057 {
1058         struct fc_rport *rport;
1059         struct lpfc_rport_data *rdata;
1060         struct fc_rport_identifiers rport_ids;
1061
1062         /* Remote port has reappeared. Re-register w/ FC transport */
1063         rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
1064         rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
1065         rport_ids.port_id = ndlp->nlp_DID;
1066         rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
1067
1068         ndlp->rport = rport = fc_remote_port_add(phba->host, 0, &rport_ids);
1069         if (!rport) {
1070                 dev_printk(KERN_WARNING, &phba->pcidev->dev,
1071                            "Warning: fc_remote_port_add failed\n");
1072                 return;
1073         }
1074
1075         /* initialize static port data */
1076         rport->maxframe_size = ndlp->nlp_maxframe;
1077         rport->supported_classes = ndlp->nlp_class_sup;
1078         if ((rport->scsi_target_id != -1) &&
1079                 (rport->scsi_target_id < MAX_FCP_TARGET)) {
1080                 ndlp->nlp_sid = rport->scsi_target_id;
1081         }
1082         rdata = rport->dd_data;
1083         rdata->pnode = ndlp;
1084
1085         if (ndlp->nlp_type & NLP_FCP_TARGET)
1086                 rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
1087         if (ndlp->nlp_type & NLP_FCP_INITIATOR)
1088                 rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
1089
1090
1091         if (rport_ids.roles !=  FC_RPORT_ROLE_UNKNOWN)
1092                 fc_remote_port_rolechg(rport, rport_ids.roles);
1093
1094
1095         return;
1096 }
1097
1098 static void
1099 lpfc_unregister_remote_port(struct lpfc_hba * phba,
1100                             struct lpfc_nodelist * ndlp)
1101 {
1102         struct fc_rport *rport = ndlp->rport;
1103         struct lpfc_rport_data *rdata = rport->dd_data;
1104
1105         ndlp->rport = NULL;
1106         rdata->pnode = NULL;
1107         fc_remote_port_delete(rport);
1108
1109         return;
1110 }
1111
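/*
 * Move a node to the requested discovery list: remove it from its current
 * list, adjust the per-list counters and node timers, and make the
 * corresponding FC transport rport add/delete calls once the node is on
 * its new list.
 */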
1112 int
1113 lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list)
1114 {
1115         enum { none, unmapped, mapped } rport_add = none, rport_del = none;
1116         struct lpfc_sli      *psli;
1117
1118         psli = &phba->sli;
1119         /* Sanity check to ensure we are not moving to / from the same list */
1120         if ((nlp->nlp_flag & NLP_LIST_MASK) == list) {
1121                 if (list != NLP_NO_LIST)
1122                         return(0);
1123         }
1124
1125         switch(nlp->nlp_flag & NLP_LIST_MASK) {
1126         case NLP_NO_LIST: /* Not on any list */
1127                 break;
1128         case NLP_UNUSED_LIST:
1129                 phba->fc_unused_cnt--;
1130                 list_del(&nlp->nlp_listp);
1131                 break;
1132         case NLP_PLOGI_LIST:
1133                 phba->fc_plogi_cnt--;
1134                 list_del(&nlp->nlp_listp);
1135                 break;
1136         case NLP_ADISC_LIST:
1137                 phba->fc_adisc_cnt--;
1138                 list_del(&nlp->nlp_listp);
1139                 break;
1140         case NLP_REGLOGIN_LIST:
1141                 phba->fc_reglogin_cnt--;
1142                 list_del(&nlp->nlp_listp);
1143                 break;
1144         case NLP_PRLI_LIST:
1145                 phba->fc_prli_cnt--;
1146                 list_del(&nlp->nlp_listp);
1147                 break;
1148         case NLP_UNMAPPED_LIST:
1149                 phba->fc_unmap_cnt--;
1150                 list_del(&nlp->nlp_listp);
1151                 spin_lock_irq(phba->host->host_lock);
1152                 nlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
1153                 nlp->nlp_type &= ~NLP_FC_NODE;
1154                 spin_unlock_irq(phba->host->host_lock);
1155                 phba->nport_event_cnt++;
1156                 if (nlp->rport)
1157                         rport_del = unmapped;
1158                 break;
1159         case NLP_MAPPED_LIST:
1160                 phba->fc_map_cnt--;
1161                 list_del(&nlp->nlp_listp);
1162                 phba->nport_event_cnt++;
1163                 if (nlp->rport)
1164                         rport_del = mapped;
1165                 break;
1166         case NLP_NPR_LIST:
1167                 phba->fc_npr_cnt--;
1168                 list_del(&nlp->nlp_listp);
1169                 /* Stop delay tmo if taking node off NPR list */
1170                 if ((nlp->nlp_flag & NLP_DELAY_TMO) &&
1171                    (list != NLP_NPR_LIST)) {
1172                         spin_lock_irq(phba->host->host_lock);
1173                         nlp->nlp_flag &= ~NLP_DELAY_TMO;
1174                         spin_unlock_irq(phba->host->host_lock);
1175                         del_timer_sync(&nlp->nlp_delayfunc);
1176                         if (!list_empty(&nlp->els_retry_evt.evt_listp))
1177                                 list_del_init(&nlp->els_retry_evt.evt_listp);
1178                 }
1179                 break;
1180         }
1181
1182         spin_lock_irq(phba->host->host_lock);
1183         nlp->nlp_flag &= ~NLP_LIST_MASK;
1184         spin_unlock_irq(phba->host->host_lock);
1185
1186         /* Add NPort <did> to <num> list */
1187         lpfc_printf_log(phba,
1188                         KERN_INFO,
1189                         LOG_NODE,
1190                         "%d:0904 Add NPort x%x to %d list Data: x%x\n",
1191                         phba->brd_no,
1192                         nlp->nlp_DID, list, nlp->nlp_flag);
1193
1194         switch(list) {
1195         case NLP_NO_LIST: /* No list, just remove it */
1196                 lpfc_nlp_remove(phba, nlp);
1197                 /* as node removed - stop further transport calls */
1198                 rport_del = none;
1199                 break;
1200         case NLP_UNUSED_LIST:
1201                 spin_lock_irq(phba->host->host_lock);
1202                 nlp->nlp_flag |= list;
1203                 spin_unlock_irq(phba->host->host_lock);
1204                 /* Put it at the end of the unused list */
1205                 list_add_tail(&nlp->nlp_listp, &phba->fc_unused_list);
1206                 phba->fc_unused_cnt++;
1207                 break;
1208         case NLP_PLOGI_LIST:
1209                 spin_lock_irq(phba->host->host_lock);
1210                 nlp->nlp_flag |= list;
1211                 spin_unlock_irq(phba->host->host_lock);
1212                 /* Put it at the end of the plogi list */
1213                 list_add_tail(&nlp->nlp_listp, &phba->fc_plogi_list);
1214                 phba->fc_plogi_cnt++;
1215                 break;
1216         case NLP_ADISC_LIST:
1217                 spin_lock_irq(phba->host->host_lock);
1218                 nlp->nlp_flag |= list;
1219                 spin_unlock_irq(phba->host->host_lock);
1220                 /* Put it at the end of the adisc list */
1221                 list_add_tail(&nlp->nlp_listp, &phba->fc_adisc_list);
1222                 phba->fc_adisc_cnt++;
1223                 break;
1224         case NLP_REGLOGIN_LIST:
1225                 spin_lock_irq(phba->host->host_lock);
1226                 nlp->nlp_flag |= list;
1227                 spin_unlock_irq(phba->host->host_lock);
1228                 /* Put it at the end of the reglogin list */
1229                 list_add_tail(&nlp->nlp_listp, &phba->fc_reglogin_list);
1230                 phba->fc_reglogin_cnt++;
1231                 break;
1232         case NLP_PRLI_LIST:
1233                 spin_lock_irq(phba->host->host_lock);
1234                 nlp->nlp_flag |= list;
1235                 spin_unlock_irq(phba->host->host_lock);
1236                 /* Put it at the end of the prli list */
1237                 list_add_tail(&nlp->nlp_listp, &phba->fc_prli_list);
1238                 phba->fc_prli_cnt++;
1239                 break;
1240         case NLP_UNMAPPED_LIST:
1241                 rport_add = unmapped;
1242                 /* ensure all vestiges of "mapped" significance are gone */
1243                 nlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
1244                 spin_lock_irq(phba->host->host_lock);
1245                 nlp->nlp_flag |= list;
1246                 spin_unlock_irq(phba->host->host_lock);
1247                 /* Put it at the end of the unmap list */
1248                 list_add_tail(&nlp->nlp_listp, &phba->fc_nlpunmap_list);
1249                 phba->fc_unmap_cnt++;
1250                 phba->nport_event_cnt++;
1251                 /* stop nodev tmo if running */
1252                 if (nlp->nlp_flag & NLP_NODEV_TMO) {
1253                         spin_lock_irq(phba->host->host_lock);
1254                         nlp->nlp_flag &= ~NLP_NODEV_TMO;
1255                         spin_unlock_irq(phba->host->host_lock);
1256                         del_timer_sync(&nlp->nlp_tmofunc);
1257                         if (!list_empty(&nlp->nodev_timeout_evt.evt_listp))
1258                                 list_del_init(&nlp->nodev_timeout_evt.
1259                                                 evt_listp);
1260
1261                 }
1262                 nlp->nlp_type |= NLP_FC_NODE;
1263                 break;
1264         case NLP_MAPPED_LIST:
1265                 rport_add = mapped;
1266                 spin_lock_irq(phba->host->host_lock);
1267                 nlp->nlp_flag |= list;
1268                 spin_unlock_irq(phba->host->host_lock);
1269                 /* Put it at the end of the map list */
1270                 list_add_tail(&nlp->nlp_listp, &phba->fc_nlpmap_list);
1271                 phba->fc_map_cnt++;
1272                 phba->nport_event_cnt++;
1273                 /* stop nodev tmo if running */
1274                 if (nlp->nlp_flag & NLP_NODEV_TMO) {
1275                         nlp->nlp_flag &= ~NLP_NODEV_TMO;
1276                         del_timer_sync(&nlp->nlp_tmofunc);
1277                         if (!list_empty(&nlp->nodev_timeout_evt.evt_listp))
1278                                 list_del_init(&nlp->nodev_timeout_evt.
1279                                                 evt_listp);
1280
1281                 }
1282                 break;
1283         case NLP_NPR_LIST:
1284                 spin_lock_irq(phba->host->host_lock);
1285                 nlp->nlp_flag |= list;
1286                 spin_unlock_irq(phba->host->host_lock);
1287                 /* Put it at the end of the npr list */
1288                 list_add_tail(&nlp->nlp_listp, &phba->fc_npr_list);
1289                 phba->fc_npr_cnt++;
1290
1291                 if (!(nlp->nlp_flag & NLP_NODEV_TMO)) {
1292                         mod_timer(&nlp->nlp_tmofunc,
1293                                         jiffies + HZ * phba->cfg_nodev_tmo);
1294                 }
1295                 spin_lock_irq(phba->host->host_lock);
1296                 nlp->nlp_flag |= NLP_NODEV_TMO;
1297                 nlp->nlp_flag &= ~NLP_RCV_PLOGI;
1298                 spin_unlock_irq(phba->host->host_lock);
1299                 break;
1300         case NLP_JUST_DQ:
1301                 break;
1302         }
1303
1304         /*
1305          * We make all the calls into the transport after we have
1306          * moved the node between lists. This is so that we don't
1307          * release the lock while in-between lists.
1308          */
1309
1310         /* Don't upcall midlayer if we're unloading */
1311         if (!(phba->fc_flag & FC_UNLOADING)) {
1312                 /*
1313                  * We revalidate the rport pointer as the "add" function
1314                  * may have removed the remote port.
1315                  */
1316                 if ((rport_del != none) && nlp->rport)
1317                         lpfc_unregister_remote_port(phba, nlp);
1318
1319                 if (rport_add != none) {
1320                         /*
1321                          * Tell the fc transport about the port, if we haven't
1322                          * already. If we have, and it's a scsi entity, be
1323                          * sure to unblock any attached scsi devices
1324                          */
1325                         if (!nlp->rport)
1326                                 lpfc_register_remote_port(phba, nlp);
1327
1328                         /*
1329                          * if we added to Mapped list, but the remote port
1330                          * registration failed or assigned a target id outside
1331                          * our presentable range - move the node to the
1332                          * Unmapped List
1333                          */
1334                         if ((rport_add == mapped) &&
1335                             ((!nlp->rport) ||
1336                              (nlp->rport->scsi_target_id == -1) ||
1337                              (nlp->rport->scsi_target_id >= MAX_FCP_TARGET))) {
1338                                 nlp->nlp_state = NLP_STE_UNMAPPED_NODE;
1339                                 spin_lock_irq(phba->host->host_lock);
1340                                 nlp->nlp_flag |= NLP_TGT_NO_SCSIID;
1341                                 spin_unlock_irq(phba->host->host_lock);
1342                                 lpfc_nlp_list(phba, nlp, NLP_UNMAPPED_LIST);
1343                         }
1344                 }
1345         }
1346         return (0);
1347 }
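/*
 * Typical usage, as seen later in this file: the caller updates nlp_state
 * first and then moves the node, e.g.
 *
 *      ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
 *      lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
 *
 * Transport upcalls (rport register/unregister) are only made after the
 * list move, so the lock is never dropped while the node is between lists.
 */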
1348
1349 /*
1350  * Start / restart the rescue timer for Discovery / RSCN handling
1351  */
1352 void
1353 lpfc_set_disctmo(struct lpfc_hba * phba)
1354 {
1355         uint32_t tmo;
1356
1357         if (phba->hba_state == LPFC_LOCAL_CFG_LINK) {
1358                 /* For FAN, the timeout should be greater than edtov */
1359                 tmo = (((phba->fc_edtov + 999) / 1000) + 1);
1360         } else {
1361                 /* Normal discovery timeout should be greater than the ELS/CT
1362                  * timeout; the FC spec states we need 3 * ratov for CT requests.
1363                  */
1364                 tmo = ((phba->fc_ratov * 3) + 3);
1365         }
1366
1367         mod_timer(&phba->fc_disctmo, jiffies + HZ * tmo);
1368         spin_lock_irq(phba->host->host_lock);
1369         phba->fc_flag |= FC_DISC_TMO;
1370         spin_unlock_irq(phba->host->host_lock);
1371
1372         /* Start Discovery Timer state <hba_state> */
1373         lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
1374                         "%d:0247 Start Discovery Timer state x%x "
1375                         "Data: x%x x%lx x%x x%x\n",
1376                         phba->brd_no,
1377                         phba->hba_state, tmo, (unsigned long)&phba->fc_disctmo,
1378                         phba->fc_plogi_cnt, phba->fc_adisc_cnt);
1379
1380         return;
1381 }
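/*
 * Timeout arithmetic sketch (illustrative values, not defaults taken from
 * this driver): fc_edtov appears to be in milliseconds (hence the /1000),
 * so an E_D_TOV of 2000 ms gives tmo = ((2000 + 999) / 1000) + 1 = 3 seconds
 * for the FAN case, while an RA_TOV of 2 seconds gives
 * tmo = (2 * 3) + 3 = 9 seconds for normal discovery.  Either value is then
 * scaled by HZ for mod_timer().
 */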
1382
1383 /*
1384  * Cancel rescue timer for Discovery / RSCN handling
1385  */
1386 int
1387 lpfc_can_disctmo(struct lpfc_hba * phba)
1388 {
1389         /* Turn off the discovery timer if it's running */
1390         if (phba->fc_flag & FC_DISC_TMO) {
1391                 spin_lock_irq(phba->host->host_lock);
1392                 phba->fc_flag &= ~FC_DISC_TMO;
1393                 spin_unlock_irq(phba->host->host_lock);
1394                 del_timer_sync(&phba->fc_disctmo);
1395                 phba->work_hba_events &= ~WORKER_DISC_TMO;
1396         }
1397
1398         /* Cancel Discovery Timer state <hba_state> */
1399         lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
1400                         "%d:0248 Cancel Discovery Timer state x%x "
1401                         "Data: x%x x%x x%x\n",
1402                         phba->brd_no, phba->hba_state, phba->fc_flag,
1403                         phba->fc_plogi_cnt, phba->fc_adisc_cnt);
1404
1405         return (0);
1406 }
1407
1408 /*
1409  * Check specified ring for outstanding IOCB on the SLI queue
1410  * Return true if iocb matches the specified nport
1411  */
1412 int
1413 lpfc_check_sli_ndlp(struct lpfc_hba * phba,
1414                     struct lpfc_sli_ring * pring,
1415                     struct lpfc_iocbq * iocb, struct lpfc_nodelist * ndlp)
1416 {
1417         struct lpfc_sli *psli;
1418         IOCB_t *icmd;
1419
1420         psli = &phba->sli;
1421         icmd = &iocb->iocb;
1422         if (pring->ringno == LPFC_ELS_RING) {
1423                 switch (icmd->ulpCommand) {
1424                 case CMD_GEN_REQUEST64_CR:
1425                         if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi)
1426                                 return (1);
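                        /* No break here: fall through so a GEN_REQUEST64 iocb
                         * is also matched against context1 below.
                         */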
1427                 case CMD_ELS_REQUEST64_CR:
1428                 case CMD_XMIT_ELS_RSP64_CX:
1429                         if (iocb->context1 == (uint8_t *) ndlp)
1430                                 return (1);
1431                 }
1432         } else if (pring->ringno == psli->ip_ring) {
1433
1434         } else if (pring->ringno == psli->fcp_ring) {
1435                 /* Skip match check if waiting to relogin to FCP target */
1436                 if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
1437                   (ndlp->nlp_flag & NLP_DELAY_TMO)) {
1438                         return (0);
1439                 }
1440                 if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
1441                         return (1);
1442                 }
1443         } else if (pring->ringno == psli->next_ring) {
1444
1445         }
1446         return (0);
1447 }
1448
1449 /*
1450  * Free resources / clean up outstanding I/Os
1451  * associated with nlp_rpi in the LPFC_NODELIST entry.
1452  */
1453 static int
1454 lpfc_no_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
1455 {
1456         struct lpfc_sli *psli;
1457         struct lpfc_sli_ring *pring;
1458         struct lpfc_iocbq *iocb, *next_iocb;
1459         IOCB_t *icmd;
1460         uint32_t rpi, i;
1461
1462         /*
1463          * Everything that matches on txcmplq will be returned
1464          * by firmware with a no rpi error.
1465          */
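        /*
         * Note that only the txq is walked below; iocbs already on the
         * txcmplq are left for the firmware to fail once the rpi is gone.
         */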
1466         psli = &phba->sli;
1467         rpi = ndlp->nlp_rpi;
1468         if (rpi) {
1469                 /* Now process each ring */
1470                 for (i = 0; i < psli->num_rings; i++) {
1471                         pring = &psli->ring[i];
1472
1473                         spin_lock_irq(phba->host->host_lock);
1474                         list_for_each_entry_safe(iocb, next_iocb, &pring->txq,
1475                                                 list) {
1476                                 /*
1477                                  * Check to see if iocb matches the nport we are
1478                                  * looking for
1479                                  */
1480                                 if ((lpfc_check_sli_ndlp
1481                                      (phba, pring, iocb, ndlp))) {
1482                                         /* It matches, so dequeue it and call
1483                                            the completion with an error */
1484                                         list_del(&iocb->list);
1485                                         pring->txq_cnt--;
1486                                         if (iocb->iocb_cmpl) {
1487                                                 icmd = &iocb->iocb;
1488                                                 icmd->ulpStatus =
1489                                                     IOSTAT_LOCAL_REJECT;
1490                                                 icmd->un.ulpWord[4] =
1491                                                     IOERR_SLI_ABORTED;
1492                                                 spin_unlock_irq(phba->host->
1493                                                                 host_lock);
1494                                                 (iocb->iocb_cmpl) (phba,
1495                                                                    iocb, iocb);
1496                                                 spin_lock_irq(phba->host->
1497                                                               host_lock);
1498                                         } else
1499                                                 lpfc_sli_release_iocbq(phba,
1500                                                                        iocb);
1501                                 }
1502                         }
1503                         spin_unlock_irq(phba->host->host_lock);
1504
1505                 }
1506         }
1507         return (0);
1508 }
1509
1510 /*
1511  * Free rpi associated with LPFC_NODELIST entry.
1512  * This routine is called from lpfc_freenode(), when we are removing
1513  * a LPFC_NODELIST entry. It is also called if the driver initiates a
1514  * LOGO that completes successfully, and we are waiting to PLOGI back
1515  * to the remote NPort. In addition, it is called after we receive
1516  * an unsolicited ELS cmd, send back a rsp, the rsp completes, and
1517  * we are waiting to PLOGI back to the remote NPort.
1518  */
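/*
 * Returns 1 when the node had a non-zero rpi (an UNREG_LOGIN is attempted
 * and outstanding I/O is flushed via lpfc_no_rpi() before the rpi field is
 * cleared), otherwise 0.
 */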
1519 int
1520 lpfc_unreg_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
1521 {
1522         LPFC_MBOXQ_t *mbox;
1523         int rc;
1524
1525         if (ndlp->nlp_rpi) {
1526                 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
1527                         lpfc_unreg_login(phba, ndlp->nlp_rpi, mbox);
1528                         mbox->mbox_cmpl=lpfc_sli_def_mbox_cmpl;
1529                         rc = lpfc_sli_issue_mbox
1530                                     (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB));
1531                         if (rc == MBX_NOT_FINISHED)
1532                                 mempool_free( mbox, phba->mbox_mem_pool);
1533                 }
1534                 lpfc_no_rpi(phba, ndlp);
1535                 ndlp->nlp_rpi = 0;
1536                 return 1;
1537         }
1538         return 0;
1539 }
1540
1541 /*
1542  * Free resources associated with LPFC_NODELIST entry
1543  * so it can be freed.
1544  */
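/*
 * Cleanup order, as implemented below: dequeue the node (NLP_JUST_DQ),
 * unregister the rport unless the driver is unloading, drop any pending
 * REG_LOGIN mailbox commands that reference the node, abort outstanding
 * ELS traffic, cancel the nodev and delay timers/events, and finally
 * unregister the rpi.
 */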
1545 static int
1546 lpfc_freenode(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
1547 {
1548         LPFC_MBOXQ_t       *mb;
1549         LPFC_MBOXQ_t       *nextmb;
1550         struct lpfc_dmabuf *mp;
1551
1552         /* Cleanup node for NPort <nlp_DID> */
1553         lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1554                         "%d:0900 Cleanup node for NPort x%x "
1555                         "Data: x%x x%x x%x\n",
1556                         phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag,
1557                         ndlp->nlp_state, ndlp->nlp_rpi);
1558
1559         lpfc_nlp_list(phba, ndlp, NLP_JUST_DQ);
1560
1561         /*
1562          * If we are unloading the driver, just leave the remote port in place.
1563          * The driver unload will force the attached devices to detach
1564          * and flush caches without generating flush errors.
1565          */
1566         if ((ndlp->rport) && !(phba->fc_flag & FC_UNLOADING)) {
1567                 lpfc_unregister_remote_port(phba, ndlp);
1568                 ndlp->nlp_sid = NLP_NO_SID;
1569         }
1570
1571         /* cleanup any ndlp on mbox q waiting for reglogin cmpl */
1572         if ((mb = phba->sli.mbox_active)) {
1573                 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
1574                    (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1575                         mb->context2 = NULL;
1576                         mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1577                 }
1578         }
1579         list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
1580                 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
1581                    (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1582                         mp = (struct lpfc_dmabuf *) (mb->context1);
1583                         if (mp) {
1584                                 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1585                                 kfree(mp);
1586                         }
1587                         list_del(&mb->list);
1588                         mempool_free(mb, phba->mbox_mem_pool);
1589                 }
1590         }
1591
1592         lpfc_els_abort(phba,ndlp,0);
1593         spin_lock_irq(phba->host->host_lock);
1594         ndlp->nlp_flag &= ~(NLP_NODEV_TMO|NLP_DELAY_TMO);
1595         spin_unlock_irq(phba->host->host_lock);
1596         del_timer_sync(&ndlp->nlp_tmofunc);
1597
1598         del_timer_sync(&ndlp->nlp_delayfunc);
1599
1600         if (!list_empty(&ndlp->nodev_timeout_evt.evt_listp))
1601                 list_del_init(&ndlp->nodev_timeout_evt.evt_listp);
1602         if (!list_empty(&ndlp->els_retry_evt.evt_listp))
1603                 list_del_init(&ndlp->els_retry_evt.evt_listp);
1604
1605         lpfc_unreg_rpi(phba, ndlp);
1606
1607         return (0);
1608 }
1609
1610 /*
1611  * Check to see if we can free the nlp back to the freelist.
1612  * If we are in the middle of using the nlp in the discovery state
1613  * machine, defer the free until we reach the end of the state machine.
1614  */
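/*
 * If the discovery state machine still holds a reference (nlp_disc_refcnt
 * is non-zero) the node is only flagged NLP_DELAY_REMOVE here; the final
 * lpfc_freenode()/mempool_free() is presumably performed when that
 * reference is released.
 */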
1615 int
1616 lpfc_nlp_remove(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
1617 {
1618         if (ndlp->nlp_flag & NLP_NODEV_TMO) {
1619                 spin_lock_irq(phba->host->host_lock);
1620                 ndlp->nlp_flag &= ~NLP_NODEV_TMO;
1621                 spin_unlock_irq(phba->host->host_lock);
1622                 del_timer_sync(&ndlp->nlp_tmofunc);
1623                 if (!list_empty(&ndlp->nodev_timeout_evt.evt_listp))
1624                         list_del_init(&ndlp->nodev_timeout_evt.evt_listp);
1625
1626         }
1627
1628
1629         if (ndlp->nlp_flag & NLP_DELAY_TMO) {
1630                 spin_lock_irq(phba->host->host_lock);
1631                 ndlp->nlp_flag &= ~NLP_DELAY_TMO;
1632                 spin_unlock_irq(phba->host->host_lock);
1633                 del_timer_sync(&ndlp->nlp_delayfunc);
1634                 if (!list_empty(&ndlp->els_retry_evt.evt_listp))
1635                         list_del_init(&ndlp->els_retry_evt.evt_listp);
1636         }
1637
1638         if (ndlp->nlp_disc_refcnt) {
1639                 spin_lock_irq(phba->host->host_lock);
1640                 ndlp->nlp_flag |= NLP_DELAY_REMOVE;
1641                 spin_unlock_irq(phba->host->host_lock);
1642         }
1643         else {
1644                 lpfc_freenode(phba, ndlp);
1645                 mempool_free( ndlp, phba->nlp_mem_pool);
1646         }
1647         return(0);
1648 }
1649
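/*
 * Match a nodelist entry against a DID.  Besides the direct comparison,
 * the area/domain == 0 cases appear to cover loop devices whose DID carries
 * only an AL_PA.  As an illustrative example (made-up DIDs): with
 * fc_myDID = 0x010200, a search for did = 0x010203 will also match a node
 * whose nlp_DID is 0x000003, because the low byte matches and the node's
 * domain/area are both zero while ours match the searched DID.
 */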
1650 static int
1651 lpfc_matchdid(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, uint32_t did)
1652 {
1653         D_ID mydid;
1654         D_ID ndlpdid;
1655         D_ID matchdid;
1656
1657         if (did == Bcast_DID)
1658                 return (0);
1659
1660         if (ndlp->nlp_DID == 0) {
1661                 return (0);
1662         }
1663
1664         /* First check for Direct match */
1665         if (ndlp->nlp_DID == did)
1666                 return (1);
1667
1668         /* Next check for an area/domain identically equal to 0 match */
1669         mydid.un.word = phba->fc_myDID;
1670         if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) {
1671                 return (0);
1672         }
1673
1674         matchdid.un.word = did;
1675         ndlpdid.un.word = ndlp->nlp_DID;
1676         if (matchdid.un.b.id == ndlpdid.un.b.id) {
1677                 if ((mydid.un.b.domain == matchdid.un.b.domain) &&
1678                     (mydid.un.b.area == matchdid.un.b.area)) {
1679                         if ((ndlpdid.un.b.domain == 0) &&
1680                             (ndlpdid.un.b.area == 0)) {
1681                                 if (ndlpdid.un.b.id)
1682                                         return (1);
1683                         }
1684                         return (0);
1685                 }
1686
1687                 matchdid.un.word = ndlp->nlp_DID;
1688                 if ((mydid.un.b.domain == ndlpdid.un.b.domain) &&
1689                     (mydid.un.b.area == ndlpdid.un.b.area)) {
1690                         if ((matchdid.un.b.domain == 0) &&
1691                             (matchdid.un.b.area == 0)) {
1692                                 if (matchdid.un.b.id)
1693                                         return (1);
1694                         }
1695                 }
1696         }
1697         return (0);
1698 }
1699
1700 /* Search for a nodelist entry on a specific list */
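/*
 * 'order' is a bitmask of NLP_SEARCH_* values selecting which of the
 * per-state lists to walk.  The data1 word logged on a hit packs, from the
 * most significant byte down, nlp_state, nlp_xri, nlp_type and the low
 * byte of nlp_rpi.
 */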
1701 struct lpfc_nodelist *
1702 lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did)
1703 {
1704         struct lpfc_nodelist *ndlp, *next_ndlp;
1705         uint32_t data1;
1706
1707         if (order & NLP_SEARCH_UNMAPPED) {
1708                 list_for_each_entry_safe(ndlp, next_ndlp,
1709                                          &phba->fc_nlpunmap_list, nlp_listp) {
1710                         if (lpfc_matchdid(phba, ndlp, did)) {
1711                                 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1712                                          ((uint32_t) ndlp->nlp_xri << 16) |
1713                                          ((uint32_t) ndlp->nlp_type << 8) |
1714                                          ((uint32_t) ndlp->nlp_rpi & 0xff));
1715                                 /* FIND node DID unmapped */
1716                                 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1717                                                 "%d:0929 FIND node DID unmapped"
1718                                                 " Data: x%p x%x x%x x%x\n",
1719                                                 phba->brd_no,
1720                                                 ndlp, ndlp->nlp_DID,
1721                                                 ndlp->nlp_flag, data1);
1722                                 return (ndlp);
1723                         }
1724                 }
1725         }
1726
1727         if (order & NLP_SEARCH_MAPPED) {
1728                 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nlpmap_list,
1729                                         nlp_listp) {
1730                         if (lpfc_matchdid(phba, ndlp, did)) {
1731
1732                                 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1733                                          ((uint32_t) ndlp->nlp_xri << 16) |
1734                                          ((uint32_t) ndlp->nlp_type << 8) |
1735                                          ((uint32_t) ndlp->nlp_rpi & 0xff));
1736                                 /* FIND node DID mapped */
1737                                 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1738                                                 "%d:0930 FIND node DID mapped "
1739                                                 "Data: x%p x%x x%x x%x\n",
1740                                                 phba->brd_no,
1741                                                 ndlp, ndlp->nlp_DID,
1742                                                 ndlp->nlp_flag, data1);
1743                                 return (ndlp);
1744                         }
1745                 }
1746         }
1747
1748         if (order & NLP_SEARCH_PLOGI) {
1749                 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_plogi_list,
1750                                         nlp_listp) {
1751                         if (lpfc_matchdid(phba, ndlp, did)) {
1752
1753                                 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1754                                          ((uint32_t) ndlp->nlp_xri << 16) |
1755                                          ((uint32_t) ndlp->nlp_type << 8) |
1756                                          ((uint32_t) ndlp->nlp_rpi & 0xff));
1757                                 /* LOG change to PLOGI */
1758                                 /* FIND node DID plogi */
1759                                 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1760                                                 "%d:0908 FIND node DID plogi "
1761                                                 "Data: x%p x%x x%x x%x\n",
1762                                                 phba->brd_no,
1763                                                 ndlp, ndlp->nlp_DID,
1764                                                 ndlp->nlp_flag, data1);
1765                                 return (ndlp);
1766                         }
1767                 }
1768         }
1769
1770         if (order & NLP_SEARCH_ADISC) {
1771                 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_adisc_list,
1772                                         nlp_listp) {
1773                         if (lpfc_matchdid(phba, ndlp, did)) {
1774
1775                                 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1776                                          ((uint32_t) ndlp->nlp_xri << 16) |
1777                                          ((uint32_t) ndlp->nlp_type << 8) |
1778                                          ((uint32_t) ndlp->nlp_rpi & 0xff));
1779                                 /* LOG change to ADISC */
1780                                 /* FIND node DID adisc */
1781                                 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1782                                                 "%d:0931 FIND node DID adisc "
1783                                                 "Data: x%p x%x x%x x%x\n",
1784                                                 phba->brd_no,
1785                                                 ndlp, ndlp->nlp_DID,
1786                                                 ndlp->nlp_flag, data1);
1787                                 return (ndlp);
1788                         }
1789                 }
1790         }
1791
1792         if (order & NLP_SEARCH_REGLOGIN) {
1793                 list_for_each_entry_safe(ndlp, next_ndlp,
1794                                          &phba->fc_reglogin_list, nlp_listp) {
1795                         if (lpfc_matchdid(phba, ndlp, did)) {
1796
1797                                 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1798                                          ((uint32_t) ndlp->nlp_xri << 16) |
1799                                          ((uint32_t) ndlp->nlp_type << 8) |
1800                                          ((uint32_t) ndlp->nlp_rpi & 0xff));
1801                                 /* LOG change to REGLOGIN */
1802                                 /* FIND node DID reglogin */
1803                                 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1804                                                 "%d:0931 FIND node DID reglogin"
1805                                                 " Data: x%p x%x x%x x%x\n",
1806                                                 phba->brd_no,
1807                                                 ndlp, ndlp->nlp_DID,
1808                                                 ndlp->nlp_flag, data1);
1809                                 return (ndlp);
1810                         }
1811                 }
1812         }
1813
1814         if (order & NLP_SEARCH_PRLI) {
1815                 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_prli_list,
1816                                         nlp_listp) {
1817                         if (lpfc_matchdid(phba, ndlp, did)) {
1818
1819                                 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1820                                          ((uint32_t) ndlp->nlp_xri << 16) |
1821                                          ((uint32_t) ndlp->nlp_type << 8) |
1822                                          ((uint32_t) ndlp->nlp_rpi & 0xff));
1823                                 /* LOG change to PRLI */
1824                                 /* FIND node DID prli */
1825                                 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1826                                                 "%d:0931 FIND node DID prli "
1827                                                 "Data: x%p x%x x%x x%x\n",
1828                                                 phba->brd_no,
1829                                                 ndlp, ndlp->nlp_DID,
1830                                                 ndlp->nlp_flag, data1);
1831                                 return (ndlp);
1832                         }
1833                 }
1834         }
1835
1836         if (order & NLP_SEARCH_NPR) {
1837                 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
1838                                         nlp_listp) {
1839                         if (lpfc_matchdid(phba, ndlp, did)) {
1840
1841                                 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1842                                          ((uint32_t) ndlp->nlp_xri << 16) |
1843                                          ((uint32_t) ndlp->nlp_type << 8) |
1844                                          ((uint32_t) ndlp->nlp_rpi & 0xff));
1845                                 /* LOG change to NPR */
1846                                 /* FIND node DID npr */
1847                                 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1848                                                 "%d:0931 FIND node DID npr "
1849                                                 "Data: x%p x%x x%x x%x\n",
1850                                                 phba->brd_no,
1851                                                 ndlp, ndlp->nlp_DID,
1852                                                 ndlp->nlp_flag, data1);
1853                                 return (ndlp);
1854                         }
1855                 }
1856         }
1857
1858         if (order & NLP_SEARCH_UNUSED) {
1859                 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_unused_list,
1860                                         nlp_listp) {
1861                         if (lpfc_matchdid(phba, ndlp, did)) {
1862
1863                                 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1864                                          ((uint32_t) ndlp->nlp_xri << 16) |
1865                                          ((uint32_t) ndlp->nlp_type << 8) |
1866                                          ((uint32_t) ndlp->nlp_rpi & 0xff));
1867                                 /* LOG change to UNUSED */
1868                                 /* FIND node DID unused */
1869                                 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1870                                                 "%d:0931 FIND node DID unused "
1871                                                 "Data: x%p x%x x%x x%x\n",
1872                                                 phba->brd_no,
1873                                                 ndlp, ndlp->nlp_DID,
1874                                                 ndlp->nlp_flag, data1);
1875                                 return (ndlp);
1876                         }
1877                 }
1878         }
1879
1880         /* FIND node did <did> NOT FOUND */
1881         lpfc_printf_log(phba,
1882                         KERN_INFO,
1883                         LOG_NODE,
1884                         "%d:0932 FIND node did x%x NOT FOUND Data: x%x\n",
1885                         phba->brd_no, did, order);
1886
1887         /* no match found */
1888         return NULL;
1889 }
1890
1891 struct lpfc_nodelist *
1892 lpfc_setup_disc_node(struct lpfc_hba * phba, uint32_t did)
1893 {
1894         struct lpfc_nodelist *ndlp;
1895         uint32_t flg;
1896
1897         ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, did);
1898         if (!ndlp) {
1899                 if ((phba->fc_flag & FC_RSCN_MODE) &&
1900                    ((lpfc_rscn_payload_check(phba, did) == 0)))
1901                         return NULL;
1902                 ndlp = (struct lpfc_nodelist *)
1903                      mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
1904                 if (!ndlp)
1905                         return NULL;
1906                 lpfc_nlp_init(phba, ndlp, did);
1907                 ndlp->nlp_state = NLP_STE_NPR_NODE;
1908                 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
1909                 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
1910                 return ndlp;
1911         }
1912         if (phba->fc_flag & FC_RSCN_MODE) {
1913                 if (lpfc_rscn_payload_check(phba, did)) {
1914                         ndlp->nlp_flag |= NLP_NPR_2B_DISC;
1915
1916                         /* Since this node is marked for discovery,
1917                          * delay timeout is not needed.
1918                          */
1919                         if (ndlp->nlp_flag & NLP_DELAY_TMO) {
1920                                 ndlp->nlp_flag &= ~NLP_DELAY_TMO;
1921                                 spin_unlock_irq(phba->host->host_lock);
1922                                 del_timer_sync(&ndlp->nlp_delayfunc);
1923                                 spin_lock_irq(phba->host->host_lock);
1924                                 if (!list_empty(&ndlp->els_retry_evt.
1925                                                 evt_listp))
1926                                         list_del_init(&ndlp->els_retry_evt.
1927                                                 evt_listp);
1928                         }
1929                 }
1930                 else {
1931                         ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1932                         ndlp = NULL;
1933                 }
1934         }
1935         else {
1936                 flg = ndlp->nlp_flag & NLP_LIST_MASK;
1937                 if ((flg == NLP_ADISC_LIST) || (flg == NLP_PLOGI_LIST))
1938                         return NULL;
1939                 ndlp->nlp_state = NLP_STE_NPR_NODE;
1940                 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
1941                 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
1942         }
1943         return ndlp;
1944 }
1945
1946 /* Build a list of nodes to discover based on the loopmap */
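/*
 * When no ALPA map was reported, lpfcAlpaArray is walked directly (index j)
 * if cfg_scan_down is set, i.e. from the 0xEF entry downwards, and in
 * reverse (FC_MAXLOOP - j - 1, starting at 0x01) otherwise, assuming
 * FC_MAXLOOP matches the size of lpfcAlpaArray.
 */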
1947 void
1948 lpfc_disc_list_loopmap(struct lpfc_hba * phba)
1949 {
1950         int j;
1951         uint32_t alpa, index;
1952
1953         if (phba->hba_state <= LPFC_LINK_DOWN) {
1954                 return;
1955         }
1956         if (phba->fc_topology != TOPOLOGY_LOOP) {
1957                 return;
1958         }
1959
1960         /* Check for loop map present or not */
1961         if (phba->alpa_map[0]) {
1962                 for (j = 1; j <= phba->alpa_map[0]; j++) {
1963                         alpa = phba->alpa_map[j];
1964
1965                         if (((phba->fc_myDID & 0xff) == alpa) || (alpa == 0)) {
1966                                 continue;
1967                         }
1968                         lpfc_setup_disc_node(phba, alpa);
1969                 }
1970         } else {
1971                 /* No ALPA map, so try all ALPAs */
1972                 for (j = 0; j < FC_MAXLOOP; j++) {
1973                         /* If cfg_scan_down is set, start from highest
1974                          * ALPA (0xef) to lowest (0x1).
1975                          */
1976                         if (phba->cfg_scan_down)
1977                                 index = j;
1978                         else
1979                                 index = FC_MAXLOOP - j - 1;
1980                         alpa = lpfcAlpaArray[index];
1981                         if ((phba->fc_myDID & 0xff) == alpa) {
1982                                 continue;
1983                         }
1984
1985                         lpfc_setup_disc_node(phba, alpa);
1986                 }
1987         }
1988         return;
1989 }
1990
1991 /* Start Link up / RSCN discovery on NPR list */
1992 void
1993 lpfc_disc_start(struct lpfc_hba * phba)
1994 {
1995         struct lpfc_sli *psli;
1996         LPFC_MBOXQ_t *mbox;
1997         struct lpfc_nodelist *ndlp, *next_ndlp;
1998         uint32_t did_changed, num_sent;
1999         uint32_t clear_la_pending;
2000         int rc;
2001
2002         psli = &phba->sli;
2003
2004         if (phba->hba_state <= LPFC_LINK_DOWN) {
2005                 return;
2006         }
2007         if (phba->hba_state == LPFC_CLEAR_LA)
2008                 clear_la_pending = 1;
2009         else
2010                 clear_la_pending = 0;
2011
2012         if (phba->hba_state < LPFC_HBA_READY) {
2013                 phba->hba_state = LPFC_DISC_AUTH;
2014         }
2015         lpfc_set_disctmo(phba);
2016
2017         if (phba->fc_prevDID == phba->fc_myDID) {
2018                 did_changed = 0;
2019         } else {
2020                 did_changed = 1;
2021         }
2022         phba->fc_prevDID = phba->fc_myDID;
2023         phba->num_disc_nodes = 0;
2024
2025         /* Start Discovery state <hba_state> */
2026         lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
2027                         "%d:0202 Start Discovery hba state x%x "
2028                         "Data: x%x x%x x%x\n",
2029                         phba->brd_no, phba->hba_state, phba->fc_flag,
2030                         phba->fc_plogi_cnt, phba->fc_adisc_cnt);
2031
2032         /* If our did changed, we MUST do PLOGI */
2033         list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
2034                                 nlp_listp) {
2035                 if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
2036                         if (did_changed) {
2037                                 spin_lock_irq(phba->host->host_lock);
2038                                 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2039                                 spin_unlock_irq(phba->host->host_lock);
2040                         }
2041                 }
2042         }
2043
2044         /* First do ADISCs - if any */
2045         num_sent = lpfc_els_disc_adisc(phba);
2046
2047         if (num_sent)
2048                 return;
2049
2050         if ((phba->hba_state < LPFC_HBA_READY) && (!clear_la_pending)) {
2051                 /* If we get here, there is nothing to ADISC */
2052                 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
2053                         phba->hba_state = LPFC_CLEAR_LA;
2054                         lpfc_clear_la(phba, mbox);
2055                         mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
2056                         rc = lpfc_sli_issue_mbox(phba, mbox,
2057                                                  (MBX_NOWAIT | MBX_STOP_IOCB));
2058                         if (rc == MBX_NOT_FINISHED) {
2059                                 mempool_free( mbox, phba->mbox_mem_pool);
2060                                 lpfc_disc_flush_list(phba);
2061                                 psli->ring[(psli->ip_ring)].flag &=
2062                                         ~LPFC_STOP_IOCB_EVENT;
2063                                 psli->ring[(psli->fcp_ring)].flag &=
2064                                         ~LPFC_STOP_IOCB_EVENT;
2065                                 psli->ring[(psli->next_ring)].flag &=
2066                                         ~LPFC_STOP_IOCB_EVENT;
2067                                 phba->hba_state = LPFC_HBA_READY;
2068                         }
2069                 }
2070         } else {
2071                 /* Next do PLOGIs - if any */
2072                 num_sent = lpfc_els_disc_plogi(phba);
2073
2074                 if (num_sent)
2075                         return;
2076
2077                 if (phba->fc_flag & FC_RSCN_MODE) {
2078                         /* Check to see if more RSCNs came in while we
2079                          * were processing this one.
2080                          */
2081                         if ((phba->fc_rscn_id_cnt == 0) &&
2082                             (!(phba->fc_flag & FC_RSCN_DISCOVERY))) {
2083                                 spin_lock_irq(phba->host->host_lock);
2084                                 phba->fc_flag &= ~FC_RSCN_MODE;
2085                                 spin_unlock_irq(phba->host->host_lock);
2086                         }
2087                         else
2088                                 lpfc_els_handle_rscn(phba);
2089                 }
2090         }
2091         return;
2092 }
2093
2094 /*
2095  *  Ignore completion for all IOCBs on tx and txcmpl queue for ELS
2096  *  ring that match the specified nodelist.
2097  */
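/*
 * txq entries that reference the node are simply freed; txcmplq entries
 * have iocb_cmpl cleared and their command/response buffers released, with
 * the response buffer parked on phba->freebufList so an UNREG mailbox can
 * take effect first.
 */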
2098 static void
2099 lpfc_free_tx(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
2100 {
2101         struct lpfc_sli *psli;
2102         IOCB_t     *icmd;
2103         struct lpfc_iocbq    *iocb, *next_iocb;
2104         struct lpfc_sli_ring *pring;
2105         struct lpfc_dmabuf   *mp;
2106
2107         psli = &phba->sli;
2108         pring = &psli->ring[LPFC_ELS_RING];
2109
2110         /* Error out the matching iocbs on the txq or txcmplq.
2111          * First check the txq.
2112          */
2113         list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
2114                 if (iocb->context1 != ndlp) {
2115                         continue;
2116                 }
2117                 icmd = &iocb->iocb;
2118                 if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
2119                     (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {
2120
2121                         list_del(&iocb->list);
2122                         pring->txq_cnt--;
2123                         lpfc_els_free_iocb(phba, iocb);
2124                 }
2125         }
2126
2127         /* Next check the txcmplq */
2128         list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
2129                 if (iocb->context1 != ndlp) {
2130                         continue;
2131                 }
2132                 icmd = &iocb->iocb;
2133                 if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
2134                     (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {
2135
2136                         iocb->iocb_cmpl = NULL;
2137                         /* context2 = cmd, context2->next = rsp, context3 =
2138                            bpl */
2139                         if (iocb->context2) {
2140                                 /* Free the response IOCB before handling the
2141                                    command. */
2142
2143                                 mp = (struct lpfc_dmabuf *) (iocb->context2);
2144                                 mp = list_get_first(&mp->list,
2145                                                     struct lpfc_dmabuf,
2146                                                     list);
2147                                 if (mp) {
2148                                         /* Delay before releasing rsp buffer to
2149                                          * give UNREG mbox a chance to take
2150                                          * effect.
2151                                          */
2152                                         list_add(&mp->list,
2153                                                 &phba->freebufList);
2154                                 }
2155                                 lpfc_mbuf_free(phba,
2156                                                ((struct lpfc_dmabuf *)
2157                                                 iocb->context2)->virt,
2158                                                ((struct lpfc_dmabuf *)
2159                                                 iocb->context2)->phys);
2160                                 kfree(iocb->context2);
2161                         }
2162
2163                         if (iocb->context3) {
2164                                 lpfc_mbuf_free(phba,
2165                                                ((struct lpfc_dmabuf *)
2166                                                 iocb->context3)->virt,
2167                                                ((struct lpfc_dmabuf *)
2168                                                 iocb->context3)->phys);
2169                                 kfree(iocb->context3);
2170                         }
2171                 }
2172         }
2173
2174         return;
2175 }
2176
2177 void
2178 lpfc_disc_flush_list(struct lpfc_hba * phba)
2179 {
2180         struct lpfc_nodelist *ndlp, *next_ndlp;
2181
2182         if (phba->fc_plogi_cnt) {
2183                 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_plogi_list,
2184                                         nlp_listp) {
2185                         lpfc_free_tx(phba, ndlp);
2186                         lpfc_nlp_remove(phba, ndlp);
2187                 }
2188         }
2189         if (phba->fc_adisc_cnt) {
2190                 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_adisc_list,
2191                                         nlp_listp) {
2192                         lpfc_free_tx(phba, ndlp);
2193                         lpfc_nlp_remove(phba, ndlp);
2194                 }
2195         }
2196         return;
2197 }
2198
2199 /*****************************************************************************/
2200 /*
2201  * NAME:     lpfc_disc_timeout
2202  *
2203  * FUNCTION: Fibre Channel driver discovery timeout routine.
2204  *
2205  * EXECUTION ENVIRONMENT: interrupt only
2206  *
2207  * CALLED FROM:
2208  *      Timer function
2209  *
2210  * RETURNS:
2211  *      none
2212  */
2213 /*****************************************************************************/
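/*
 * The timer callback below only flags WORKER_DISC_TMO and wakes the worker
 * thread; the actual processing is done later, presumably from that worker
 * context, by lpfc_disc_timeout_handler().
 */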
2214 void
2215 lpfc_disc_timeout(unsigned long ptr)
2216 {
2217         struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
2218         unsigned long flags = 0;
2219
2220         if (unlikely(!phba))
2221                 return;
2222
2223         spin_lock_irqsave(phba->host->host_lock, flags);
2224         if (!(phba->work_hba_events & WORKER_DISC_TMO)) {
2225                 phba->work_hba_events |= WORKER_DISC_TMO;
2226                 if (phba->work_wait)
2227                         wake_up(phba->work_wait);
2228         }
2229         spin_unlock_irqrestore(phba->host->host_lock, flags);
2230         return;
2231 }
2232
2233 static void
2234 lpfc_disc_timeout_handler(struct lpfc_hba *phba)
2235 {
2236         struct lpfc_sli *psli;
2237         struct lpfc_nodelist *ndlp, *next_ndlp;
2238         LPFC_MBOXQ_t *clearlambox, *initlinkmbox;
2239         int rc, clrlaerr = 0;
2240
2241         if (unlikely(!phba))
2242                 return;
2243
2244         if (!(phba->fc_flag & FC_DISC_TMO))
2245                 return;
2246
2247         psli = &phba->sli;
2248
2249         spin_lock_irq(phba->host->host_lock);
2250         phba->fc_flag &= ~FC_DISC_TMO;
2251         spin_unlock_irq(phba->host->host_lock);
2252
2253         switch (phba->hba_state) {
2254
2255         case LPFC_LOCAL_CFG_LINK:
2256         /* hba_state is identically LPFC_LOCAL_CFG_LINK while waiting for FAN */
2257                 /* FAN timeout */
2258                 lpfc_printf_log(phba,
2259                                  KERN_WARNING,
2260                                  LOG_DISCOVERY,
2261                                  "%d:0221 FAN timeout\n",
2262                                  phba->brd_no);
2263
2264                 /* Start discovery by sending FLOGI, clean up old rpis */
2265                 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
2266                                         nlp_listp) {
2267                         if (ndlp->nlp_type & NLP_FABRIC) {
2268                                 /* Clean up the ndlp on Fabric connections */
2269                                 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
2270                         }
2271                         else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
2272                                 /* Fail outstanding IO now since device
2273                                  * is marked for PLOGI.
2274                                  */
2275                                 lpfc_unreg_rpi(phba, ndlp);
2276                         }
2277                 }
2278                 phba->hba_state = LPFC_FLOGI;
2279                 lpfc_set_disctmo(phba);
2280                 lpfc_initial_flogi(phba);
2281                 break;
2282
2283         case LPFC_FLOGI:
2284         /* hba_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */
2285                 /* Initial FLOGI timeout */
2286                 lpfc_printf_log(phba,
2287                                  KERN_ERR,
2288                                  LOG_DISCOVERY,
2289                                  "%d:0222 Initial FLOGI timeout\n",
2290                                  phba->brd_no);
2291
2292                 /* Assume no Fabric and go on with discovery.
2293                  * Check for outstanding ELS FLOGI to abort.
2294                  */
2295
2296                 /* FLOGI failed, so just use loop map to make discovery list */
2297                 lpfc_disc_list_loopmap(phba);
2298
2299                 /* Start discovery */
2300                 lpfc_disc_start(phba);
2301                 break;
2302
2303         case LPFC_FABRIC_CFG_LINK:
2304         /* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for
2305            NameServer login */
2306                 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2307                                 "%d:0223 Timeout while waiting for NameServer "
2308                                 "login\n", phba->brd_no);
2309
2310                 /* Next look for NameServer ndlp */
2311                 ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, NameServer_DID);
2312                 if (ndlp)
2313                         lpfc_nlp_remove(phba, ndlp);
2314                 /* Start discovery */
2315                 lpfc_disc_start(phba);
2316                 break;
2317
2318         case LPFC_NS_QRY:
2319         /* Check for wait for NameServer Rsp timeout */
2320                 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2321                                 "%d:0224 NameServer Query timeout "
2322                                 "Data: x%x x%x\n",
2323                                 phba->brd_no,
2324                                 phba->fc_ns_retry, LPFC_MAX_NS_RETRY);
2325
2326                 ndlp = lpfc_findnode_did(phba, NLP_SEARCH_UNMAPPED,
2327                                                                 NameServer_DID);
2328                 if (ndlp) {
2329                         if (phba->fc_ns_retry < LPFC_MAX_NS_RETRY) {
2330                                 /* Try it one more time */
2331                                 rc = lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT);
2332                                 if (rc == 0)
2333                                         break;
2334                         }
2335                         phba->fc_ns_retry = 0;
2336                 }
2337
2338                 /* Nothing to authenticate, so CLEAR_LA right now */
2339                 clearlambox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2340                 if (!clearlambox) {
2341                         clrlaerr = 1;
2342                         lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2343                                         "%d:0226 Device Discovery "
2344                                         "completion error\n",
2345                                         phba->brd_no);
2346                         phba->hba_state = LPFC_HBA_ERROR;
2347                         break;
2348                 }
2349
2350                 phba->hba_state = LPFC_CLEAR_LA;
2351                 lpfc_clear_la(phba, clearlambox);
2352                 clearlambox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
2353                 rc = lpfc_sli_issue_mbox(phba, clearlambox,
2354                                          (MBX_NOWAIT | MBX_STOP_IOCB));
2355                 if (rc == MBX_NOT_FINISHED) {
2356                         mempool_free(clearlambox, phba->mbox_mem_pool);
2357                         clrlaerr = 1;
2358                         break;
2359                 }
2360
2361                 /* Set up and issue mailbox INITIALIZE LINK command */
2362                 initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2363                 if (!initlinkmbox) {
2364                         lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2365                                         "%d:0226 Device Discovery "
2366                                         "completion error\n",
2367                                         phba->brd_no);
2368                         phba->hba_state = LPFC_HBA_ERROR;
2369                         break;
2370                 }
2371
2372                 lpfc_linkdown(phba);
2373                 lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
2374                                phba->cfg_link_speed);
2375                 initlinkmbox->mb.un.varInitLnk.lipsr_AL_PA = 0;
2376                 rc = lpfc_sli_issue_mbox(phba, initlinkmbox,
2377                                          (MBX_NOWAIT | MBX_STOP_IOCB));
2378                 if (rc == MBX_NOT_FINISHED)
2379                         mempool_free(initlinkmbox, phba->mbox_mem_pool);
2380
2381                 break;
2382
2383         case LPFC_DISC_AUTH:
2384         /* Node Authentication timeout */
2385                 lpfc_printf_log(phba,
2386                                  KERN_ERR,
2387                                  LOG_DISCOVERY,
2388                                  "%d:0227 Node Authentication timeout\n",
2389                                  phba->brd_no);
2390                 lpfc_disc_flush_list(phba);
2391                 clearlambox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2392                 if (!clearlambox) {
2393                         clrlaerr = 1;
2394                         lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2395                                         "%d:0226 Device Discovery "
2396                                         "completion error\n",
2397                                         phba->brd_no);
2398                         phba->hba_state = LPFC_HBA_ERROR;
2399                         break;
2400                 }
2401                 phba->hba_state = LPFC_CLEAR_LA;
2402                 lpfc_clear_la(phba, clearlambox);
2403                 clearlambox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
2404                 rc = lpfc_sli_issue_mbox(phba, clearlambox,
2405                                          (MBX_NOWAIT | MBX_STOP_IOCB));
2406                 if (rc == MBX_NOT_FINISHED) {
2407                         mempool_free(clearlambox, phba->mbox_mem_pool);
2408                         clrlaerr = 1;
2409                 }
2410                 break;
2411
2412         case LPFC_CLEAR_LA:
2413         /* CLEAR LA timeout */
2414                 lpfc_printf_log(phba,
2415                                  KERN_ERR,
2416                                  LOG_DISCOVERY,
2417                                  "%d:0228 CLEAR LA timeout\n",
2418                                  phba->brd_no);
2419                 clrlaerr = 1;
2420                 break;
2421
2422         case LPFC_HBA_READY:
2423                 if (phba->fc_flag & FC_RSCN_MODE) {
2424                         lpfc_printf_log(phba,
2425                                         KERN_ERR,
2426                                         LOG_DISCOVERY,
2427                                         "%d:0231 RSCN timeout Data: x%x x%x\n",
2428                                         phba->brd_no,
2429                                         phba->fc_ns_retry, LPFC_MAX_NS_RETRY);
2430
2431                         /* Cleanup any outstanding ELS commands */
2432                         lpfc_els_flush_cmd(phba);
2433
2434                         lpfc_els_flush_rscn(phba);
2435                         lpfc_disc_flush_list(phba);
2436                 }
2437                 break;
2438         }
2439
2440         if (clrlaerr) {
2441                 lpfc_disc_flush_list(phba);
2442                 psli->ring[(psli->ip_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
2443                 psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
2444                 psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
2445                 phba->hba_state = LPFC_HBA_READY;
2446         }
2447
2448         return;
2449 }
2450
2451 static void
2452 lpfc_nodev_timeout(unsigned long ptr)
2453 {
2454         struct lpfc_hba *phba;
2455         struct lpfc_nodelist *ndlp;
2456         unsigned long iflag;
2457         struct lpfc_work_evt  *evtp;
2458
2459         ndlp = (struct lpfc_nodelist *)ptr;
2460         phba = ndlp->nlp_phba;
2461         evtp = &ndlp->nodev_timeout_evt;
2462         spin_lock_irqsave(phba->host->host_lock, iflag);
2463
2464         if (!list_empty(&evtp->evt_listp)) {
2465                 spin_unlock_irqrestore(phba->host->host_lock, iflag);
2466                 return;
2467         }
2468         evtp->evt_arg1  = ndlp;
2469         evtp->evt       = LPFC_EVT_NODEV_TMO;
2470         list_add_tail(&evtp->evt_listp, &phba->work_list);
2471         if (phba->work_wait)
2472                 wake_up(phba->work_wait);
2473
2474         spin_unlock_irqrestore(phba->host->host_lock, iflag);
2475         return;
2476 }
2477
2478
2479 /*
2480  * This routine handles processing an FDMI REG_LOGIN mailbox
2481  * command upon completion. It is set up in the LPFC_MBOXQ
2482  * as the completion routine when the command is
2483  * handed off to the SLI layer.
2484  */
2485 void
2486 lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
2487 {
2488         struct lpfc_sli *psli;
2489         MAILBOX_t *mb;
2490         struct lpfc_dmabuf *mp;
2491         struct lpfc_nodelist *ndlp;
2492
2493         psli = &phba->sli;
2494         mb = &pmb->mb;
2495
2496         ndlp = (struct lpfc_nodelist *) pmb->context2;
2497         mp = (struct lpfc_dmabuf *) (pmb->context1);
2498
2499         pmb->context1 = NULL;
2500
2501         ndlp->nlp_rpi = mb->un.varWords[0];
2502         ndlp->nlp_type |= NLP_FABRIC;
2503         ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
2504         lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
2505
2506         /* Start issuing Fabric-Device Management Interface (FDMI)
2507          * command to 0xfffffa (FDMI well known port)
2508          */
2509         if (phba->cfg_fdmi_on == 1) {
2510                 lpfc_fdmi_cmd(phba, ndlp, SLI_MGMT_DHBA);
2511         } else {
2512                 /*
2513                  * Delay issuing FDMI command if fdmi-on=2
2514                  * (supporting RPA/hostname)
2515                  */
2516                 mod_timer(&phba->fc_fdmitmo, jiffies + HZ * 60);
2517         }
2518
2519         lpfc_mbuf_free(phba, mp->virt, mp->phys);
2520         kfree(mp);
2521         mempool_free( pmb, phba->mbox_mem_pool);
2522
2523         return;
2524 }
2525
2526 /*
2527  * This routine looks up the ndlp lists
2528  * for the given RPI. If the RPI is found
2529  * it returns the node list pointer,
2530  * else it returns NULL.
2531  */
2532 struct lpfc_nodelist *
2533 lpfc_findnode_rpi(struct lpfc_hba * phba, uint16_t rpi)
2534 {
2535         struct lpfc_nodelist *ndlp;
2536         struct list_head * lists[]={&phba->fc_nlpunmap_list,
2537                                     &phba->fc_nlpmap_list,
2538                                     &phba->fc_plogi_list,
2539                                     &phba->fc_adisc_list,
2540                                     &phba->fc_reglogin_list};
2541         int i;
2542
2543         for (i = 0; i < ARRAY_SIZE(lists); i++ )
2544                 list_for_each_entry(ndlp, lists[i], nlp_listp)
2545                         if (ndlp->nlp_rpi == rpi)
2546                                 return (ndlp);
2547
2548         return NULL;
2549 }
2550
2551 void
2552 lpfc_nlp_init(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
2553                  uint32_t did)
2554 {
2555         memset(ndlp, 0, sizeof (struct lpfc_nodelist));
2556         INIT_LIST_HEAD(&ndlp->nodev_timeout_evt.evt_listp);
2557         INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
2558         init_timer(&ndlp->nlp_tmofunc);
2559         ndlp->nlp_tmofunc.function = lpfc_nodev_timeout;
2560         ndlp->nlp_tmofunc.data = (unsigned long)ndlp;
2561         init_timer(&ndlp->nlp_delayfunc);
2562         ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
2563         ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
2564         ndlp->nlp_DID = did;
2565         ndlp->nlp_phba = phba;
2566         ndlp->nlp_sid = NLP_NO_SID;
2567         return;
2568 }
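
/*
 * Typical node creation sequence, as used by lpfc_setup_disc_node() above:
 *
 *      ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
 *      if (ndlp) {
 *              lpfc_nlp_init(phba, ndlp, did);
 *              ndlp->nlp_state = NLP_STE_NPR_NODE;
 *              lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
 *      }
 */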